
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
5 * AMD SVM-SEV support
16 #include <linux/psp-sev.h>
22 #include <uapi/linux/sev-guest.h>
44 /* enable/disable SEV support */
48 /* enable/disable SEV-ES support */
52 /* enable/disable SEV-SNP support */
56 /* enable/disable SEV-ES DebugSwap support */
68 /* As defined by SEV-SNP Firmware ABI, under "Guest Policy". */
118 return -EBUSY; in sev_flush_asids()
138 sev_snp_enabled ? "-SNP" : "", ret, error); in sev_flush_asids()
145 return !!to_kvm_sev_info(kvm)->enc_context_owner; in is_mirroring_enc_context()
150 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_vcpu_has_debug_swap()
151 struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm); in sev_vcpu_has_debug_swap()
153 return sev->vmsa_features & SVM_SEV_FEAT_DEBUG_SWAP; in sev_vcpu_has_debug_swap()
160 return (sev->vmsa_features & SVM_SEV_FEAT_SECURE_TSC) && in snp_is_secure_tsc_enabled()
170 /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */ in __sev_recycle_asids()
180 enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV; in sev_misc_cg_try_charge()
181 return misc_cg_try_charge(type, sev->misc_cg, 1); in sev_misc_cg_try_charge()
186 enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV; in sev_misc_cg_uncharge()
187 misc_cg_uncharge(type, sev->misc_cg, 1); in sev_misc_cg_uncharge()
193 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid. in sev_asid_new()
194 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1. in sev_asid_new()
203 } else if (sev->es_active) { in sev_asid_new()
214 * Similarly for SEV-ES guests the min ASID can end up larger than the in sev_asid_new()
215 * max when ciphertext hiding is enabled, effectively disabling SEV-ES in sev_asid_new()
219 return -ENOTTY; in sev_asid_new()
221 WARN_ON(sev->misc_cg); in sev_asid_new()
222 sev->misc_cg = get_current_misc_cg(); in sev_asid_new()
225 put_misc_cg(sev->misc_cg); in sev_asid_new()
226 sev->misc_cg = NULL; in sev_asid_new()
240 ret = -EBUSY; in sev_asid_new()
248 sev->asid = asid; in sev_asid_new()
252 put_misc_cg(sev->misc_cg); in sev_asid_new()
253 sev->misc_cg = NULL; in sev_asid_new()
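
The fragments of sev_asid_new() above encode the ASID split: plain SEV guests allocate from [min_sev_asid, max_sev_asid], while SEV-ES (and SEV-SNP) guests allocate from [1, min_sev_asid - 1]. A minimal user-space sketch of that range selection, with the kernel's bitmap scan replaced by a plain loop and illustrative limits:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative limits; the real values come from CPUID 0x8000001F. */
static const unsigned int min_sev_asid = 100, max_sev_asid = 509;

/* Return the first free ASID in the range for the guest type, or -1
 * (standing in for -EBUSY) if the range is exhausted. */
static int asid_new(bool es_active, const bool *asid_in_use)
{
	unsigned int min_asid = es_active ? 1 : min_sev_asid;
	unsigned int max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;

	for (unsigned int asid = min_asid; asid <= max_asid; asid++)
		if (!asid_in_use[asid])
			return (int)asid;
	return -1;
}

int main(void)
{
	bool in_use[510] = { false };

	printf("SEV-ES ASID: %d\n", asid_new(true, in_use));	/* 1 */
	printf("SEV ASID:    %d\n", asid_new(false, in_use));	/* 100 */
	return 0;
}
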
259 return to_kvm_sev_info(kvm)->asid; in sev_get_asid()
269 __set_bit(sev->asid, sev_reclaim_asid_bitmap); in sev_asid_free()
273 sd->sev_vmcbs[sev->asid] = NULL; in sev_asid_free()
279 put_misc_cg(sev->misc_cg); in sev_asid_free()
280 sev->misc_cg = NULL; in sev_asid_free()
295 * Transition a page to hypervisor-owned/shared state in the RMP table. This
303 return -EIO; in kvm_rmp_make_shared()
310 * Certain page-states, such as Pre-Guest and Firmware pages (as documented
311 * in Chapter 5 of the SEV-SNP Firmware ABI under "Page States") cannot be
312 * directly transitioned back to normal/hypervisor-owned state via RMPUPDATE
332 return -EIO; in snp_page_reclaim()
336 return -EIO; in snp_page_reclaim()
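
snp_page_reclaim() above pairs two steps whose order matters: a page in a firmware-owned state must first be reclaimed through the PSP (SEV_CMD_SNP_PAGE_RECLAIM) and only then flipped back to hypervisor-owned via RMPUPDATE. A hedged sketch of that flow; sev_cmd_page_reclaim_stub() and rmp_make_shared_stub() are illustrative stand-ins, not real APIs:

#include <stdio.h>

/* Stand-ins for the PSP reclaim command and the RMPUPDATE transition. */
static int sev_cmd_page_reclaim_stub(unsigned long pfn)
{
	printf("SNP_PAGE_RECLAIM pfn %#lx\n", pfn);
	return 0;
}

static int rmp_make_shared_stub(unsigned long pfn)
{
	printf("RMPUPDATE -> shared pfn %#lx\n", pfn);
	return 0;
}

/* On any failure the page must be leaked, not returned to the page
 * allocator, because its RMP state is no longer known. */
static int snp_page_reclaim_sketch(unsigned long pfn)
{
	if (sev_cmd_page_reclaim_stub(pfn))
		return -1;	/* stands in for -EIO */
	if (rmp_make_shared_stub(pfn))
		return -1;
	return 0;
}

int main(void)
{
	return snp_page_reclaim_sketch(0x42);
}
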
368 * - Both pages are from shared guest memory, so they need to be protected
375 * - The response page needs to be switched to Firmware-owned[1] state
381 * Both of these issues can be avoided completely by using separately-allocated
391 * [1] See the "Page States" section of the SEV-SNP Firmware ABI for more
392 * details on Firmware-owned pages, along with "RMP and VMPL Access Checks"
402 return -ENOMEM; in snp_guest_req_init()
404 sev->guest_resp_buf = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); in snp_guest_req_init()
405 if (!sev->guest_resp_buf) { in snp_guest_req_init()
407 return -EIO; in snp_guest_req_init()
410 sev->guest_req_buf = page_address(req_page); in snp_guest_req_init()
411 mutex_init(&sev->guest_req_mutex); in snp_guest_req_init()
420 if (sev->guest_resp_buf) in snp_guest_req_cleanup()
421 snp_free_firmware_page(sev->guest_resp_buf); in snp_guest_req_cleanup()
423 if (sev->guest_req_buf) in snp_guest_req_cleanup()
424 __free_page(virt_to_page(sev->guest_req_buf)); in snp_guest_req_cleanup()
426 sev->guest_req_buf = NULL; in snp_guest_req_cleanup()
427 sev->guest_resp_buf = NULL; in snp_guest_req_cleanup()
441 if (kvm->created_vcpus) in __sev_guest_init()
442 return -EINVAL; in __sev_guest_init()
444 if (data->flags) in __sev_guest_init()
445 return -EINVAL; in __sev_guest_init()
450 if (data->vmsa_features & ~valid_vmsa_features) in __sev_guest_init()
451 return -EINVAL; in __sev_guest_init()
453 if (data->ghcb_version > GHCB_VERSION_MAX || (!es_active && data->ghcb_version)) in __sev_guest_init()
454 return -EINVAL; in __sev_guest_init()
458 * 2 of the GHCB protocol, so default to that for SEV-ES guests created in __sev_guest_init()
461 if (es_active && !data->ghcb_version) in __sev_guest_init()
462 data->ghcb_version = 2; in __sev_guest_init()
464 if (snp_active && data->ghcb_version < 2) in __sev_guest_init()
465 return -EINVAL; in __sev_guest_init()
467 if (unlikely(sev->active)) in __sev_guest_init()
468 return -EINVAL; in __sev_guest_init()
470 sev->active = true; in __sev_guest_init()
471 sev->es_active = es_active; in __sev_guest_init()
472 sev->vmsa_features = data->vmsa_features; in __sev_guest_init()
473 sev->ghcb_version = data->ghcb_version; in __sev_guest_init()
476 sev->vmsa_features |= SVM_SEV_FEAT_SNP_ACTIVE; in __sev_guest_init()
487 if (!zalloc_cpumask_var(&sev->have_run_cpus, GFP_KERNEL_ACCOUNT)) { in __sev_guest_init()
488 ret = -ENOMEM; in __sev_guest_init()
499 INIT_LIST_HEAD(&sev->regions_list); in __sev_guest_init()
500 INIT_LIST_HEAD(&sev->mirror_vms); in __sev_guest_init()
501 sev->need_init = false; in __sev_guest_init()
508 free_cpumask_var(sev->have_run_cpus); in __sev_guest_init()
510 argp->error = init_args.error; in __sev_guest_init()
512 sev->asid = 0; in __sev_guest_init()
514 sev->vmsa_features = 0; in __sev_guest_init()
515 sev->es_active = false; in __sev_guest_init()
516 sev->active = false; in __sev_guest_init()
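
The checks in __sev_guest_init() above encode the GHCB version rules: a plain SEV guest must leave ghcb_version at 0, an SEV-ES guest that requests no version defaults to 2, and SNP requires at least 2. A compact, standalone restatement (GHCB_VERSION_MAX is assumed to be 2 here):

#include <stdbool.h>
#include <stdio.h>

#define GHCB_VERSION_MAX 2	/* assumed cap */

static int check_ghcb_version(bool es_active, bool snp_active, unsigned int *ver)
{
	if (*ver > GHCB_VERSION_MAX || (!es_active && *ver))
		return -1;	/* -EINVAL */
	if (es_active && !*ver)
		*ver = 2;	/* KVM_SEV_ES_INIT predates versioning */
	if (snp_active && *ver < 2)
		return -1;
	return 0;
}

int main(void)
{
	unsigned int ver = 0;

	if (!check_ghcb_version(true, false, &ver))
		printf("SEV-ES defaults to GHCB version %u\n", ver);
	return 0;
}
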
528 if (kvm->arch.vm_type != KVM_X86_DEFAULT_VM) in sev_guest_init()
529 return -EINVAL; in sev_guest_init()
531 vm_type = (argp->id == KVM_SEV_INIT ? KVM_X86_SEV_VM : KVM_X86_SEV_ES_VM); in sev_guest_init()
547 if (!to_kvm_sev_info(kvm)->need_init) in sev_guest_init2()
548 return -EINVAL; in sev_guest_init2()
550 if (kvm->arch.vm_type != KVM_X86_SEV_VM && in sev_guest_init2()
551 kvm->arch.vm_type != KVM_X86_SEV_ES_VM && in sev_guest_init2()
552 kvm->arch.vm_type != KVM_X86_SNP_VM) in sev_guest_init2()
553 return -EINVAL; in sev_guest_init2()
555 if (copy_from_user(&data, u64_to_user_ptr(argp->data), sizeof(data))) in sev_guest_init2()
556 return -EFAULT; in sev_guest_init2()
558 return __sev_guest_init(kvm, argp, &data, kvm->arch.vm_type); in sev_guest_init2()
580 return -EBADF; in __sev_issue_cmd()
589 return __sev_issue_cmd(sev->fd, id, data, error); in sev_issue_cmd()
598 int *error = &argp->error; in sev_launch_start()
602 return -ENOTTY; in sev_launch_start()
604 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in sev_launch_start()
605 return -EFAULT; in sev_launch_start()
635 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error); in sev_launch_start()
648 if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))) { in sev_launch_start()
650 ret = -EFAULT; in sev_launch_start()
654 sev->policy = params.policy; in sev_launch_start()
655 sev->handle = start.handle; in sev_launch_start()
656 sev->fd = argp->sev_fd; in sev_launch_start()
677 lockdep_assert_held(&kvm->lock); in sev_pin_memory()
680 return ERR_PTR(-EINVAL); in sev_pin_memory()
684 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT; in sev_pin_memory()
685 npages = (last - first + 1); in sev_pin_memory()
687 locked = sev->pages_locked + npages; in sev_pin_memory()
691 return ERR_PTR(-ENOMEM); in sev_pin_memory()
695 return ERR_PTR(-EINVAL); in sev_pin_memory()
705 return ERR_PTR(-ENOMEM); in sev_pin_memory()
711 ret = -ENOMEM; in sev_pin_memory()
716 sev->pages_locked = locked; in sev_pin_memory()
733 to_kvm_sev_info(kvm)->pages_locked -= npages; in sev_unpin_memory()
773 wbnoinvd_on_cpus_mask(to_kvm_sev_info(kvm)->have_run_cpus); in sev_writeback_caches()
806 return -ENOTTY; in sev_launch_update_data()
808 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in sev_launch_update_data()
809 return -EFAULT; in sev_launch_update_data()
821 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in in sev_launch_update_data()
827 data.handle = to_kvm_sev_info(kvm)->handle; in sev_launch_update_data()
833 * If the user buffer is not page-aligned, calculate the offset in sev_launch_update_data()
836 offset = vaddr & (PAGE_SIZE - 1); in sev_launch_update_data()
841 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size); in sev_launch_update_data()
845 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error); in sev_launch_update_data()
849 size -= len; in sev_launch_update_data()
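
The offset arithmetic above is what lets LAUNCH_UPDATE_DATA walk a user buffer that is not page-aligned: only the first chunk carries a non-zero intra-page offset, and each chunk length is capped by both the pinned pages and the bytes remaining. A self-contained demonstration of the same arithmetic (the 16-page cap per iteration is arbitrary):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	unsigned long vaddr = 0x7f0000001200UL;	/* not page-aligned */
	size_t size = 70000;			/* bytes to encrypt */

	while (size) {
		unsigned long first = vaddr >> PAGE_SHIFT;
		unsigned long last = (vaddr + size - 1) >> PAGE_SHIFT;
		unsigned long pages = min_sz(last - first + 1, 16);

		size_t offset = vaddr & (PAGE_SIZE - 1);
		size_t len = min_sz(pages * PAGE_SIZE - offset, size);

		printf("chunk: vaddr=%#lx offset=%zu len=%zu\n", vaddr, offset, len);
		vaddr += len;
		size -= len;
	}
	return 0;
}
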
866 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_vmsa()
867 struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm); in sev_es_sync_vmsa()
868 struct sev_es_save_area *save = svm->sev_es.vmsa; in sev_es_sync_vmsa()
875 if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) in sev_es_sync_vmsa()
876 return -EINVAL; in sev_es_sync_vmsa()
879 * SEV-ES will use a VMSA that is pointed to by the VMCB, not in sev_es_sync_vmsa()
882 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state. in sev_es_sync_vmsa()
884 memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); in sev_es_sync_vmsa()
887 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; in sev_es_sync_vmsa()
888 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX]; in sev_es_sync_vmsa()
889 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in sev_es_sync_vmsa()
890 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; in sev_es_sync_vmsa()
891 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP]; in sev_es_sync_vmsa()
892 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP]; in sev_es_sync_vmsa()
893 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI]; in sev_es_sync_vmsa()
894 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI]; in sev_es_sync_vmsa()
896 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8]; in sev_es_sync_vmsa()
897 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9]; in sev_es_sync_vmsa()
898 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10]; in sev_es_sync_vmsa()
899 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11]; in sev_es_sync_vmsa()
900 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12]; in sev_es_sync_vmsa()
901 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13]; in sev_es_sync_vmsa()
902 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14]; in sev_es_sync_vmsa()
903 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15]; in sev_es_sync_vmsa()
905 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; in sev_es_sync_vmsa()
907 /* Sync some non-GPR registers before encrypting */ in sev_es_sync_vmsa()
908 save->xcr0 = svm->vcpu.arch.xcr0; in sev_es_sync_vmsa()
909 save->pkru = svm->vcpu.arch.pkru; in sev_es_sync_vmsa()
910 save->xss = svm->vcpu.arch.ia32_xss; in sev_es_sync_vmsa()
911 save->dr6 = svm->vcpu.arch.dr6; in sev_es_sync_vmsa()
913 save->sev_features = sev->vmsa_features; in sev_es_sync_vmsa()
919 if (vcpu->kvm->arch.vm_type != KVM_X86_DEFAULT_VM) { in sev_es_sync_vmsa()
920 xsave = &vcpu->arch.guest_fpu.fpstate->regs.xsave; in sev_es_sync_vmsa()
921 save->x87_dp = xsave->i387.rdp; in sev_es_sync_vmsa()
922 save->mxcsr = xsave->i387.mxcsr; in sev_es_sync_vmsa()
923 save->x87_ftw = xsave->i387.twd; in sev_es_sync_vmsa()
924 save->x87_fsw = xsave->i387.swd; in sev_es_sync_vmsa()
925 save->x87_fcw = xsave->i387.cwd; in sev_es_sync_vmsa()
926 save->x87_fop = xsave->i387.fop; in sev_es_sync_vmsa()
927 save->x87_ds = 0; in sev_es_sync_vmsa()
928 save->x87_cs = 0; in sev_es_sync_vmsa()
929 save->x87_rip = xsave->i387.rip; in sev_es_sync_vmsa()
935 * an 8*8 bytes area with bytes 0-7, and an 8*2 bytes in sev_es_sync_vmsa()
936 * area with bytes 8-9 of each register. in sev_es_sync_vmsa()
938 d = save->fpreg_x87 + i * 8; in sev_es_sync_vmsa()
939 s = ((u8 *)xsave->i387.st_space) + i * 16; in sev_es_sync_vmsa()
941 save->fpreg_x87[64 + i * 2] = s[8]; in sev_es_sync_vmsa()
942 save->fpreg_x87[64 + i * 2 + 1] = s[9]; in sev_es_sync_vmsa()
944 memcpy(save->fpreg_xmm, xsave->i387.xmm_space, 256); in sev_es_sync_vmsa()
948 memcpy(save->fpreg_ymm, s, 256); in sev_es_sync_vmsa()
950 memset(save->fpreg_ymm, 0, 256); in sev_es_sync_vmsa()
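
The loop above converts the XSAVE layout of the x87 registers (16 bytes per ST register) into the legacy packed VMSA layout: an 8*8-byte area holding bytes 0-7 of every register, followed by an 8*2-byte area holding bytes 8-9. A standalone demonstration of the repacking (the bytes 0-7 copy is implied by the comment; the listing only shows the high-byte stores):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint8_t st_space[8][16];	/* XSAVE: 16 bytes per ST register */
	uint8_t fpreg_x87[80] = { 0 };	/* packed VMSA layout */

	/* Tag each register with a recognizable pattern. */
	for (int i = 0; i < 8; i++)
		memset(st_space[i], 0x10 * (i + 1), 16);

	for (int i = 0; i < 8; i++) {
		uint8_t *d = fpreg_x87 + i * 8;
		const uint8_t *s = st_space[i];

		memcpy(d, s, 8);			/* bytes 0-7 */
		fpreg_x87[64 + i * 2]     = s[8];	/* bytes 8-9 */
		fpreg_x87[64 + i * 2 + 1] = s[9];
	}

	printf("ST0 byte 0: %#x, ST7 byte 9: %#x\n",
	       fpreg_x87[0], fpreg_x87[64 + 7 * 2 + 1]);
	return 0;
}
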
966 if (vcpu->guest_debug) { in __sev_launch_update_vmsa()
967 pr_warn_once("KVM_SET_GUEST_DEBUG for SEV-ES guest is not supported"); in __sev_launch_update_vmsa()
968 return -EINVAL; in __sev_launch_update_vmsa()
971 /* Perform some pre-encryption checks against the VMSA */ in __sev_launch_update_vmsa()
977 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of in __sev_launch_update_vmsa()
981 clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); in __sev_launch_update_vmsa()
984 vmsa.handle = to_kvm_sev_info(kvm)->handle; in __sev_launch_update_vmsa()
985 vmsa.address = __sme_pa(svm->sev_es.vmsa); in __sev_launch_update_vmsa()
992 * SEV-ES guests maintain an encrypted version of their FPU in __sev_launch_update_vmsa()
994 * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't in __sev_launch_update_vmsa()
997 fpstate_set_confidential(&vcpu->arch.guest_fpu); in __sev_launch_update_vmsa()
998 vcpu->arch.guest_state_protected = true; in __sev_launch_update_vmsa()
1001 * SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it in __sev_launch_update_vmsa()
1017 return -ENOTTY; in sev_launch_update_vmsa()
1020 ret = mutex_lock_killable(&vcpu->mutex); in sev_launch_update_vmsa()
1024 ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error); in sev_launch_update_vmsa()
1026 mutex_unlock(&vcpu->mutex); in sev_launch_update_vmsa()
1036 void __user *measure = u64_to_user_ptr(argp->data); in sev_launch_measure()
1044 return -ENOTTY; in sev_launch_measure()
1047 return -EFAULT; in sev_launch_measure()
1058 return -EINVAL; in sev_launch_measure()
1062 return -ENOMEM; in sev_launch_measure()
1069 data.handle = to_kvm_sev_info(kvm)->handle; in sev_launch_measure()
1070 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error); in sev_launch_measure()
1083 ret = -EFAULT; in sev_launch_measure()
1089 ret = -EFAULT; in sev_launch_measure()
1100 return -ENOTTY; in sev_launch_finish()
1102 data.handle = to_kvm_sev_info(kvm)->handle; in sev_launch_finish()
1103 return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error); in sev_launch_finish()
1113 return -ENOTTY; in sev_guest_status()
1117 data.handle = to_kvm_sev_info(kvm)->handle; in sev_guest_status()
1118 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error); in sev_guest_status()
1126 if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))) in sev_guest_status()
1127 ret = -EFAULT; in sev_guest_status()
1139 data.handle = to_kvm_sev_info(kvm)->handle; in __sev_issue_dbg_cmd()
1173 /* if inputs are not 16-byte then use intermediate buffer */ in __sev_dbg_decrypt_user()
1179 return -ENOMEM; in __sev_dbg_decrypt_user()
1191 ret = -EFAULT; in __sev_dbg_decrypt_user()
1215 return -ENOMEM; in __sev_dbg_encrypt_user()
1219 return -EFAULT; in __sev_dbg_encrypt_user()
1226 * If destination buffer or length is not aligned then do read-modify-write: in __sev_dbg_encrypt_user()
1227 * - decrypt destination in an intermediate buffer in __sev_dbg_encrypt_user()
1228 * - copy the source buffer in an intermediate buffer in __sev_dbg_encrypt_user()
1229 * - use the intermediate buffer as source buffer in __sev_dbg_encrypt_user()
1236 ret = -ENOMEM; in __sev_dbg_encrypt_user()
1257 ret = -EFAULT; in __sev_dbg_encrypt_user()
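
__sev_dbg_encrypt_user() above handles a destination that is not 16-byte aligned with the read-modify-write listed in its comment: decrypt the destination span into a bounce buffer, overlay the source bytes, then feed the bounce buffer to DBG_ENCRYPT as an aligned source. A sketch of that sequence; dbg_decrypt_stub() and dbg_encrypt_stub() are illustrative stand-ins for the firmware commands:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void dbg_decrypt_stub(unsigned char *dst, size_t len)
{
	memset(dst, 0xAA, len);	/* pretend this is the decrypted dest */
}

static void dbg_encrypt_stub(const unsigned char *src, size_t len)
{
	(void)src;
	printf("DBG_ENCRYPT on %zu aligned bytes\n", len);
}

int main(void)
{
	size_t dst_offset = 5, len = 7;		/* unaligned destination span */
	size_t span = (dst_offset + len + 15) & ~15UL; /* round up to 16 */
	unsigned char src[7] = "secret";
	unsigned char *buf = malloc(span);

	if (!buf)
		return 1;

	dbg_decrypt_stub(buf, span);		/* 1. decrypt destination */
	memcpy(buf + dst_offset, src, len);	/* 2. overlay source bytes */
	dbg_encrypt_stub(buf, span);		/* 3. re-encrypt whole span */

	free(buf);
	return 0;
}
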
1288 return -ENOTTY; in sev_dbg_crypt()
1290 if (copy_from_user(&debug, u64_to_user_ptr(argp->data), sizeof(debug))) in sev_dbg_crypt()
1291 return -EFAULT; in sev_dbg_crypt()
1294 return -EINVAL; in sev_dbg_crypt()
1296 return -EINVAL; in sev_dbg_crypt()
1318 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify in sev_dbg_crypt()
1331 len = min_t(size_t, (PAGE_SIZE - s_off), size); in sev_dbg_crypt()
1338 len, &argp->error); in sev_dbg_crypt()
1345 len, &argp->error); in sev_dbg_crypt()
1355 size -= len; in sev_dbg_crypt()
1371 return -ENOTTY; in sev_launch_secret()
1373 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in sev_launch_secret()
1374 return -EFAULT; in sev_launch_secret()
1381 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in in sev_launch_secret()
1391 ret = -EINVAL; in sev_launch_secret()
1397 offset = params.guest_uaddr & (PAGE_SIZE - 1); in sev_launch_secret()
1418 data.handle = to_kvm_sev_info(kvm)->handle; in sev_launch_secret()
1419 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error); in sev_launch_secret()
1437 void __user *report = u64_to_user_ptr(argp->data); in sev_get_attestation_report()
1445 return -ENOTTY; in sev_get_attestation_report()
1447 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in sev_get_attestation_report()
1448 return -EFAULT; in sev_get_attestation_report()
1459 return -EINVAL; in sev_get_attestation_report()
1463 return -ENOMEM; in sev_get_attestation_report()
1470 data.handle = to_kvm_sev_info(kvm)->handle; in sev_get_attestation_report()
1471 ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error); in sev_get_attestation_report()
1483 ret = -EFAULT; in sev_get_attestation_report()
1489 ret = -EFAULT; in sev_get_attestation_report()
1504 data.handle = to_kvm_sev_info(kvm)->handle; in __sev_send_start_query_session_length()
1505 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error); in __sev_send_start_query_session_length()
1507 params->session_len = data.session_len; in __sev_send_start_query_session_length()
1508 if (copy_to_user(u64_to_user_ptr(argp->data), params, in __sev_send_start_query_session_length()
1510 ret = -EFAULT; in __sev_send_start_query_session_length()
1524 return -ENOTTY; in sev_send_start()
1526 if (copy_from_user(&params, u64_to_user_ptr(argp->data), in sev_send_start()
1528 return -EFAULT; in sev_send_start()
1538 return -EINVAL; in sev_send_start()
1543 return -ENOMEM; in sev_send_start()
1577 data.handle = to_kvm_sev_info(kvm)->handle; in sev_send_start()
1579 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error); in sev_send_start()
1583 ret = -EFAULT; in sev_send_start()
1589 if (copy_to_user(u64_to_user_ptr(argp->data), &params, in sev_send_start()
1591 ret = -EFAULT; in sev_send_start()
1613 data.handle = to_kvm_sev_info(kvm)->handle; in __sev_send_update_data_query_lengths()
1614 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error); in __sev_send_update_data_query_lengths()
1616 params->hdr_len = data.hdr_len; in __sev_send_update_data_query_lengths()
1617 params->trans_len = data.trans_len; in __sev_send_update_data_query_lengths()
1619 if (copy_to_user(u64_to_user_ptr(argp->data), params, in __sev_send_update_data_query_lengths()
1621 ret = -EFAULT; in __sev_send_update_data_query_lengths()
1636 return -ENOTTY; in sev_send_update_data()
1638 if (copy_from_user(&params, u64_to_user_ptr(argp->data), in sev_send_update_data()
1640 return -EFAULT; in sev_send_update_data()
1648 return -EINVAL; in sev_send_update_data()
1651 offset = params.guest_uaddr & (PAGE_SIZE - 1); in sev_send_update_data()
1653 return -EINVAL; in sev_send_update_data()
1662 ret = -ENOMEM; in sev_send_update_data()
1677 /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ in sev_send_update_data()
1681 data.handle = to_kvm_sev_info(kvm)->handle; in sev_send_update_data()
1683 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error); in sev_send_update_data()
1691 ret = -EFAULT; in sev_send_update_data()
1698 ret = -EFAULT; in sev_send_update_data()
1715 return -ENOTTY; in sev_send_finish()
1717 data.handle = to_kvm_sev_info(kvm)->handle; in sev_send_finish()
1718 return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error); in sev_send_finish()
1726 return -ENOTTY; in sev_send_cancel()
1728 data.handle = to_kvm_sev_info(kvm)->handle; in sev_send_cancel()
1729 return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error); in sev_send_cancel()
1737 int *error = &argp->error; in sev_receive_start()
1743 return -ENOTTY; in sev_receive_start()
1746 if (copy_from_user(&params, u64_to_user_ptr(argp->data), in sev_receive_start()
1748 return -EFAULT; in sev_receive_start()
1753 return -EINVAL; in sev_receive_start()
1775 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start, in sev_receive_start()
1788 if (copy_to_user(u64_to_user_ptr(argp->data), in sev_receive_start()
1790 ret = -EFAULT; in sev_receive_start()
1795 sev->handle = start.handle; in sev_receive_start()
1796 sev->fd = argp->sev_fd; in sev_receive_start()
1816 return -EINVAL; in sev_receive_update_data()
1818 if (copy_from_user(&params, u64_to_user_ptr(argp->data), in sev_receive_update_data()
1820 return -EFAULT; in sev_receive_update_data()
1825 return -EINVAL; in sev_receive_update_data()
1828 offset = params.guest_uaddr & (PAGE_SIZE - 1); in sev_receive_update_data()
1830 return -EINVAL; in sev_receive_update_data()
1857 * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP in sev_receive_update_data()
1863 /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ in sev_receive_update_data()
1867 data.handle = to_kvm_sev_info(kvm)->handle; in sev_receive_update_data()
1870 &argp->error); in sev_receive_update_data()
1887 return -ENOTTY; in sev_receive_finish()
1889 data.handle = to_kvm_sev_info(kvm)->handle; in sev_receive_finish()
1890 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error); in sev_receive_finish()
1896 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES in is_cmd_allowed_from_mirror()
1911 int r = -EBUSY; in sev_lock_two_vms()
1914 return -EINVAL; in sev_lock_two_vms()
1920 if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1)) in sev_lock_two_vms()
1921 return -EBUSY; in sev_lock_two_vms()
1923 if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1)) in sev_lock_two_vms()
1926 r = -EINTR; in sev_lock_two_vms()
1927 if (mutex_lock_killable(&dst_kvm->lock)) in sev_lock_two_vms()
1929 if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING)) in sev_lock_two_vms()
1934 mutex_unlock(&dst_kvm->lock); in sev_lock_two_vms()
1936 atomic_set_release(&src_sev->migration_in_progress, 0); in sev_lock_two_vms()
1938 atomic_set_release(&dst_sev->migration_in_progress, 0); in sev_lock_two_vms()
1947 mutex_unlock(&dst_kvm->lock); in sev_unlock_two_vms()
1948 mutex_unlock(&src_kvm->lock); in sev_unlock_two_vms()
1949 atomic_set_release(&dst_sev->migration_in_progress, 0); in sev_unlock_two_vms()
1950 atomic_set_release(&src_sev->migration_in_progress, 0); in sev_unlock_two_vms()
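
sev_lock_two_vms()/sev_unlock_two_vms() above guard intra-host migration with a per-VM migration_in_progress flag taken via cmpxchg before any mutex; because at most one migration touching a given pair can win both flags, taking dst->lock then src->lock cannot deadlock against a migration running in the reverse direction. A compilable model using C11 atomics and pthreads (struct vm is a stand-in for struct kvm plus its SEV info, and plain pthread_mutex_lock() stands in for mutex_lock_killable()):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct vm {
	pthread_mutex_t lock;
	atomic_int migration_in_progress;
};

static int lock_two_vms(struct vm *dst, struct vm *src)
{
	int expected = 0;

	if (dst == src)
		return -1;	/* -EINVAL */

	/* Mark both VMs busy before touching either mutex. */
	if (!atomic_compare_exchange_strong(&dst->migration_in_progress,
					    &expected, 1))
		return -1;	/* -EBUSY */
	expected = 0;
	if (!atomic_compare_exchange_strong(&src->migration_in_progress,
					    &expected, 1)) {
		atomic_store(&dst->migration_in_progress, 0);
		return -1;
	}

	pthread_mutex_lock(&dst->lock);	/* destination first, then source */
	pthread_mutex_lock(&src->lock);
	return 0;
}

static void unlock_two_vms(struct vm *dst, struct vm *src)
{
	pthread_mutex_unlock(&dst->lock);
	pthread_mutex_unlock(&src->lock);
	atomic_store(&dst->migration_in_progress, 0);
	atomic_store(&src->migration_in_progress, 0);
}

int main(void)
{
	struct vm a = { PTHREAD_MUTEX_INITIALIZER };
	struct vm b = { PTHREAD_MUTEX_INITIALIZER };

	if (!lock_two_vms(&a, &b)) {
		puts("both VMs locked");
		unlock_two_vms(&a, &b);
	}
	return 0;
}
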
1962 dst->active = true; in sev_migrate_from()
1963 dst->asid = src->asid; in sev_migrate_from()
1964 dst->handle = src->handle; in sev_migrate_from()
1965 dst->pages_locked = src->pages_locked; in sev_migrate_from()
1966 dst->enc_context_owner = src->enc_context_owner; in sev_migrate_from()
1967 dst->es_active = src->es_active; in sev_migrate_from()
1968 dst->vmsa_features = src->vmsa_features; in sev_migrate_from()
1970 src->asid = 0; in sev_migrate_from()
1971 src->active = false; in sev_migrate_from()
1972 src->handle = 0; in sev_migrate_from()
1973 src->pages_locked = 0; in sev_migrate_from()
1974 src->enc_context_owner = NULL; in sev_migrate_from()
1975 src->es_active = false; in sev_migrate_from()
1977 list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list); in sev_migrate_from()
1982 * to the source, so there's no danger of use-after-free. in sev_migrate_from()
1984 list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms); in sev_migrate_from()
1985 list_for_each_entry(mirror, &dst->mirror_vms, mirror_entry) { in sev_migrate_from()
1988 mirror->enc_context_owner = dst_kvm; in sev_migrate_from()
1996 struct kvm_sev_info *owner_sev_info = to_kvm_sev_info(dst->enc_context_owner); in sev_migrate_from()
1998 list_del(&src->mirror_entry); in sev_migrate_from()
1999 list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms); in sev_migrate_from()
2007 if (!dst->es_active) in sev_migrate_from()
2022 memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es)); in sev_migrate_from()
2023 dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa; in sev_migrate_from()
2024 dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa; in sev_migrate_from()
2025 dst_vcpu->arch.guest_state_protected = true; in sev_migrate_from()
2027 memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es)); in sev_migrate_from()
2028 src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE; in sev_migrate_from()
2029 src_svm->vmcb->control.vmsa_pa = INVALID_PAGE; in sev_migrate_from()
2030 src_vcpu->arch.guest_state_protected = false; in sev_migrate_from()
2039 if (src->created_vcpus != atomic_read(&src->online_vcpus) || in sev_check_source_vcpus()
2040 dst->created_vcpus != atomic_read(&dst->online_vcpus)) in sev_check_source_vcpus()
2041 return -EBUSY; in sev_check_source_vcpus()
2046 if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) in sev_check_source_vcpus()
2047 return -EINVAL; in sev_check_source_vcpus()
2050 if (!src_vcpu->arch.guest_state_protected) in sev_check_source_vcpus()
2051 return -EINVAL; in sev_check_source_vcpus()
2067 return -EBADF; in sev_vm_move_enc_context_from()
2070 return -EBADF; in sev_vm_move_enc_context_from()
2072 source_kvm = fd_file(f)->private_data; in sev_vm_move_enc_context_from()
2077 if (kvm->arch.vm_type != source_kvm->arch.vm_type || in sev_vm_move_enc_context_from()
2079 ret = -EINVAL; in sev_vm_move_enc_context_from()
2085 dst_sev->misc_cg = get_current_misc_cg(); in sev_vm_move_enc_context_from()
2087 if (dst_sev->misc_cg != src_sev->misc_cg) { in sev_vm_move_enc_context_from()
2111 if (!zalloc_cpumask_var(&dst_sev->have_run_cpus, GFP_KERNEL_ACCOUNT)) { in sev_vm_move_enc_context_from()
2112 ret = -ENOMEM; in sev_vm_move_enc_context_from()
2129 put_misc_cg(cg_cleanup_sev->misc_cg); in sev_vm_move_enc_context_from()
2130 cg_cleanup_sev->misc_cg = NULL; in sev_vm_move_enc_context_from()
2139 return -ENXIO; in sev_dev_get_attr()
2147 return -ENXIO; in sev_dev_get_attr()
2169 rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_GCTX_CREATE, &data, &argp->error); in snp_context_create()
2171 pr_warn("Failed to create SEV-SNP context, rc %d fw_error %d", in snp_context_create()
2172 rc, argp->error); in snp_context_create()
2185 data.gctx_paddr = __psp_pa(sev->snp_context); in snp_bind_asid()
2198 return -ENOTTY; in snp_launch_start()
2200 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in snp_launch_start()
2201 return -EFAULT; in snp_launch_start()
2204 if (sev->snp_context) in snp_launch_start()
2205 return -EINVAL; in snp_launch_start()
2208 return -EINVAL; in snp_launch_start()
2211 return -EINVAL; in snp_launch_start()
2215 return -EINVAL; in snp_launch_start()
2218 if (WARN_ON_ONCE(!kvm->arch.default_tsc_khz)) in snp_launch_start()
2219 return -EINVAL; in snp_launch_start()
2221 start.desired_tsc_khz = kvm->arch.default_tsc_khz; in snp_launch_start()
2224 sev->snp_context = snp_context_create(kvm, argp); in snp_launch_start()
2225 if (!sev->snp_context) in snp_launch_start()
2226 return -ENOTTY; in snp_launch_start()
2228 start.gctx_paddr = __psp_pa(sev->snp_context); in snp_launch_start()
2232 rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_START, &start, &argp->error); in snp_launch_start()
2239 sev->policy = params.policy; in snp_launch_start()
2240 sev->fd = argp->sev_fd; in snp_launch_start()
2241 rc = snp_bind_asid(kvm, &argp->error); in snp_launch_start()
2243 pr_debug("%s: Failed to bind ASID to SEV-SNP context, rc %d\n", in snp_launch_start()
2271 if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src)) in sev_gmem_post_populate()
2272 return -EINVAL; in sev_gmem_post_populate()
2283 ret = ret ? -EINVAL : -EEXIST; in sev_gmem_post_populate()
2291 ret = -EFAULT; in sev_gmem_post_populate()
2304 fw_args.gctx_paddr = __psp_pa(sev->snp_context); in sev_gmem_post_populate()
2307 fw_args.page_type = sev_populate_args->type; in sev_gmem_post_populate()
2309 ret = __sev_issue_cmd(sev_populate_args->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE, in sev_gmem_post_populate()
2310 &fw_args, &sev_populate_args->fw_error); in sev_gmem_post_populate()
2325 * unencrypted so it can be used for debugging and error-reporting. in sev_gmem_post_populate()
2332 sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID && in sev_gmem_post_populate()
2333 sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) { in sev_gmem_post_populate()
2342 /* pfn + i is hypervisor-owned now, so skip below cleanup for it. */ in sev_gmem_post_populate()
2343 n_private--; in sev_gmem_post_populate()
2347 __func__, ret, sev_populate_args->fw_error, n_private); in sev_gmem_post_populate()
2364 if (!sev_snp_guest(kvm) || !sev->snp_context) in snp_launch_update()
2365 return -EINVAL; in snp_launch_update()
2367 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in snp_launch_update()
2368 return -EFAULT; in snp_launch_update()
2379 return -EINVAL; in snp_launch_update()
2385 * state, the following pre-conditions are verified: in snp_launch_update()
2393 * The KVM MMU relies on kvm->mmu_invalidate_seq to retry nested page in snp_launch_update()
2396 * here. However, kvm->slots_lock guards against both this as well as in snp_launch_update()
2402 mutex_lock(&kvm->slots_lock); in snp_launch_update()
2406 ret = -EINVAL; in snp_launch_update()
2410 sev_populate_args.sev_fd = argp->sev_fd; in snp_launch_update()
2417 argp->error = sev_populate_args.fw_error; in snp_launch_update()
2419 __func__, count, argp->error); in snp_launch_update()
2420 ret = -EIO; in snp_launch_update()
2423 params.len -= count * PAGE_SIZE; in snp_launch_update()
2428 if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))) in snp_launch_update()
2429 ret = -EFAULT; in snp_launch_update()
2433 mutex_unlock(&kvm->slots_lock); in snp_launch_update()
2446 data.gctx_paddr = __psp_pa(sev->snp_context); in snp_launch_update_vmsa()
2451 u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT; in snp_launch_update_vmsa()
2458 ret = rmp_make_private(pfn, INITIAL_VMSA_GPA, PG_LEVEL_4K, sev->asid, true); in snp_launch_update_vmsa()
2463 data.address = __sme_pa(svm->sev_es.vmsa); in snp_launch_update_vmsa()
2464 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE, in snp_launch_update_vmsa()
2465 &data, &argp->error); in snp_launch_update_vmsa()
2472 svm->vcpu.arch.guest_state_protected = true; in snp_launch_update_vmsa()
2474 * SEV-ES (and thus SNP) guest mandates LBR Virtualization to in snp_launch_update_vmsa()
2475 * be _always_ ON. Enable it only after setting in snp_launch_update_vmsa()
2495 return -ENOTTY; in snp_launch_finish()
2497 if (!sev->snp_context) in snp_launch_finish()
2498 return -EINVAL; in snp_launch_finish()
2500 if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params))) in snp_launch_finish()
2501 return -EFAULT; in snp_launch_finish()
2504 return -EINVAL; in snp_launch_finish()
2513 return -ENOMEM; in snp_launch_finish()
2522 data->id_block_en = 1; in snp_launch_finish()
2523 data->id_block_paddr = __sme_pa(id_block); in snp_launch_finish()
2531 data->id_auth_paddr = __sme_pa(id_auth); in snp_launch_finish()
2534 data->auth_key_en = 1; in snp_launch_finish()
2537 data->vcek_disabled = params.vcek_disabled; in snp_launch_finish()
2539 memcpy(data->host_data, params.host_data, KVM_SEV_SNP_FINISH_DATA_SIZE); in snp_launch_finish()
2540 data->gctx_paddr = __psp_pa(sev->snp_context); in snp_launch_finish()
2541 ret = sev_issue_cmd(kvm, SEV_CMD_SNP_LAUNCH_FINISH, data, &argp->error); in snp_launch_finish()
2549 kvm->arch.pre_fault_allowed = true; in snp_launch_finish()
2568 return -ENOTTY; in sev_mem_enc_ioctl()
2574 return -EFAULT; in sev_mem_enc_ioctl()
2576 mutex_lock(&kvm->lock); in sev_mem_enc_ioctl()
2581 r = -EINVAL; in sev_mem_enc_ioctl()
2587 * allow the use of SNP-specific commands. in sev_mem_enc_ioctl()
2590 r = -EPERM; in sev_mem_enc_ioctl()
2597 r = -ENOTTY; in sev_mem_enc_ioctl()
2668 r = -EINVAL; in sev_mem_enc_ioctl()
2673 r = -EFAULT; in sev_mem_enc_ioctl()
2676 mutex_unlock(&kvm->lock); in sev_mem_enc_ioctl()
2688 return -ENOTTY; in sev_mem_enc_register_region()
2692 return -EINVAL; in sev_mem_enc_register_region()
2694 if (range->addr > ULONG_MAX || range->size > ULONG_MAX) in sev_mem_enc_register_region()
2695 return -EINVAL; in sev_mem_enc_register_region()
2699 return -ENOMEM; in sev_mem_enc_register_region()
2701 mutex_lock(&kvm->lock); in sev_mem_enc_register_region()
2702 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, in sev_mem_enc_register_region()
2704 if (IS_ERR(region->pages)) { in sev_mem_enc_register_region()
2705 ret = PTR_ERR(region->pages); in sev_mem_enc_register_region()
2706 mutex_unlock(&kvm->lock); in sev_mem_enc_register_region()
2711 * The guest may change the memory encryption attribute from C=0 -> C=1 in sev_mem_enc_register_region()
2714 * correct C-bit. Note, this must be done before dropping kvm->lock, in sev_mem_enc_register_region()
2716 * once kvm->lock is released. in sev_mem_enc_register_region()
2718 sev_clflush_pages(region->pages, region->npages); in sev_mem_enc_register_region()
2720 region->uaddr = range->addr; in sev_mem_enc_register_region()
2721 region->size = range->size; in sev_mem_enc_register_region()
2723 list_add_tail(&region->list, &sev->regions_list); in sev_mem_enc_register_region()
2724 mutex_unlock(&kvm->lock); in sev_mem_enc_register_region()
2737 struct list_head *head = &sev->regions_list; in find_enc_region()
2741 if (i->uaddr == range->addr && in find_enc_region()
2742 i->size == range->size) in find_enc_region()
2752 sev_unpin_memory(kvm, region->pages, region->npages); in __unregister_enc_region_locked()
2753 list_del(&region->list); in __unregister_enc_region_locked()
2765 return -EINVAL; in sev_mem_enc_unregister_region()
2767 mutex_lock(&kvm->lock); in sev_mem_enc_unregister_region()
2770 ret = -ENOTTY; in sev_mem_enc_unregister_region()
2776 ret = -EINVAL; in sev_mem_enc_unregister_region()
2784 mutex_unlock(&kvm->lock); in sev_mem_enc_unregister_region()
2788 mutex_unlock(&kvm->lock); in sev_mem_enc_unregister_region()
2800 return -EBADF; in sev_vm_copy_enc_context_from()
2803 return -EBADF; in sev_vm_copy_enc_context_from()
2805 source_kvm = fd_file(f)->private_data; in sev_vm_copy_enc_context_from()
2812 * disallow out-of-band SEV/SEV-ES init if the target is already an in sev_vm_copy_enc_context_from()
2814 * created after SEV/SEV-ES initialization, e.g. to init intercepts. in sev_vm_copy_enc_context_from()
2817 is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) { in sev_vm_copy_enc_context_from()
2818 ret = -EINVAL; in sev_vm_copy_enc_context_from()
2823 if (!zalloc_cpumask_var(&mirror_sev->have_run_cpus, GFP_KERNEL_ACCOUNT)) { in sev_vm_copy_enc_context_from()
2824 ret = -ENOMEM; in sev_vm_copy_enc_context_from()
2834 list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms); in sev_vm_copy_enc_context_from()
2837 mirror_sev->enc_context_owner = source_kvm; in sev_vm_copy_enc_context_from()
2838 mirror_sev->active = true; in sev_vm_copy_enc_context_from()
2839 mirror_sev->asid = source_sev->asid; in sev_vm_copy_enc_context_from()
2840 mirror_sev->fd = source_sev->fd; in sev_vm_copy_enc_context_from()
2841 mirror_sev->es_active = source_sev->es_active; in sev_vm_copy_enc_context_from()
2842 mirror_sev->need_init = false; in sev_vm_copy_enc_context_from()
2843 mirror_sev->handle = source_sev->handle; in sev_vm_copy_enc_context_from()
2844 INIT_LIST_HEAD(&mirror_sev->regions_list); in sev_vm_copy_enc_context_from()
2845 INIT_LIST_HEAD(&mirror_sev->mirror_vms); in sev_vm_copy_enc_context_from()
2851 * memory-views. in sev_vm_copy_enc_context_from()
2866 if (!sev->snp_context) in snp_decommission_context()
2870 data.address = __sme_pa(sev->snp_context); in snp_decommission_context()
2878 snp_free_firmware_page(sev->snp_context); in snp_decommission_context()
2879 sev->snp_context = NULL; in snp_decommission_context()
2887 struct list_head *head = &sev->regions_list; in sev_vm_destroy()
2893 WARN_ON(!list_empty(&sev->mirror_vms)); in sev_vm_destroy()
2895 free_cpumask_var(sev->have_run_cpus); in sev_vm_destroy()
2903 struct kvm *owner_kvm = sev->enc_context_owner; in sev_vm_destroy()
2905 mutex_lock(&owner_kvm->lock); in sev_vm_destroy()
2906 list_del(&sev->mirror_entry); in sev_vm_destroy()
2907 mutex_unlock(&owner_kvm->lock); in sev_vm_destroy()
2935 sev_unbind_asid(kvm, sev->handle); in sev_vm_destroy()
2976 initialized = !!status->state; in is_sev_snp_initialized()
3011 * PSP SEV driver is initialized before proceeding if KVM is built-in, in sev_hardware_setup()
3020 /* Set encryption bit location for SEV-ES guests */ in sev_hardware_setup()
3050 sev_asid_count = max_sev_asid - min_sev_asid + 1; in sev_hardware_setup()
3055 /* SEV-ES support requested? */ in sev_hardware_setup()
3060 * SEV-ES requires MMIO caching as KVM doesn't have access to the guest in sev_hardware_setup()
3068 /* Does the CPU support SEV-ES? */ in sev_hardware_setup()
3074 "LBRV must be present for SEV-ES support"); in sev_hardware_setup()
3078 /* Has the system been allocated ASIDs for SEV-ES? */ in sev_hardware_setup()
3083 max_sev_es_asid = max_snp_asid = min_sev_asid - 1; in sev_hardware_setup()
3085 sev_es_asid_count = min_sev_asid - 1; in sev_hardware_setup()
3096 min_sev_asid - 1); in sev_hardware_setup()
3107 * If ciphertext hiding is enabled, the joint SEV-ES/SEV-SNP in sev_hardware_setup()
3108 * ASID range is partitioned into separate SEV-ES and SEV-SNP in sev_hardware_setup()
3109 * ASID ranges, with the SEV-SNP range being [1..max_snp_asid] in sev_hardware_setup()
3110 * and the SEV-ES range being (max_snp_asid..max_sev_es_asid]. in sev_hardware_setup()
3111 * Note, SEV-ES may effectively be disabled if all ASIDs from in sev_hardware_setup()
3112 * the joint range are assigned to SEV-SNP. in sev_hardware_setup()
3117 pr_info("SEV-SNP ciphertext hiding enabled\n"); in sev_hardware_setup()
3122 pr_info("SEV %s (ASIDs %u - %u)\n", in sev_hardware_setup()
3128 pr_info("SEV-ES %s (ASIDs %u - %u)\n", in sev_hardware_setup()
3134 pr_info("SEV-SNP %s (ASIDs %u - %u)\n", in sev_hardware_setup()
3176 sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL); in sev_cpu_init()
3177 if (!sd->sev_vmcbs) in sev_cpu_init()
3178 return -ENOMEM; in sev_cpu_init()
3189 unsigned int asid = sev_get_asid(vcpu->kvm); in sev_flush_encrypted_page()
3194 * address is non-deterministic and unsafe. This function deliberately in sev_flush_encrypted_page()
3220 sev_writeback_caches(vcpu->kvm); in sev_flush_encrypted_page()
3227 * hva-based mmu notifiers, i.e. these events are explicitly scoped to in sev_guest_memory_reclaimed()
3240 if (!sev_es_guest(vcpu->kvm)) in sev_free_vcpu()
3247 * a guest-owned page. Transition the page to hypervisor state before in sev_free_vcpu()
3250 if (sev_snp_guest(vcpu->kvm)) { in sev_free_vcpu()
3251 u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT; in sev_free_vcpu()
3253 if (kvm_rmp_make_shared(vcpu->kvm, pfn, PG_LEVEL_4K)) in sev_free_vcpu()
3257 if (vcpu->arch.guest_state_protected) in sev_free_vcpu()
3258 sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa); in sev_free_vcpu()
3260 __free_page(virt_to_page(svm->sev_es.vmsa)); in sev_free_vcpu()
3263 if (svm->sev_es.ghcb_sa_free) in sev_free_vcpu()
3264 kvfree(svm->sev_es.ghcb_sa); in sev_free_vcpu()
3269 return (((u64)control->exit_code_hi) << 32) | control->exit_code; in kvm_get_cached_sw_exit_code()
3274 struct vmcb_control_area *control = &svm->vmcb->control; in dump_ghcb()
3277 /* Re-use the dump_invalid_vmcb module parameter */ in dump_ghcb()
3283 nbits = sizeof(svm->sev_es.valid_bitmap) * 8; in dump_ghcb()
3291 pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa); in dump_ghcb()
3292 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code", in dump_ghcb()
3294 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1", in dump_ghcb()
3295 control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm)); in dump_ghcb()
3296 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2", in dump_ghcb()
3297 control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm)); in dump_ghcb()
3298 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch", in dump_ghcb()
3299 svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm)); in dump_ghcb()
3300 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap); in dump_ghcb()
3305 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_to_ghcb()
3306 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_to_ghcb()
3314 * VM-Exit. It's the guest's responsibility to not consume random data. in sev_es_sync_to_ghcb()
3316 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]); in sev_es_sync_to_ghcb()
3317 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]); in sev_es_sync_to_ghcb()
3318 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]); in sev_es_sync_to_ghcb()
3319 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]); in sev_es_sync_to_ghcb()
3324 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_sync_from_ghcb()
3325 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_from_ghcb()
3326 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_from_ghcb()
3341 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in sev_es_sync_from_ghcb()
3343 BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap)); in sev_es_sync_from_ghcb()
3344 memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap)); in sev_es_sync_from_ghcb()
3346 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm); in sev_es_sync_from_ghcb()
3347 vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm); in sev_es_sync_from_ghcb()
3348 vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm); in sev_es_sync_from_ghcb()
3349 vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm); in sev_es_sync_from_ghcb()
3350 vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm); in sev_es_sync_from_ghcb()
3352 svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm); in sev_es_sync_from_ghcb()
3362 control->exit_code = lower_32_bits(exit_code); in sev_es_sync_from_ghcb()
3363 control->exit_code_hi = upper_32_bits(exit_code); in sev_es_sync_from_ghcb()
3364 control->exit_info_1 = kvm_ghcb_get_sw_exit_info_1(svm); in sev_es_sync_from_ghcb()
3365 control->exit_info_2 = kvm_ghcb_get_sw_exit_info_2(svm); in sev_es_sync_from_ghcb()
3366 svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm); in sev_es_sync_from_ghcb()
3369 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); in sev_es_sync_from_ghcb()
3374 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_validate_vmgexit()
3375 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_validate_vmgexit()
3386 if (svm->sev_es.ghcb->ghcb_usage) { in sev_es_validate_vmgexit()
3415 if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd) in sev_es_validate_vmgexit()
3422 if (control->exit_info_1 & SVM_IOIO_STR_MASK) { in sev_es_validate_vmgexit()
3426 if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK)) in sev_es_validate_vmgexit()
3434 if (control->exit_info_1) { in sev_es_validate_vmgexit()
3466 if (!sev_snp_guest(vcpu->kvm)) in sev_es_validate_vmgexit()
3468 if (lower_32_bits(control->exit_info_1) != SVM_VMGEXIT_AP_DESTROY) in sev_es_validate_vmgexit()
3480 if (!sev_snp_guest(vcpu->kvm) || !kvm_ghcb_sw_scratch_is_valid(svm)) in sev_es_validate_vmgexit()
3485 if (!sev_snp_guest(vcpu->kvm) || in sev_es_validate_vmgexit()
3486 !PAGE_ALIGNED(control->exit_info_1) || in sev_es_validate_vmgexit()
3487 !PAGE_ALIGNED(control->exit_info_2) || in sev_es_validate_vmgexit()
3488 control->exit_info_1 == control->exit_info_2) in sev_es_validate_vmgexit()
3501 svm->sev_es.ghcb->ghcb_usage); in sev_es_validate_vmgexit()
3520 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NONE; in sev_es_unmap_ghcb()
3522 if (!svm->sev_es.ghcb) in sev_es_unmap_ghcb()
3525 if (svm->sev_es.ghcb_sa_free) { in sev_es_unmap_ghcb()
3531 if (svm->sev_es.ghcb_sa_sync) { in sev_es_unmap_ghcb()
3532 kvm_write_guest(svm->vcpu.kvm, in sev_es_unmap_ghcb()
3533 svm->sev_es.sw_scratch, in sev_es_unmap_ghcb()
3534 svm->sev_es.ghcb_sa, in sev_es_unmap_ghcb()
3535 svm->sev_es.ghcb_sa_len); in sev_es_unmap_ghcb()
3536 svm->sev_es.ghcb_sa_sync = false; in sev_es_unmap_ghcb()
3539 kvfree(svm->sev_es.ghcb_sa); in sev_es_unmap_ghcb()
3540 svm->sev_es.ghcb_sa = NULL; in sev_es_unmap_ghcb()
3541 svm->sev_es.ghcb_sa_free = false; in sev_es_unmap_ghcb()
3544 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb); in sev_es_unmap_ghcb()
3548 kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map); in sev_es_unmap_ghcb()
3549 svm->sev_es.ghcb = NULL; in sev_es_unmap_ghcb()
3555 struct kvm *kvm = svm->vcpu.kvm; in pre_sev_run()
3563 if (sev_es_guest(kvm) && !VALID_PAGE(svm->vmcb->control.vmsa_pa)) in pre_sev_run()
3564 return -EINVAL; in pre_sev_run()
3572 if (!cpumask_test_cpu(cpu, to_kvm_sev_info(kvm)->have_run_cpus)) in pre_sev_run()
3573 cpumask_set_cpu(cpu, to_kvm_sev_info(kvm)->have_run_cpus); in pre_sev_run()
3576 svm->asid = asid; in pre_sev_run()
3584 if (sd->sev_vmcbs[asid] == svm->vmcb && in pre_sev_run()
3585 svm->vcpu.arch.last_vmentry_cpu == cpu) in pre_sev_run()
3588 sd->sev_vmcbs[asid] = svm->vmcb; in pre_sev_run()
3589 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in pre_sev_run()
3590 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in pre_sev_run()
3597 struct vmcb_control_area *control = &svm->vmcb->control; in setup_vmgexit_scratch()
3602 scratch_gpa_beg = svm->sev_es.sw_scratch; in setup_vmgexit_scratch()
3615 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) { in setup_vmgexit_scratch()
3617 ghcb_scratch_beg = control->ghcb_gpa + in setup_vmgexit_scratch()
3619 ghcb_scratch_end = control->ghcb_gpa + in setup_vmgexit_scratch()
3628 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n", in setup_vmgexit_scratch()
3633 scratch_va = (void *)svm->sev_es.ghcb; in setup_vmgexit_scratch()
3634 scratch_va += (scratch_gpa_beg - control->ghcb_gpa); in setup_vmgexit_scratch()
3647 return -ENOMEM; in setup_vmgexit_scratch()
3649 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { in setup_vmgexit_scratch()
3654 return -EFAULT; in setup_vmgexit_scratch()
3663 svm->sev_es.ghcb_sa_sync = sync; in setup_vmgexit_scratch()
3664 svm->sev_es.ghcb_sa_free = true; in setup_vmgexit_scratch()
3667 svm->sev_es.ghcb_sa = scratch_va; in setup_vmgexit_scratch()
3668 svm->sev_es.ghcb_sa_len = len; in setup_vmgexit_scratch()
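
setup_vmgexit_scratch() above accepts a scratch area that either sits inside the GHCB's shared buffer, where it can be read straight out of the already-mapped GHCB at ghcb + (scratch_gpa_beg - ghcb_gpa), or lives elsewhere in guest memory, where it must be copied into a separately allocated bounce buffer. A standalone sketch of the containment check (the shared-buffer offset and size are illustrative, not the real struct ghcb layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~4095UL)

/* Illustrative stand-ins for offsetof/sizeof of the GHCB shared buffer. */
#define SHARED_BUF_OFF  0x800UL
#define SHARED_BUF_SIZE 0x7F0UL

static bool scratch_in_ghcb(uint64_t ghcb_gpa, uint64_t beg, uint64_t len)
{
	uint64_t lo = ghcb_gpa + SHARED_BUF_OFF;
	uint64_t hi = lo + SHARED_BUF_SIZE;

	/* Same page as the GHCB and fully inside the shared buffer? */
	return (beg & PAGE_MASK) == ghcb_gpa && beg >= lo && beg + len <= hi;
}

int main(void)
{
	uint64_t ghcb_gpa = 0x100000;

	printf("%d\n", scratch_in_ghcb(ghcb_gpa, ghcb_gpa + 0x900, 0x100)); /* 1 */
	printf("%d\n", scratch_in_ghcb(ghcb_gpa, 0x200000, 0x100));	    /* 0 */
	return 0;
}
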
3681 svm->vmcb->control.ghcb_gpa &= ~(mask << pos); in set_ghcb_msr_bits()
3682 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos; in set_ghcb_msr_bits()
3687 return (svm->vmcb->control.ghcb_gpa >> pos) & mask; in get_ghcb_msr_bits()
3692 svm->vmcb->control.ghcb_gpa = value; in set_ghcb_msr()
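
set_ghcb_msr_bits()/get_ghcb_msr_bits() above implement the packed fields of the GHCB MSR protocol: a field at bit position pos is cleared with ~(mask << pos), written with (value & mask) << pos, and read back with (v >> pos) & mask. The same helpers in isolation (the field widths chosen here are arbitrary):

#include <stdint.h>
#include <stdio.h>

static uint64_t msr;	/* stands in for vmcb->control.ghcb_gpa */

static void set_bits(uint64_t value, uint64_t mask, unsigned int pos)
{
	msr &= ~(mask << pos);
	msr |= (value & mask) << pos;
}

static uint64_t get_bits(uint64_t mask, unsigned int pos)
{
	return (msr >> pos) & mask;
}

int main(void)
{
	set_bits(0x5, 0xf, 0);		/* a 4-bit field at bit 0 */
	set_bits(0x12, 0xff, 4);	/* an 8-bit field above it */

	printf("low=%#llx high=%#llx msr=%#llx\n",
	       (unsigned long long)get_bits(0xf, 0),
	       (unsigned long long)get_bits(0xff, 4),
	       (unsigned long long)msr);
	return 0;
}
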
3699 pfn = pfn & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1); in snp_rmptable_psmash()
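
snp_rmptable_psmash() above first rounds the pfn down to a 2M boundary; KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) is 512, so the expression clears the low nine bits. In isolation:

#include <stdio.h>

#define PAGES_PER_2M 512UL	/* KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) */

int main(void)
{
	unsigned long pfn = 0x12345;

	pfn &= ~(PAGES_PER_2M - 1);	/* head pfn of the 2M page */
	printf("%#lx\n", pfn);		/* 0x12200 */
	return 0;
}
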
3716 if (vcpu->run->hypercall.ret) in snp_complete_psc_msr()
3728 struct kvm_vcpu *vcpu = &svm->vcpu; in snp_begin_psc_msr()
3735 if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) { in snp_begin_psc_msr()
3740 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; in snp_begin_psc_msr()
3741 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; in snp_begin_psc_msr()
3743 * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2) in snp_begin_psc_msr()
3744 * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that in snp_begin_psc_msr()
3746 * vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU. in snp_begin_psc_msr()
3748 vcpu->run->hypercall.ret = 0; in snp_begin_psc_msr()
3749 vcpu->run->hypercall.args[0] = gpa; in snp_begin_psc_msr()
3750 vcpu->run->hypercall.args[1] = 1; in snp_begin_psc_msr()
3751 vcpu->run->hypercall.args[2] = (op == SNP_PAGE_STATE_PRIVATE) in snp_begin_psc_msr()
3754 vcpu->run->hypercall.args[2] |= KVM_MAP_GPA_RANGE_PAGE_SZ_4K; in snp_begin_psc_msr()
3756 vcpu->arch.complete_userspace_io = snp_complete_psc_msr; in snp_begin_psc_msr()
3770 svm->sev_es.psc_inflight = 0; in snp_complete_psc()
3771 svm->sev_es.psc_idx = 0; in snp_complete_psc()
3772 svm->sev_es.psc_2m = false; in snp_complete_psc()
3776 * a PSC-specific return code in SW_EXITINFO2 that provides the "real" in snp_complete_psc()
3785 struct psc_buffer *psc = svm->sev_es.ghcb_sa; in __snp_complete_one_psc()
3786 struct psc_entry *entries = psc->entries; in __snp_complete_one_psc()
3787 struct psc_hdr *hdr = &psc->hdr; in __snp_complete_one_psc()
3791 * Everything in-flight has been processed successfully. Update the in __snp_complete_one_psc()
3793 * count of in-flight PSC entries. in __snp_complete_one_psc()
3795 for (idx = svm->sev_es.psc_idx; svm->sev_es.psc_inflight; in __snp_complete_one_psc()
3796 svm->sev_es.psc_inflight--, idx++) { in __snp_complete_one_psc()
3799 entry->cur_page = entry->pagesize ? 512 : 1; in __snp_complete_one_psc()
3802 hdr->cur_entry = idx; in __snp_complete_one_psc()
3808 struct psc_buffer *psc = svm->sev_es.ghcb_sa; in snp_complete_one_psc()
3810 if (vcpu->run->hypercall.ret) { in snp_complete_one_psc()
3823 struct psc_entry *entries = psc->entries; in snp_begin_psc()
3824 struct kvm_vcpu *vcpu = &svm->vcpu; in snp_begin_psc()
3825 struct psc_hdr *hdr = &psc->hdr; in snp_begin_psc()
3832 if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) { in snp_begin_psc()
3838 /* There should be no other PSCs in-flight at this point. */ in snp_begin_psc()
3839 if (WARN_ON_ONCE(svm->sev_es.psc_inflight)) { in snp_begin_psc()
3849 idx_start = hdr->cur_entry; in snp_begin_psc()
3850 idx_end = hdr->end_entry; in snp_begin_psc()
3858 for (idx = idx_start; idx <= idx_end; idx++, hdr->cur_entry++) { in snp_begin_psc()
3872 * If this is a partially-completed 2M range, force 4K handling in snp_begin_psc()
3878 npages -= entry_start.cur_page; in snp_begin_psc()
3893 svm->sev_es.psc_2m = huge; in snp_begin_psc()
3894 svm->sev_es.psc_idx = idx; in snp_begin_psc()
3895 svm->sev_es.psc_inflight = 1; in snp_begin_psc()
3910 svm->sev_es.psc_inflight++; in snp_begin_psc()
3917 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; in snp_begin_psc()
3918 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; in snp_begin_psc()
3920 * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2) in snp_begin_psc()
3921 * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that in snp_begin_psc()
3923 * vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU. in snp_begin_psc()
3925 vcpu->run->hypercall.ret = 0; in snp_begin_psc()
3926 vcpu->run->hypercall.args[0] = gfn_to_gpa(gfn); in snp_begin_psc()
3927 vcpu->run->hypercall.args[1] = npages; in snp_begin_psc()
3928 vcpu->run->hypercall.args[2] = entry_start.operation == VMGEXIT_PSC_OP_PRIVATE in snp_begin_psc()
3931 vcpu->run->hypercall.args[2] |= entry_start.pagesize in snp_begin_psc()
3934 vcpu->arch.complete_userspace_io = snp_complete_one_psc; in snp_begin_psc()
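
Both PSC paths above leave the vCPU with a pending KVM_HC_MAP_GPA_RANGE exit: args[0] carries the GPA, args[1] the 4K-page count, and args[2] packs the encryption attribute plus a page-size flag. A user-space sketch of how a VMM might decode that exit; the flag values below are assumptions modeled on <linux/kvm_para.h>, not copied from it:

#include <stdint.h>
#include <stdio.h>

/* Assumed flag layout for KVM_HC_MAP_GPA_RANGE attributes. */
#define MAP_GPA_PAGE_SZ_2M	(1u << 0)
#define MAP_GPA_ENCRYPTED	(1u << 4)

static void handle_map_gpa_range(uint64_t gpa, uint64_t npages, uint64_t attrs)
{
	printf("%s: %llu %s page(s) at %#llx\n",
	       (attrs & MAP_GPA_ENCRYPTED) ? "shared->private" : "private->shared",
	       (unsigned long long)npages,
	       (attrs & MAP_GPA_PAGE_SZ_2M) ? "2M" : "4K",
	       (unsigned long long)gpa);
}

int main(void)
{
	/* What snp_begin_psc() would hand to userspace for one private 2M entry. */
	handle_map_gpa_range(0x200000, 512, MAP_GPA_ENCRYPTED | MAP_GPA_PAGE_SZ_2M);
	return 0;
}
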
3963 guard(mutex)(&svm->sev_es.snp_vmsa_mutex); in sev_snp_init_protected_guest_state()
3965 if (!svm->sev_es.snp_ap_waiting_for_reset) in sev_snp_init_protected_guest_state()
3968 svm->sev_es.snp_ap_waiting_for_reset = false; in sev_snp_init_protected_guest_state()
3971 vcpu->arch.pv.pv_unhalted = false; in sev_snp_init_protected_guest_state()
3975 svm->vmcb->control.vmsa_pa = INVALID_PAGE; in sev_snp_init_protected_guest_state()
3978 * When replacing the VMSA during SEV-SNP AP creation, in sev_snp_init_protected_guest_state()
3981 vmcb_mark_all_dirty(svm->vmcb); in sev_snp_init_protected_guest_state()
3983 if (!VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) in sev_snp_init_protected_guest_state()
3986 gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa); in sev_snp_init_protected_guest_state()
3987 svm->sev_es.snp_vmsa_gpa = INVALID_PAGE; in sev_snp_init_protected_guest_state()
3989 slot = gfn_to_memslot(vcpu->kvm, gfn); in sev_snp_init_protected_guest_state()
3997 if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL)) in sev_snp_init_protected_guest_state()
4001 * From this point forward, the VMSA will always be a guest-mapped page in sev_snp_init_protected_guest_state()
4002 * rather than the initial one allocated by KVM in svm->sev_es.vmsa. In in sev_snp_init_protected_guest_state()
4003 * theory, svm->sev_es.vmsa could be free'd and cleaned up here, but in sev_snp_init_protected_guest_state()
4006 * allows the existing logic for SEV-ES VMSAs to be re-used with in sev_snp_init_protected_guest_state()
4007 * minimal SNP-specific changes. in sev_snp_init_protected_guest_state()
4009 svm->sev_es.snp_has_guest_vmsa = true; in sev_snp_init_protected_guest_state()
4012 svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn); in sev_snp_init_protected_guest_state()
4019 * then care should be taken to ensure svm->sev_es.vmsa is pinned in sev_snp_init_protected_guest_state()
4027 struct kvm_sev_info *sev = to_kvm_sev_info(svm->vcpu.kvm); in sev_snp_ap_creation()
4028 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_snp_ap_creation()
4034 request = lower_32_bits(svm->vmcb->control.exit_info_1); in sev_snp_ap_creation()
4035 apic_id = upper_32_bits(svm->vmcb->control.exit_info_1); in sev_snp_ap_creation()
4038 target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, apic_id); in sev_snp_ap_creation()
4042 return -EINVAL; in sev_snp_ap_creation()
4047 guard(mutex)(&target_svm->sev_es.snp_vmsa_mutex); in sev_snp_ap_creation()
4052 if (vcpu->arch.regs[VCPU_REGS_RAX] != sev->vmsa_features) { in sev_snp_ap_creation()
4054 vcpu->arch.regs[VCPU_REGS_RAX], sev->vmsa_features); in sev_snp_ap_creation()
4055 return -EINVAL; in sev_snp_ap_creation()
4058 if (!page_address_valid(vcpu, svm->vmcb->control.exit_info_2)) { in sev_snp_ap_creation()
4060 svm->vmcb->control.exit_info_2); in sev_snp_ap_creation()
4061 return -EINVAL; in sev_snp_ap_creation()
4071 if (IS_ALIGNED(svm->vmcb->control.exit_info_2, PMD_SIZE)) { in sev_snp_ap_creation()
4074 svm->vmcb->control.exit_info_2); in sev_snp_ap_creation()
4075 return -EINVAL; in sev_snp_ap_creation()
4078 target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2; in sev_snp_ap_creation()
4081 target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE; in sev_snp_ap_creation()
4086 return -EINVAL; in sev_snp_ap_creation()
4089 target_svm->sev_es.snp_ap_waiting_for_reset = true; in sev_snp_ap_creation()
4104 struct kvm *kvm = svm->vcpu.kvm; in snp_handle_guest_req()
4110 return -EINVAL; in snp_handle_guest_req()
4112 mutex_lock(&sev->guest_req_mutex); in snp_handle_guest_req()
4114 if (kvm_read_guest(kvm, req_gpa, sev->guest_req_buf, PAGE_SIZE)) { in snp_handle_guest_req()
4115 ret = -EIO; in snp_handle_guest_req()
4119 data.gctx_paddr = __psp_pa(sev->snp_context); in snp_handle_guest_req()
4120 data.req_paddr = __psp_pa(sev->guest_req_buf); in snp_handle_guest_req()
4121 data.res_paddr = __psp_pa(sev->guest_resp_buf); in snp_handle_guest_req()
4132 if (kvm_write_guest(kvm, resp_gpa, sev->guest_resp_buf, PAGE_SIZE)) { in snp_handle_guest_req()
4133 ret = -EIO; in snp_handle_guest_req()
4143 mutex_unlock(&sev->guest_req_mutex); in snp_handle_guest_req()
4149 struct kvm *kvm = svm->vcpu.kvm; in snp_handle_ext_guest_req()
4153 return -EINVAL; in snp_handle_ext_guest_req()
4157 return -EIO; in snp_handle_ext_guest_req()
4162 * report via the guest-provided data pages indicated by RAX/RBX. The in snp_handle_ext_guest_req()
4167 * certificate table in the guest-provided data pages. in snp_handle_ext_guest_req()
4170 struct kvm_vcpu *vcpu = &svm->vcpu; in snp_handle_ext_guest_req()
4177 data_gpa = vcpu->arch.regs[VCPU_REGS_RAX]; in snp_handle_ext_guest_req()
4178 data_npages = vcpu->arch.regs[VCPU_REGS_RBX]; in snp_handle_ext_guest_req()
4185 * certificate table is terminated by 24-bytes of zeroes. in snp_handle_ext_guest_req()
4188 return -EIO; in snp_handle_ext_guest_req()
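
The fallback above reports an empty certificate table through the guest-provided data pages. Per the GHCB spec's extended guest request, the table is an array of 24-byte entries (a 16-byte GUID plus 32-bit offset and length), terminated by an all-zero entry, which is why "empty" means 24 bytes of zeroes. A sketch of the terminator (field names are mine, layout per the spec):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cert_table_entry {	/* 24 bytes, no padding */
	uint8_t  guid[16];
	uint32_t offset;
	uint32_t length;
};

int main(void)
{
	uint8_t page[4096];

	/* An empty table is a single zeroed terminator entry. */
	memset(page, 0, sizeof(struct cert_table_entry));

	printf("terminator: %zu bytes of zeroes\n",
	       sizeof(struct cert_table_entry));
	return 0;
}
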
4200 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit_msr_protocol()
4201 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_handle_vmgexit_msr_protocol()
4202 struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm); in sev_handle_vmgexit_msr_protocol()
4206 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK; in sev_handle_vmgexit_msr_protocol()
4208 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
4209 control->ghcb_gpa); in sev_handle_vmgexit_msr_protocol()
4213 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version, in sev_handle_vmgexit_msr_protocol()
4225 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn; in sev_handle_vmgexit_msr_protocol()
4226 vcpu->arch.regs[VCPU_REGS_RCX] = 0; in sev_handle_vmgexit_msr_protocol()
4230 /* Error, keep GHCB MSR value as-is */ in sev_handle_vmgexit_msr_protocol()
4238 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX]; in sev_handle_vmgexit_msr_protocol()
4240 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX]; in sev_handle_vmgexit_msr_protocol()
4242 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX]; in sev_handle_vmgexit_msr_protocol()
4244 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX]; in sev_handle_vmgexit_msr_protocol()
4256 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_MSR_PROTO; in sev_handle_vmgexit_msr_protocol()
4257 ret = kvm_emulate_ap_reset_hold(&svm->vcpu); in sev_handle_vmgexit_msr_protocol()
4260 * Preset the result to a non-SIPI return and then only set in sev_handle_vmgexit_msr_protocol()
4261 * the result to non-zero when delivering a SIPI. in sev_handle_vmgexit_msr_protocol()
4278 if (!sev_snp_guest(vcpu->kvm)) in sev_handle_vmgexit_msr_protocol()
4289 if (!sev_snp_guest(vcpu->kvm)) in sev_handle_vmgexit_msr_protocol()
4295 svm->sev_es.ghcb_registered_gpa = gfn_to_gpa(gfn); in sev_handle_vmgexit_msr_protocol()
4304 if (!sev_snp_guest(vcpu->kvm)) in sev_handle_vmgexit_msr_protocol()
4307 ret = snp_begin_psc_msr(svm, control->ghcb_gpa); in sev_handle_vmgexit_msr_protocol()
4318 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n", in sev_handle_vmgexit_msr_protocol()
4324 /* Error, keep GHCB MSR value as-is */ in sev_handle_vmgexit_msr_protocol()
4328 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
4329 control->ghcb_gpa, ret); in sev_handle_vmgexit_msr_protocol()
4334 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in sev_handle_vmgexit_msr_protocol()
4335 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM; in sev_handle_vmgexit_msr_protocol()
4336 vcpu->run->system_event.ndata = 1; in sev_handle_vmgexit_msr_protocol()
4337 vcpu->run->system_event.data[0] = control->ghcb_gpa; in sev_handle_vmgexit_msr_protocol()
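Editor-added sketch of the GHCB MSR protocol layout the handler above dispatches on: a 12-bit info (request/response) code lives in bits 11:0 and the per-request data in bits 63:12, which is what the GHCB_MSR_INFO_MASK extraction at the top of the function relies on. Standalone illustration; the SKETCH_* mask mirrors the spec.

#include <stdint.h>

#define SKETCH_GHCB_MSR_INFO_MASK 0xfffull	/* bits 11:0 */

static uint64_t sketch_ghcb_msr_pack(uint64_t info, uint64_t data)
{
	return (data << 12) | (info & SKETCH_GHCB_MSR_INFO_MASK);
}

static uint64_t sketch_ghcb_msr_info(uint64_t msr)
{
	return msr & SKETCH_GHCB_MSR_INFO_MASK;	/* request/response code */
}

static uint64_t sketch_ghcb_msr_data(uint64_t msr)
{
	return msr >> 12;			/* per-request payload */
}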
4345 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit()
4350 ghcb_gpa = control->ghcb_gpa; in sev_handle_vmgexit()
4361 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { in sev_handle_vmgexit()
4370 svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; in sev_handle_vmgexit()
4372 trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb); in sev_handle_vmgexit()
4376 /* An SEV-SNP guest requires that the GHCB GPA be registered */ in sev_handle_vmgexit()
4377 if (sev_snp_guest(svm->vcpu.kvm) && !ghcb_gpa_is_registered(svm, ghcb_gpa)) { in sev_handle_vmgexit()
4378 vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB GPA [%#llx] is not registered.\n", ghcb_gpa); in sev_handle_vmgexit()
4379 return -EINVAL; in sev_handle_vmgexit()
4391 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); in sev_handle_vmgexit()
4396 control->exit_info_1, in sev_handle_vmgexit()
4397 control->exit_info_2, in sev_handle_vmgexit()
4398 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
4401 ret = setup_vmgexit_scratch(svm, false, control->exit_info_2); in sev_handle_vmgexit()
4406 control->exit_info_1, in sev_handle_vmgexit()
4407 control->exit_info_2, in sev_handle_vmgexit()
4408 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
4411 ++vcpu->stat.nmi_window_exits; in sev_handle_vmgexit()
4412 svm->nmi_masked = false; in sev_handle_vmgexit()
4417 svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NAE_EVENT; in sev_handle_vmgexit()
4421 struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm); in sev_handle_vmgexit()
4423 switch (control->exit_info_1) { in sev_handle_vmgexit()
4426 sev->ap_jump_table = control->exit_info_2; in sev_handle_vmgexit()
4430 svm_vmgexit_success(svm, sev->ap_jump_table); in sev_handle_vmgexit()
4433 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n", in sev_handle_vmgexit()
4434 control->exit_info_1); in sev_handle_vmgexit()
4446 pr_info("SEV-ES guest requested termination: reason %#llx info %#llx\n", in sev_handle_vmgexit()
4447 control->exit_info_1, control->exit_info_2); in sev_handle_vmgexit()
4448 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in sev_handle_vmgexit()
4449 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM; in sev_handle_vmgexit()
4450 vcpu->run->system_event.ndata = 1; in sev_handle_vmgexit()
4451 vcpu->run->system_event.data[0] = control->ghcb_gpa; in sev_handle_vmgexit()
4454 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); in sev_handle_vmgexit()
4458 ret = snp_begin_psc(svm, svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
4469 ret = snp_handle_guest_req(svm, control->exit_info_1, control->exit_info_2); in sev_handle_vmgexit()
4472 ret = snp_handle_ext_guest_req(svm, control->exit_info_1, control->exit_info_2); in sev_handle_vmgexit()
4476 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n", in sev_handle_vmgexit()
4477 control->exit_info_1, control->exit_info_2); in sev_handle_vmgexit()
4478 ret = -EINVAL; in sev_handle_vmgexit()
4493 if (svm->vmcb->control.exit_info_2 > INT_MAX) in sev_es_string_io()
4494 return -EINVAL; in sev_es_string_io()
4496 count = svm->vmcb->control.exit_info_2; in sev_es_string_io()
4498 return -EINVAL; in sev_es_string_io()
4504 return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa, in sev_es_string_io()
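Editor-added sketch of the scratch sizing performed above: exit_info_2 is the guest-controlled repeat count of the string instruction, so the count * size multiplication must be overflow-checked before sizing the shared scratch area. Kernel-style illustration under those assumptions.

static int sketch_string_io_bytes(u64 exit_info_2, unsigned int size,
				  unsigned int *bytes)
{
	unsigned int count;

	if (exit_info_2 > INT_MAX)
		return -EINVAL;

	count = exit_info_2;
	if (check_mul_overflow(count, size, bytes))	/* guest-controlled */
		return -EINVAL;

	return 0;	/* *bytes = count * size, safe to allocate */
}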
4521 !snp_is_secure_tsc_enabled(vcpu->kvm)); in sev_es_recalc_msr_intercepts()
4524 * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if in sev_es_recalc_msr_intercepts()
4542 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_vcpu_after_set_cpuid()
4548 vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); in sev_vcpu_after_set_cpuid()
4553 struct kvm_sev_info *sev = to_kvm_sev_info(svm->vcpu.kvm); in sev_es_init_vmcb()
4554 struct vmcb *vmcb = svm->vmcb01.ptr; in sev_es_init_vmcb()
4556 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE; in sev_es_init_vmcb()
4559 * An SEV-ES guest requires a VMSA area that is separate from the in sev_es_init_vmcb()
4565 if (!svm->sev_es.snp_has_guest_vmsa) { in sev_es_init_vmcb()
4566 if (svm->sev_es.vmsa) in sev_es_init_vmcb()
4567 svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); in sev_es_init_vmcb()
4569 svm->vmcb->control.vmsa_pa = INVALID_PAGE; in sev_es_init_vmcb()
4573 svm->vmcb->control.allowed_sev_features = sev->vmsa_features | in sev_es_init_vmcb()
4592 vmcb->control.intercepts[INTERCEPT_DR] = 0; in sev_es_init_vmcb()
4594 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); in sev_es_init_vmcb()
4595 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); in sev_es_init_vmcb()
4600 * allow debugging SEV-ES guests, and enables DebugSwap iff in sev_es_init_vmcb()
4615 * vCPU RESET for an SEV-ES guest. in sev_es_init_vmcb()
4618 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version, in sev_es_init_vmcb()
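Editor-added sketch of the SEV_INFO response value being built above. Per the GHCB spec, it packs the maximum supported protocol version into bits 63:48, the minimum into bits 47:32, the encryption-bit (C-bit) position into bits 31:24, and the SEV_INFO response code (0x001) into the low 12 bits.

#include <stdint.h>

static uint64_t sketch_sev_info_msr(uint16_t max_ver, uint16_t min_ver,
				    uint8_t cbit_pos)
{
	return ((uint64_t)max_ver << 48) |	/* max GHCB protocol version */
	       ((uint64_t)min_ver << 32) |	/* min GHCB protocol version */
	       ((uint64_t)cbit_pos << 24) |	/* C-bit position */
	       0x001;				/* SEV_INFO response code */
}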
4625 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_init_vmcb()
4627 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; in sev_init_vmcb()
4636 if (init_event && sev_snp_guest(vcpu->kvm)) in sev_init_vmcb()
4639 if (sev_es_guest(vcpu->kvm)) in sev_init_vmcb()
4648 mutex_init(&svm->sev_es.snp_vmsa_mutex); in sev_vcpu_create()
4650 if (!sev_es_guest(vcpu->kvm)) in sev_vcpu_create()
4654 * SEV-ES guests require a separate (from the VMCB) VMSA page used to in sev_vcpu_create()
4659 return -ENOMEM; in sev_vcpu_create()
4661 svm->sev_es.vmsa = page_address(vmsa_page); in sev_vcpu_create()
4663 vcpu->arch.guest_tsc_protected = snp_is_secure_tsc_enabled(vcpu->kvm); in sev_vcpu_create()
4670 struct kvm *kvm = svm->vcpu.kvm; in sev_es_prepare_switch_to_guest()
4673 * All host state for SEV-ES guests is categorized into three swap types in sev_es_prepare_switch_to_guest()
4685 * Manually save type-B state, i.e. state that is loaded by VMEXIT but in sev_es_prepare_switch_to_guest()
4689 hostsa->xcr0 = kvm_host.xcr0; in sev_es_prepare_switch_to_guest()
4690 hostsa->pkru = read_pkru(); in sev_es_prepare_switch_to_guest()
4691 hostsa->xss = kvm_host.xss; in sev_es_prepare_switch_to_guest()
4695 * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does in sev_es_prepare_switch_to_guest()
4711 hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0); in sev_es_prepare_switch_to_guest()
4712 hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1); in sev_es_prepare_switch_to_guest()
4713 hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2); in sev_es_prepare_switch_to_guest()
4714 hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3); in sev_es_prepare_switch_to_guest()
4718 * TSC_AUX is always virtualized for SEV-ES guests when the feature is in sev_es_prepare_switch_to_guest()
4725 hostsa->tsc_aux = kvm_get_user_return_msr(tsc_aux_uret_slot); in sev_es_prepare_switch_to_guest()
4733 if (!svm->sev_es.received_first_sipi) { in sev_vcpu_deliver_sipi_vector()
4734 svm->sev_es.received_first_sipi = true; in sev_vcpu_deliver_sipi_vector()
4739 switch (svm->sev_es.ap_reset_hold_type) { in sev_vcpu_deliver_sipi_vector()
4743 * set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value. in sev_vcpu_deliver_sipi_vector()
4750 * set the CS and RIP. Set GHCB data field to a non-zero value. in sev_vcpu_deliver_sipi_vector()
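Editor-added sketch of how the SIPI result reaches the guest for the two hold types above: a GHCB-based hold expects a non-zero SW_EXIT_INFO_2, while an MSR-protocol hold expects a non-zero data field in the GHCB MSR. Helper and constant names are assumed to match those used elsewhere in this file; treat this as illustrative.

static void sketch_sipi_result(struct vcpu_svm *svm)
{
	switch (svm->sev_es.ap_reset_hold_type) {
	case AP_RESET_HOLD_NAE_EVENT:
		/* GHCB-based hold: non-zero SW_EXIT_INFO_2 signals SIPI. */
		svm_vmgexit_success(svm, 1);
		break;
	case AP_RESET_HOLD_MSR_PROTO:
		/* MSR-protocol hold: non-zero GHCB MSR data field. */
		set_ghcb_msr_bits(svm, 1,
				  GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
				  GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
		break;
	default:
		break;
	}
}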
4774 * Allocate an SNP-safe page to work around the SNP erratum where in snp_safe_alloc_page_node()
4777 * 2MB-aligned VMCB, VMSA, or AVIC backing page. in snp_safe_alloc_page_node()
4780 * 2MB-aligned, and free the other. in snp_safe_alloc_page_node()
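Editor-added sketch of the two-page workaround described in the comment above; this mirrors the approach rather than the exact implementation, and omits NUMA/GFP details.

static struct page *sketch_snp_safe_page(int node, gfp_t gfp)
{
	struct page *p;

	p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);	/* 2 pages */
	if (!p)
		return NULL;

	split_page(p, 1);	/* make the two pages individually freeable */

	/* Keep whichever page is NOT the start of a 2MB-aligned region. */
	if (IS_ALIGNED(page_to_pfn(p), PTRS_PER_PMD)) {
		__free_page(p);
		return p + 1;
	}

	__free_page(p + 1);
	return p;
}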
4800 struct kvm *kvm = vcpu->kvm; in sev_handle_rmp_fault()
4811 * triggering an RMP fault for an implicit page-state change from in sev_handle_rmp_fault()
4812 * shared->private. Implicit page-state changes are forwarded to in sev_handle_rmp_fault()
4817 pr_warn_ratelimited("SEV: Unexpected RMP fault for non-private GPA 0x%llx\n", in sev_handle_rmp_fault()
4824 pr_warn_ratelimited("SEV: Unexpected RMP fault, non-private slot for GPA 0x%llx\n", in sev_handle_rmp_fault()
4849 * what is indicated by the page-size bit in the 2MB RMP entry for in sev_handle_rmp_fault()
4860 * GPA range that is backed by a 2MB-aligned PFN whose RMP entry is in in sev_handle_rmp_fault()
4931 * PFN is currently shared, then the entire 2M-aligned range can be in is_large_rmp_possible()
4956 return -ENOENT; in sev_gmem_prepare()
4975 rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned), level, sev->asid, false); in sev_gmem_prepare()
4979 return -EINVAL; in sev_gmem_prepare()
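Editor-added sketch of the level selection behind the rmp_make_private() call above: a single 2MB RMP entry is used only when the folio is large enough and the whole 2MB-aligned PFN range is still shared (unassigned) per is_large_rmp_possible(); otherwise just the 4K page is converted. Kernel-style illustration; error handling is condensed.

static int sketch_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn,
			       int max_order)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
	kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
	gfn_t gfn_aligned = ALIGN_DOWN(gfn, PTRS_PER_PMD);
	bool assigned;
	int rmp_level, rc;

	if (max_order >= PMD_ORDER &&
	    !snp_lookup_rmp_entry(pfn_aligned, &assigned, &rmp_level) &&
	    !assigned)
		/* Entire 2MB range is shared: one 2MB RMP entry covers it. */
		rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned),
				      PG_LEVEL_2M, sev->asid, false);
	else
		/* Fall back to converting just this 4K page. */
		rc = rmp_make_private(pfn, gfn_to_gpa(gfn),
				      PG_LEVEL_4K, sev->asid, false);

	return rc ? -EINVAL : 0;
}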
5013 * 4K RMP entries before attempting to convert a 4K sub-page. in sev_gmem_invalidate()
5032 * SEV-ES avoids host/guest cache coherency issues through in sev_gmem_invalidate()
5033 * WBNOINVD hooks issued via MMU notifiers during run-time, and in sev_gmem_invalidate()
5038 * userspace may also free gmem pages during run-time via in sev_gmem_invalidate()
5039 * hole-punching operations on the guest_memfd, so flush the in sev_gmem_invalidate()
5074 if (!sev_es_guest(vcpu->kvm)) in sev_decrypt_vmsa()
5079 * current unencrypted VMSA. in sev_decrypt_vmsa()
5081 if (!vcpu->arch.guest_state_protected) in sev_decrypt_vmsa()
5082 return (struct vmcb_save_area *)svm->sev_es.vmsa; in sev_decrypt_vmsa()
5084 sev = to_kvm_sev_info(vcpu->kvm); in sev_decrypt_vmsa()
5087 if (sev_snp_guest(vcpu->kvm)) { in sev_decrypt_vmsa()
5088 if (!(sev->policy & SNP_POLICY_DEBUG)) in sev_decrypt_vmsa()
5091 if (sev->policy & SEV_POLICY_NODBG) in sev_decrypt_vmsa()
5095 if (sev_snp_guest(vcpu->kvm)) { in sev_decrypt_vmsa()
5102 dbg.gctx_paddr = __psp_pa(sev->snp_context); in sev_decrypt_vmsa()
5103 dbg.src_addr = svm->vmcb->control.vmsa_pa; in sev_decrypt_vmsa()
5113 if (snp_page_reclaim(vcpu->kvm, PHYS_PFN(__pa(vmsa)))) in sev_decrypt_vmsa()
5133 dbg.handle = sev->handle; in sev_decrypt_vmsa()
5134 dbg.src_addr = svm->vmcb->control.vmsa_pa; in sev_decrypt_vmsa()
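Editor-added sketch contrasting the two debug-decrypt paths used above: SNP firmware is addressed via the guest context page (SNP_DBG_DECRYPT), while SEV/SEV-ES firmware is addressed via the guest handle (DBG_DECRYPT). Policy checks, page reclaim on failure, and destination-page setup are omitted; struct and command names follow the PSP driver headers but should be read as an illustration.

static int sketch_dbg_decrypt_vmsa(struct kvm_vcpu *vcpu, void *dst)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
	int error = 0;

	if (sev_snp_guest(vcpu->kvm)) {
		struct sev_data_snp_dbg dbg = {
			.gctx_paddr = __psp_pa(sev->snp_context),
			.src_addr   = svm->vmcb->control.vmsa_pa,
			.dst_addr   = __pa(dst),
		};

		return sev_do_cmd(SEV_CMD_SNP_DBG_DECRYPT, &dbg, &error);
	} else {
		struct sev_data_dbg dbg = {
			.handle   = sev->handle,
			.src_addr = svm->vmcb->control.vmsa_pa,
			.dst_addr = __pa(dst),
			.len      = PAGE_SIZE,
		};

		return sev_issue_cmd(vcpu->kvm, SEV_CMD_DBG_DECRYPT, &dbg,
				     &error);
	}
}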
5154 if (!vcpu->arch.guest_state_protected || !vmsa) in sev_free_decrypted_vmsa()