/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8  (SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9  (SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
        /* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
        ALTERNATIVE_2 "", \
                "jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
                "", X86_FEATURE_V_SPEC_CTRL
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
        /*
         * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
         * host's, write the MSR. This is kept out-of-line so that the common
         * case does not have to jump.
         *
         * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
         * there must not be any returns or indirect branches between this code
         * and vmentry.
         */
        movl SVM_spec_ctrl(%_ASM_DI), %eax
        cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
        je 801b
        mov $MSR_IA32_SPEC_CTRL, %ecx
        xor %edx, %edx
        wrmsr
        jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
        /* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
        ALTERNATIVE_2 "", \
                "jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
                "", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
        /* Same for after vmexit. */
        mov $MSR_IA32_SPEC_CTRL, %ecx

        /*
         * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
         * if it was not intercepted during guest execution.
         */
        cmpb $0, \spec_ctrl_intercepted
        jnz 998f
        rdmsr
        movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

        /* Now restore the host value of the MSR if different from the guest's. */
        movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
        cmp SVM_spec_ctrl(%_ASM_DI), %eax
        je 901b
        xor %edx, %edx
        wrmsr
        jmp 901b
.endm


/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
        push %_ASM_BP
        mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
        push %r15
        push %r14
        push %r13
        push %r12
#else
        push %edi
        push %esi
#endif
        push %_ASM_BX

        /*
         * Save variables needed after vmexit on the stack, in inverse
         * order compared to when they are needed.
         */

        /* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
        push %_ASM_ARG2

        /* Needed to restore access to percpu variables. */
        __ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

        /* Finally save @svm. */
        push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
        /*
         * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
         * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
         */
        mov %_ASM_ARG1, %_ASM_DI
.endif

        /* Clobbers RAX, RCX, RDX. */
        RESTORE_GUEST_SPEC_CTRL

        /*
         * Use a single vmcb (vmcb01 because it's always valid) for
         * context switching guest state via VMLOAD/VMSAVE, that way
         * the state doesn't need to be copied between vmcb01 and
         * vmcb02 when switching vmcbs for nested virtualization.
         */
        mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:      vmload %_ASM_AX
2:

        /* Get svm->current_vmcb->pa into RAX. */
        mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
        mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

        /* Load guest registers. */
        mov VCPU_RCX(%_ASM_DI), %_ASM_CX
        mov VCPU_RDX(%_ASM_DI), %_ASM_DX
        mov VCPU_RBX(%_ASM_DI), %_ASM_BX
        mov VCPU_RBP(%_ASM_DI), %_ASM_BP
        mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
        mov VCPU_R8 (%_ASM_DI), %r8
        mov VCPU_R9 (%_ASM_DI), %r9
        mov VCPU_R10(%_ASM_DI), %r10
        mov VCPU_R11(%_ASM_DI), %r11
        mov VCPU_R12(%_ASM_DI), %r12
        mov VCPU_R13(%_ASM_DI), %r13
        mov VCPU_R14(%_ASM_DI), %r14
        mov VCPU_R15(%_ASM_DI), %r15
#endif
        mov VCPU_RDI(%_ASM_DI), %_ASM_DI

        /* Enter guest mode */
        sti

3:      vmrun %_ASM_AX
4:
        cli

        /* Pop @svm to RAX while it's the only available register. */
        pop %_ASM_AX

        /* Save all guest registers. */
        mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
        mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
        mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
        mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
        mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
        mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
        mov %r8,  VCPU_R8 (%_ASM_AX)
        mov %r9,  VCPU_R9 (%_ASM_AX)
        mov %r10, VCPU_R10(%_ASM_AX)
        mov %r11, VCPU_R11(%_ASM_AX)
        mov %r12, VCPU_R12(%_ASM_AX)
        mov %r13, VCPU_R13(%_ASM_AX)
        mov %r14, VCPU_R14(%_ASM_AX)
        mov %r15, VCPU_R15(%_ASM_AX)
#endif

        /* @svm can stay in RDI from now on. */
        mov %_ASM_AX, %_ASM_DI

        mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:      vmsave %_ASM_AX
6:

        /* Restores GSBASE among other things, allowing access to percpu data. */
        pop %_ASM_AX
7:      vmload %_ASM_AX
8:

        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

        /* Clobbers RAX, RCX, RDX. */
        RESTORE_HOST_SPEC_CTRL

        /*
         * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
         * untrained as soon as we exit the VM and are back to the
         * kernel. This should be done before re-enabling interrupts
         * because interrupt handlers won't sanitize 'ret' if the return is
         * from the kernel.
         */
        UNTRAIN_RET_VM

        /*
         * Clear all general purpose registers except RSP and RAX to prevent
         * speculative use of the guest's values, even those that are reloaded
         * via the stack. In theory, an L1 cache miss when restoring registers
         * could lead to speculative execution with the guest's values.
         * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
         * free. RSP and RAX are exempt as they are restored by hardware
         * during VM-Exit.
         */
        xor %ecx, %ecx
        xor %edx, %edx
        xor %ebx, %ebx
        xor %ebp, %ebp
        xor %esi, %esi
        xor %edi, %edi
#ifdef CONFIG_X86_64
        xor %r8d,  %r8d
        xor %r9d,  %r9d
        xor %r10d, %r10d
        xor %r11d, %r11d
        xor %r12d, %r12d
        xor %r13d, %r13d
        xor %r14d, %r14d
        xor %r15d, %r15d
#endif

        /* "Pop" @spec_ctrl_intercepted. */
        pop %_ASM_BX

        /* Restore the host's callee-saved registers pushed in the prologue. */
        pop %_ASM_BX

#ifdef CONFIG_X86_64
        pop %r12
        pop %r13
        pop %r14
        pop %r15
#else
        pop %esi
        pop %edi
#endif
        pop %_ASM_BP
        RET

        RESTORE_GUEST_SPEC_CTRL_BODY
        RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)

10:     cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 2b
        ud2
30:     cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 4b
        ud2
50:     cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 6b
        ud2
70:     cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 8b
        ud2

        _ASM_EXTABLE(1b, 10b)
        _ASM_EXTABLE(3b, 30b)
        _ASM_EXTABLE(5b, 50b)
        _ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

#ifdef CONFIG_KVM_AMD_SEV

#ifdef CONFIG_X86_64
#define SEV_ES_GPRS_BASE 0x300
#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
#define SEV_ES_RSI (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
#define SEV_ES_RDI (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
#endif

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 * @hostsa: host save area (passed in RDX), used to stash GPRs across VMRUN
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
        FRAME_BEGIN

        /*
         * Save non-volatile (callee-saved) registers to the host save area.
         * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
         * saved on VMRUN.
         */
        mov %rbp, SEV_ES_RBP (%rdx)
        mov %r15, SEV_ES_R15 (%rdx)
        mov %r14, SEV_ES_R14 (%rdx)
        mov %r13, SEV_ES_R13 (%rdx)
        mov %r12, SEV_ES_R12 (%rdx)
        mov %rbx, SEV_ES_RBX (%rdx)

        /*
         * Save volatile registers that hold arguments that are needed after
         * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
         */
        mov %rdi, SEV_ES_RDI (%rdx)
        mov %rsi, SEV_ES_RSI (%rdx)

        /* Clobbers RAX, RCX, RDX (@hostsa). */
        RESTORE_GUEST_SPEC_CTRL

        /* Get svm->current_vmcb->pa into RAX. */
        mov SVM_current_vmcb(%rdi), %rax
        mov KVM_VMCB_pa(%rax), %rax

        /* Enter guest mode */
        sti

1:      vmrun %rax

2:      cli

        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

        /* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
        RESTORE_HOST_SPEC_CTRL

        /*
         * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
         * untrained as soon as we exit the VM and are back to the
         * kernel. This should be done before re-enabling interrupts
         * because interrupt handlers won't sanitize RET if the return is
         * from the kernel.
         */
        UNTRAIN_RET_VM

        FRAME_END
        RET

        RESTORE_GUEST_SPEC_CTRL_BODY
        RESTORE_HOST_SPEC_CTRL_BODY %sil

3:      cmpb $0, kvm_rebooting(%rip)
        jne 2b
        ud2

        _ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */