/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)
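/*
 * A worked example of the arithmetic above, assuming the usual offsets
 * from <asm/kvm_vcpu_regs.h> (e.g. __VCPU_REGS_RCX == 1) and a 64-bit
 * build (WORD_SIZE == 8): VCPU_RCX expands to SVM_vcpu_arch_regs + 8,
 * i.e. the second word of the vcpu->arch.regs array embedded in
 * struct vcpu_svm.  SVM_vcpu_arch_regs itself comes from the generated
 * "kvm-asm-offsets.h", so no structure layout is hardcoded here.
 */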
.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR. This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY
900:
	/* Same for after vmexit. */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, (%_ASM_SP)
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's. */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm
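/*
 * Layout note on the macros above: the ALTERNATIVE_2 stubs jump forward
 * to the 800:/900: bodies, which jump back to 801:/901: on the fast
 * path.  The *_BODY macros are instantiated after the RET of each
 * function below, so the slow wrmsr path stays out of the straight-line
 * vmentry/vmexit sequence.
 */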
/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables. */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI), %r8
	mov VCPU_R9 (%_ASM_DI), %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI
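	/*
	 * Note: RDI is loaded last because it held @svm and served as the
	 * base register for all of the loads above; RAX is skipped because
	 * hardware context switches it via the VMCB, and it currently holds
	 * the VMCB physical address consumed by VMRUN.
	 */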
	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on. */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data. */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/* SRSO */
	ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack. In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free. RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d, %r8d
	xor %r9d, %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif
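	/*
	 * Stack layout at this point, top first: @spec_ctrl_intercepted,
	 * then the callee-saved registers pushed in the prologue.  The
	 * first pop below merely discards the bool (hence the quoted
	 * "Pop"); the second restores the caller's RBX.
	 */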
	/* "Pop" @spec_ctrl_intercepted. */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

	/*
	 * Fault fixups for the guest vmload (1:), vmrun (3:), vmsave (5:)
	 * and host vmload (7:) above: a fault is fatal unless KVM is
	 * rebooting, in which case SVM may already have been disabled and
	 * the fault can be ignored.
	 */
10:	cmpb $0, kvm_rebooting
	jne 2b
	ud2
30:	cmpb $0, kvm_rebooting
	jne 4b
	ud2
50:	cmpb $0, kvm_rebooting
	jne 6b
	ud2
70:	cmpb $0, kvm_rebooting
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)
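/*
 * The SEV-ES variant below is deliberately simpler: hardware saves and
 * restores the guest GPRs in the encrypted VMSA, so there is no guest
 * register loading, saving or post-exit clearing to do; only the VMCB
 * physical address in RAX is set up before VMRUN.
 */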
/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

	/* Pop @svm to RDI, guest registers have been saved already. */
	pop %_ASM_DI

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/* "Pop" @spec_ctrl_intercepted. */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

	/* vmrun (1:) fault fixup; see the equivalent handlers above. */
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)