--- vmenter.S (c92be2fd8edf7b300a758c185fe032fd0257b886)
+++ vmenter.S (adac42bf42c1608f23938c03e3ca53fa6c87f337)
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/linkage.h>
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
 #include "kvm-asm-offsets.h"

--- 53 unchanged lines hidden ---

 .macro RESTORE_HOST_SPEC_CTRL
 	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
 	ALTERNATIVE_2 "", \
 		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
 		"", X86_FEATURE_V_SPEC_CTRL
 901:
 .endm
-.macro RESTORE_HOST_SPEC_CTRL_BODY
+.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
 900:
 	/* Same for after vmexit. */
 	mov $MSR_IA32_SPEC_CTRL, %ecx

 	/*
 	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
 	 * if it was not intercepted during guest execution.
 	 */
-	cmpb $0, (%_ASM_SP)
+	cmpb $0, \spec_ctrl_intercepted
 	jnz 998f
 	rdmsr
 	movl %eax, SVM_spec_ctrl(%_ASM_DI)
 998:

 	/* Now restore the host value of the MSR if different from the guest's. */
 	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
 	cmp SVM_spec_ctrl(%_ASM_DI), %eax

--- 176 unchanged lines hidden ---

 #else
 	pop %esi
 	pop %edi
 #endif
 	pop %_ASM_BP
 	RET

 	RESTORE_GUEST_SPEC_CTRL_BODY
-	RESTORE_HOST_SPEC_CTRL_BODY
+	RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)

 10:	cmpb $0, _ASM_RIP(kvm_rebooting)
 	jne 2b
 	ud2
 30:	cmpb $0, _ASM_RIP(kvm_rebooting)
 	jne 4b
 	ud2
 50:	cmpb $0, _ASM_RIP(kvm_rebooting)

--- 12 unchanged lines hidden ---

 #ifdef CONFIG_KVM_AMD_SEV


 #ifdef CONFIG_X86_64
 #define SEV_ES_GPRS_BASE	0x300
 #define SEV_ES_RBX	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
 #define SEV_ES_RBP	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
+#define SEV_ES_RSI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
+#define SEV_ES_RDI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
 #define SEV_ES_R12	(SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
 #define SEV_ES_R13	(SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
 #define SEV_ES_R14	(SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
 #define SEV_ES_R15	(SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
 #endif

 /**
  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode

--- 8 unchanged lines hidden ---

  */
 	mov %rbp, SEV_ES_RBP (%rdx)
 	mov %r15, SEV_ES_R15 (%rdx)
 	mov %r14, SEV_ES_R14 (%rdx)
 	mov %r13, SEV_ES_R13 (%rdx)
 	mov %r12, SEV_ES_R12 (%rdx)
 	mov %rbx, SEV_ES_RBX (%rdx)

-	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
-	push %rsi
+	/*
+	 * Save volatile registers that hold arguments that are needed after
+	 * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
+	 */
+	mov %rdi, SEV_ES_RDI (%rdx)
+	mov %rsi, SEV_ES_RSI (%rdx)

-	/* Save @svm. */
-	push %rdi
-
 	/* Clobbers RAX, RCX, RDX (@hostsa). */
 	RESTORE_GUEST_SPEC_CTRL

 	/* Get svm->current_vmcb->pa into RAX. */
 	mov SVM_current_vmcb(%rdi), %rax
 	mov KVM_VMCB_pa(%rax), %rax

 	/* Enter guest mode */
 	sti

 1:	vmrun %rax

 2:	cli

-	/* Pop @svm to RDI, guest registers have been saved already. */
-	pop %rdi
-
 #ifdef CONFIG_MITIGATION_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif

-	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm). */
+	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
 	RESTORE_HOST_SPEC_CTRL

 	/*
 	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
 	 * untrained as soon as we exit the VM and are back to the
 	 * kernel. This should be done before re-enabling interrupts
 	 * because interrupt handlers won't sanitize RET if the return is
 	 * from the kernel.
 	 */
 	UNTRAIN_RET_VM

-	/* "Pop" and discard @spec_ctrl_intercepted. */
-	pop %rax
-
 	RET

 	RESTORE_GUEST_SPEC_CTRL_BODY
-	RESTORE_HOST_SPEC_CTRL_BODY
+	RESTORE_HOST_SPEC_CTRL_BODY %sil

 3:	cmpb $0, kvm_rebooting(%rip)
 	jne 2b
 	ud2

 	_ASM_EXTABLE(1b, 3b)

 SYM_FUNC_END(__svm_sev_es_vcpu_run)
 #endif /* CONFIG_KVM_AMD_SEV */