vmenter.S: 87e8e360a05fd29465691aeac179bcf585600c59 (old) -> c92be2fd8edf7b300a758c185fe032fd0257b886 (new), shown below as a unified diff ('-' = old only, '+' = new only)
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/linkage.h>
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
 #include "kvm-asm-offsets.h"

--- 278 unchanged lines hidden ---

 	_ASM_EXTABLE(1b, 10b)
 	_ASM_EXTABLE(3b, 30b)
 	_ASM_EXTABLE(5b, 50b)
 	_ASM_EXTABLE(7b, 70b)

 SYM_FUNC_END(__svm_vcpu_run)

 #ifdef CONFIG_KVM_AMD_SEV
+
+
+#ifdef CONFIG_X86_64
+#define SEV_ES_GPRS_BASE 0x300
+#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
+#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
+#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
+#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
+#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
+#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
+#endif
+
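Each SEV_ES_* macro above resolves to a byte offset into the host save area whose address the asm below receives in RDX. As a rough check of the arithmetic, here is a minimal C sketch; the __VCPU_REGS_* values (RBX = 3, RBP = 5, R12..R15 = 12..15, per asm/kvm_vcpu_regs.h) and WORD_SIZE == 8 on x86_64 are assumptions stated here, not part of this diff.

/* Sketch only: reproduces the offset math of the SEV_ES_* macros above. */
#include <stdio.h>

#define SEV_ES_GPRS_BASE 0x300
#define WORD_SIZE 8	/* assumption: 64-bit build */

enum { VCPU_REGS_RBX = 3, VCPU_REGS_RBP = 5, VCPU_REGS_R12 = 12,
       VCPU_REGS_R13 = 13, VCPU_REGS_R14 = 14, VCPU_REGS_R15 = 15 };

int main(void)
{
	/* Expected: RBX -> 0x318, RBP -> 0x328, R12 -> 0x360, R15 -> 0x378 */
	printf("SEV_ES_RBX = %#x\n", SEV_ES_GPRS_BASE + VCPU_REGS_RBX * WORD_SIZE);
	printf("SEV_ES_RBP = %#x\n", SEV_ES_GPRS_BASE + VCPU_REGS_RBP * WORD_SIZE);
	printf("SEV_ES_R12 = %#x\n", SEV_ES_GPRS_BASE + VCPU_REGS_R12 * WORD_SIZE);
	printf("SEV_ES_R15 = %#x\n", SEV_ES_GPRS_BASE + VCPU_REGS_R15 * WORD_SIZE);
	return 0;
}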
 /**
  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
  * @svm: struct vcpu_svm *
  * @spec_ctrl_intercepted: bool
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
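Per the x86-64 SysV calling convention, @svm arrives in RDI and @spec_ctrl_intercepted in RSI; the new body additionally uses RDX as the base of the host save area (see the "@hostsa" clobber note further down), i.e. the new revision effectively gains a third argument. A hedged sketch of the implied C prototype follows; the third parameter's name and type are assumptions, not taken from this diff.

#include <stdbool.h>

struct vcpu_svm;
struct sev_es_save_area;

/* Prototype implied by the register usage below; "hostsa" is hypothetical. */
void __svm_sev_es_vcpu_run(struct vcpu_svm *svm,		/* RDI */
			   bool spec_ctrl_intercepted,		/* RSI */
			   struct sev_es_save_area *hostsa);	/* RDX */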
-	push %rbp
-	push %r15
-	push %r14
-	push %r13
-	push %r12
-	push %rbx
-
 	/*
-	 * Save variables needed after vmexit on the stack, in inverse
-	 * order compared to when they are needed.
+	 * Save non-volatile (callee-saved) registers to the host save area.
+	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
+	 * saved on VMRUN.
 	 */
+	mov %rbp, SEV_ES_RBP (%rdx)
+	mov %r15, SEV_ES_R15 (%rdx)
+	mov %r14, SEV_ES_R14 (%rdx)
+	mov %r13, SEV_ES_R13 (%rdx)
+	mov %r12, SEV_ES_R12 (%rdx)
+	mov %rbx, SEV_ES_RBX (%rdx)

 	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
 	push %rsi

 	/* Save @svm. */
 	push %rdi

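The six pushes are gone because, as the new comment explains, every GPR except RAX and RSP is reloaded from the host save area on #VMEXIT; storing the callee-saved registers there (RDX-relative) lets hardware do the restore, which is why the matching pops disappear from the epilogue further down. A hypothetical call site, reusing the prototype sketched above; the helper that yields the per-CPU host save area is an assumption for illustration and does not appear in this diff.

/* Hypothetical call site; only the third argument landing in RDX is
 * implied by the asm above. */
struct sev_es_save_area *sev_es_host_save_area(void);	/* assumed helper */

static void sev_es_vcpu_enter(struct vcpu_svm *svm, bool spec_ctrl_intercepted)
{
	__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
			      sev_es_host_save_area());
}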
-	/* Clobbers RAX, RCX, RDX. */
+	/* Clobbers RAX, RCX, RDX (@hostsa). */
 	RESTORE_GUEST_SPEC_CTRL

 	/* Get svm->current_vmcb->pa into RAX. */
 	mov SVM_current_vmcb(%rdi), %rax
 	mov KVM_VMCB_pa(%rax), %rax

 	/* Enter guest mode */
 	sti

--- 5 unchanged lines hidden ---

 	/* Pop @svm to RDI, guest registers have been saved already. */
 	pop %rdi

 #ifdef CONFIG_MITIGATION_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif

-	/* Clobbers RAX, RCX, RDX. */
+	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm). */
 	RESTORE_HOST_SPEC_CTRL

 	/*
 	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
 	 * untrained as soon as we exit the VM and are back to the
 	 * kernel. This should be done before re-enabling interrupts
 	 * because interrupt handlers won't sanitize RET if the return is
 	 * from the kernel.
 	 */
 	UNTRAIN_RET_VM

 	/* "Pop" and discard @spec_ctrl_intercepted. */
 	pop %rax

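For context on the two invocations whose clobber comments changed: RESTORE_GUEST_SPEC_CTRL runs just before VMRUN and RESTORE_HOST_SPEC_CTRL just after #VMEXIT. Their rough intent, as a hedged C sketch; the real bodies are the asm macros expanded near the end of this function, V_SPEC_CTRL handling and skip-if-unchanged optimizations are omitted, and the names below are illustrative.

/* Intent only: swap MSR_IA32_SPEC_CTRL between host and guest values when
 * the MSR is intercepted; otherwise the guest's own writes land directly. */
static void restore_guest_spec_ctrl(struct vcpu_svm *svm, bool intercepted)
{
	if (intercepted)
		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);	/* before VMRUN */
}

static void restore_host_spec_ctrl(struct vcpu_svm *svm, bool intercepted,
				   u64 host_spec_ctrl)
{
	if (intercepted) {
		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);	/* capture guest value */
		wrmsrl(MSR_IA32_SPEC_CTRL, host_spec_ctrl);	/* back to host value */
	}
}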
-	pop %rbx
-
-	pop %r12
-	pop %r13
-	pop %r14
-	pop %r15
-	pop %rbp
 	RET

 	RESTORE_GUEST_SPEC_CTRL_BODY
 	RESTORE_HOST_SPEC_CTRL_BODY

 3:	cmpb $0, kvm_rebooting(%rip)
 	jne 2b
 	ud2

 	_ASM_EXTABLE(1b, 3b)

 SYM_FUNC_END(__svm_sev_es_vcpu_run)
 #endif /* CONFIG_KVM_AMD_SEV */
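On the fault-handling tail (unchanged here apart from the removed pops): _ASM_EXTABLE(1b, 3b) installs an exception-table fixup so that a fault on the instruction at label 1 (in the elided lines, presumably the VMRUN itself) resumes at label 3, which tolerates the fault only while KVM is tearing down for a reboot. A C-like sketch of that fixup; kvm_rebooting is the real KVM flag, everything else is an illustrative stand-in, and label 2 also lives in the hidden lines.

/* Reached via the _ASM_EXTABLE(1b, 3b) fixup above. */
static void vmrun_fault_fixup(void)
{
	if (!kvm_rebooting)	/* 3: cmpb $0, kvm_rebooting(%rip) */
		BUG();		/* ud2 */
	/* otherwise fall back into the normal post-VMRUN path (jne 2b) */
}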