--- vmenter.S (7774c8f32e99b1f314c0df7c204a897792b4f378)
+++ vmenter.S (331282fdb15edaf1beb1d27a64d3f65a34d7394d)
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/linkage.h>
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
 #include "kvm-asm-offsets.h"

... 284 unchanged lines hidden ...

@@ -293,101 +293,83 @@

 #ifdef CONFIG_KVM_AMD_SEV
 /**
  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
  * @svm: struct vcpu_svm *
  * @spec_ctrl_intercepted: bool
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
-        push %_ASM_BP
-#ifdef CONFIG_X86_64
+        push %rbp
         push %r15
         push %r14
         push %r13
         push %r12
-#else
-        push %edi
-        push %esi
-#endif
-        push %_ASM_BX
+        push %rbx

         /*
          * Save variables needed after vmexit on the stack, in inverse
          * order compared to when they are needed.
          */

         /* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
-        push %_ASM_ARG2
+        push %rsi

         /* Save @svm. */
-        push %_ASM_ARG1
+        push %rdi

-.ifnc _ASM_ARG1, _ASM_DI
-        /*
-         * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
-         * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
-         */
-        mov %_ASM_ARG1, %_ASM_DI
-.endif
-
         /* Clobbers RAX, RCX, RDX. */
         RESTORE_GUEST_SPEC_CTRL

         /* Get svm->current_vmcb->pa into RAX. */
-        mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
-        mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+        mov SVM_current_vmcb(%rdi), %rax
+        mov KVM_VMCB_pa(%rax), %rax

         /* Enter guest mode */
         sti

-1:      vmrun %_ASM_AX
+1:      vmrun %rax

 2:      cli

         /* Pop @svm to RDI, guest registers have been saved already. */
-        pop %_ASM_DI
+        pop %rdi

 #ifdef CONFIG_MITIGATION_RETPOLINE
         /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+        FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif

         /* Clobbers RAX, RCX, RDX. */
         RESTORE_HOST_SPEC_CTRL

         /*
          * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
          * untrained as soon as we exit the VM and are back to the
          * kernel. This should be done before re-enabling interrupts
          * because interrupt handlers won't sanitize RET if the return is
          * from the kernel.
          */
         UNTRAIN_RET_VM

         /* "Pop" @spec_ctrl_intercepted. */
-        pop %_ASM_BX
+        pop %rbx

-        pop %_ASM_BX
+        pop %rbx

-#ifdef CONFIG_X86_64
         pop %r12
         pop %r13
         pop %r14
         pop %r15
-#else
-        pop %esi
-        pop %edi
-#endif
-        pop %_ASM_BP
+        pop %rbp
         RET

         RESTORE_GUEST_SPEC_CTRL_BODY
         RESTORE_HOST_SPEC_CTRL_BODY

-3:      cmpb $0, _ASM_RIP(kvm_rebooting)
+3:      cmpb $0, kvm_rebooting(%rip)
         jne 2b
         ud2

         _ASM_EXTABLE(1b, 3b)

 SYM_FUNC_END(__svm_sev_es_vcpu_run)
 #endif /* CONFIG_KVM_AMD_SEV */
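The new revision hard-codes the x86-64 registers that the `_ASM_*` macros expand to on 64-bit builds, as the hunk pairings above show: `%_ASM_BP`/`%_ASM_BX`/`%_ASM_DI`/`%_ASM_AX` become `%rbp`/`%rbx`/`%rdi`/`%rax`, the argument macros `%_ASM_ARG1`/`%_ASM_ARG2` become `%rdi`/`%rsi`, and `_ASM_RIP(kvm_rebooting)` becomes `kvm_rebooting(%rip)`. The `.ifnc _ASM_ARG1, _ASM_DI` stash and the `#else` push/pop branches are dropped because they only served the 32-bit calling convention. For context, the kernel-doc header corresponds to a C declaration along the lines of the sketch below; this is an illustration derived only from that header, and the wrapper `run_sev_es_guest()` is a hypothetical caller, not actual kernel code.

```c
#include <stdbool.h>

struct vcpu_svm;        /* opaque here; the real definition lives in KVM's SVM code */

/*
 * Sketch of the C view of the assembly entry point, taken from the
 * kernel-doc header above: @svm is a struct vcpu_svm *, the second
 * argument is a bool.
 */
void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

/* Hypothetical caller, for illustration only (not the real KVM call site). */
static inline void run_sev_es_guest(struct vcpu_svm *svm, bool intercepted)
{
        /*
         * Under the x86-64 calling convention the first argument arrives in
         * %rdi and the second in %rsi, which is why the assembly can push
         * %rdi/%rsi directly where it previously used %_ASM_ARG1/%_ASM_ARG2.
         */
        __svm_sev_es_vcpu_run(svm, intercepted);
}
```

As the removed `#ifdef CONFIG_X86_64`/`#else` branches suggest, the 64-bit path itself is unchanged; each hunk substitutes a macro with the text it already produced when `CONFIG_X86_64` was set.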