/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/percpu.h>
#include <asm/segment.h>
#include "run_flags.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

.section .noinstr.text, "ax"

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx *
 * @regs:	unsigned long * (to guest registers)
 * @flags:	VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
 *		VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
SYM_FUNC_START(__vmx_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @vmx for SPEC_CTRL handling */
	push %_ASM_ARG1

	/* Save @flags for SPEC_CTRL handling */
	push %_ASM_ARG3

	/*
	 * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
	 * @regs is needed after VM-Exit to save the guest's register values.
	 */
	push %_ASM_ARG2

	/* Copy @flags to BL, _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl

	lea (%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp

	ALTERNATIVE "jmp .Lspec_ctrl_done", "", X86_FEATURE_MSR_SPEC_CTRL

	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
	movl VMX_spec_ctrl(%_ASM_DI), %edi
	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
	cmp %edi, %esi
	je .Lspec_ctrl_done
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	mov %edi, %eax
	wrmsr

.Lspec_ctrl_done:

	/*
	 * Since vmentry is serializing on affected CPUs, there's no need for
	 * an LFENCE to stop speculation from skipping the wrmsr.
	 */

	/* Load @regs to RAX. */
	mov (%_ASM_SP), %_ASM_AX

	/* Check if vmlaunch or vmresume is needed */
	testb $VMX_RUN_VMRESUME, %bl

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX), %r8
	mov VCPU_R9 (%_ASM_AX), %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX.  This kills the @regs pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Check EFLAGS.ZF from 'testb' above */
	jz .Lvmlaunch

	/*
	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"
	 * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
	 * So this isn't a typical function and objtool needs to be told to
	 * save the unwind state here and restore it below.
	 */
	UNWIND_HINT_SAVE

/*
 * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
 * the 'vmx_vmexit' label below.
 */
.Lvmresume:
	vmresume
	jmp .Lvmfail

.Lvmlaunch:
	vmlaunch
	jmp .Lvmfail

	_ASM_EXTABLE(.Lvmresume, .Lfixup)
	_ASM_EXTABLE(.Lvmlaunch, .Lfixup)

SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)

	/* Restore unwind state from before the VMRESUME/VMLAUNCH. */
	UNWIND_HINT_RESTORE
	ENDBR

	/* Temporarily save guest's RAX. */
	push %_ASM_AX

	/* Reload @regs to RAX. */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX

	/* Save all guest registers, including RAX from the stack */
	pop VCPU_RAX(%_ASM_AX)
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
	xor %ebx, %ebx

.Lclear_regs:
	/* Discard @regs.  The register is irrelevant, it just can't be RBX. */
	pop %_ASM_AX

	/*
	 * Clear all general purpose registers except RSP and RBX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RBX are exempt as RSP is restored by hardware during
	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
	 * value.
	 */
	xor %eax, %eax
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/*
	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
	 * the first unbalanced RET after vmexit!
	 *
	 * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
	 * entries and (in some cases) RSB underflow.
	 *
	 * eIBRS has its own protection against poisoned RSB, so it doesn't
	 * need the RSB filling sequence.  But it does need to be enabled, and a
	 * single call to retire, before the first unbalanced RET.
	 */
	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
			   X86_FEATURE_RSB_VMEXIT_LITE

	pop %_ASM_ARG2	/* @flags */
	pop %_ASM_ARG1	/* @vmx */

	call vmx_spec_ctrl_restore_host

	/* Put return value in AX */
	mov %_ASM_BX, %_ASM_AX

	pop %_ASM_BX
#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

.Lfixup:
	cmpb $0, kvm_rebooting
	jne .Lvmfail
	ud2
.Lvmfail:
	/* VM-Fail: set return value to 1 */
	mov $1, %_ASM_BX
	jmp .Lclear_regs

SYM_FUNC_END(__vmx_vcpu_run)


.section .text, "ax"

/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field:	VMCS field encoding that failed
 * @fault:	%true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error().  Note,
 * all parameters are passed on the stack.
 */
SYM_FUNC_START(vmread_error_trampoline)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP

	push %_ASM_AX
	push %_ASM_CX
	push %_ASM_DX
#ifdef CONFIG_X86_64
	push %rdi
	push %rsi
	push %r8
	push %r9
	push %r10
	push %r11
#endif

	/* Load @field and @fault to arg1 and arg2 respectively. */
	mov 3*WORD_SIZE(%_ASM_BP), %_ASM_ARG2
	mov 2*WORD_SIZE(%_ASM_BP), %_ASM_ARG1

	call vmread_error

	/* Zero out @fault, which will be popped into the result register. */
	_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)

#ifdef CONFIG_X86_64
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rsi
	pop %rdi
#endif
	pop %_ASM_DX
	pop %_ASM_CX
	pop %_ASM_AX
	pop %_ASM_BP

	RET
SYM_FUNC_END(vmread_error_trampoline)

SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
	/*
	 * Unconditionally create a stack frame, getting the correct RSP on the
	 * stack (for x86-64) would take two instructions anyways, and RBP can
	 * be used to restore RSP to make objtool happy (see below).
	 */
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP

#ifdef CONFIG_X86_64
	/*
	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
	 */
	and  $-16, %rsp
	push $__KERNEL_DS
	push %rbp
#endif
	pushf
	push $__KERNEL_CS
	CALL_NOSPEC _ASM_ARG1

	/*
	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
	 * the correct value.  objtool doesn't know the callee will IRET and,
	 * without the explicit restore, thinks the stack is getting walloped.
	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
	 */
	mov %_ASM_BP, %_ASM_SP
	pop %_ASM_BP
	RET
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)