--- vmenter.S (e287bd005ad9d85dd6271dd795d3ecfb6bca46ad)
+++ vmenter.S (9f2febf3f04daebdaaa5a43cfa20e3844905c0f9)
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/linkage.h>
 #include <asm/asm.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
 #include "kvm-asm-offsets.h"

--- 18 unchanged lines hidden ---

 #define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
 #define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
 #endif

 #define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)

 .section .noinstr.text, "ax"

+.macro RESTORE_GUEST_SPEC_CTRL
+	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
+	ALTERNATIVE_2 "", \
+		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
+		"", X86_FEATURE_V_SPEC_CTRL
+801:
+.endm
+.macro RESTORE_GUEST_SPEC_CTRL_BODY
+800:
+	/*
+	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
+	 * host's, write the MSR. This is kept out-of-line so that the common
+	 * case does not have to jump.
+	 *
+	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
+	 * there must not be any returns or indirect branches between this code
+	 * and vmentry.
+	 */
+	movl SVM_spec_ctrl(%_ASM_DI), %eax
+	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
+	je 801b
+	mov $MSR_IA32_SPEC_CTRL, %ecx
+	xor %edx, %edx
+	wrmsr
+	jmp 801b
+.endm
+
+.macro RESTORE_HOST_SPEC_CTRL
+	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
+	ALTERNATIVE_2 "", \
+		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
+		"", X86_FEATURE_V_SPEC_CTRL
+901:
+.endm
+.macro RESTORE_HOST_SPEC_CTRL_BODY
+900:
+	/* Same for after vmexit. */
+	mov $MSR_IA32_SPEC_CTRL, %ecx
+
+	/*
+	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
+	 * if it was not intercepted during guest execution.
+	 */
+	cmpb $0, (%_ASM_SP)
+	jnz 998f
+	rdmsr
+	movl %eax, SVM_spec_ctrl(%_ASM_DI)
+998:
+
+	/* Now restore the host value of the MSR if different from the guest's. */
+	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
+	cmp SVM_spec_ctrl(%_ASM_DI), %eax
+	je 901b
+	xor %edx, %edx
+	wrmsr
+	jmp 901b
+.endm
+
+
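
For orientation, the two *_BODY macros added above are, in rough C terms, doing the following. This is a minimal sketch assuming the usual KVM SVM types; the helper names are illustrative and do not exist in the kernel, and the real logic has to stay in assembly because no returns or indirect branches may occur between the MSR write and VMRUN:

	/* Illustrative only: hypothetical helpers mirroring the asm macros above. */
	static void restore_guest_spec_ctrl(struct vcpu_svm *svm)
	{
		/* Skip the WRMSR when the guest value already matches the host's. */
		if (svm->spec_ctrl != this_cpu_read(x86_spec_ctrl_current))
			wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
	}

	static void restore_host_spec_ctrl(struct vcpu_svm *svm, bool spec_ctrl_intercepted)
	{
		/* If the guest could write the MSR directly, read back its latest value. */
		if (!spec_ctrl_intercepted)
			rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

		/* Restore the host value only if it differs from the guest's. */
		if (svm->spec_ctrl != this_cpu_read(x86_spec_ctrl_current))
			wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(x86_spec_ctrl_current));
	}

Keeping the bodies out of line means the common case, where the guest and host values are equal, falls straight through the ALTERNATIVE_2 stub without a taken branch.
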
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
  * @svm: struct vcpu_svm *
+ * @spec_ctrl_intercepted: bool
  */
 SYM_FUNC_START(__svm_vcpu_run)
 	push %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
 	push %r14
 	push %r13
 	push %r12
 #else
 	push %edi
 	push %esi
 #endif
 	push %_ASM_BX

 	/*
 	 * Save variables needed after vmexit on the stack, in inverse
 	 * order compared to when they are needed.
 	 */

+	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
+	push %_ASM_ARG2
+
 	/* Needed to restore access to percpu variables. */
 	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

-	/* Save @svm. */
+	/* Finally save @svm. */
 	push %_ASM_ARG1

 .ifnc _ASM_ARG1, _ASM_DI
-	/* Move @svm to RDI. */
+	/*
+	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
+	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
+	 */
 	mov %_ASM_ARG1, %_ASM_DI
 .endif

+	/* Clobbers RAX, RCX, RDX. */
+	RESTORE_GUEST_SPEC_CTRL
+
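
To make the "inverse order" save comment concrete: viewed from %rsp right after the pushes above (64-bit case), the extra slots look roughly like the hypothetical layout below. The elided middle of the function consumes @svm and the save-area address around VMRUN, which is why RESTORE_HOST_SPEC_CTRL can later test @spec_ctrl_intercepted directly at (%_ASM_SP) and why the epilogue only needs to "pop" it into a scratch register. The struct is purely illustrative; nothing like it exists in the source:

	/* Hypothetical picture of __svm_vcpu_run's extra stack slots, top of stack first. */
	struct svm_vcpu_run_slots {
		struct vcpu_svm *svm;			/* pushed last: @svm */
		unsigned long host_save_area_pa;	/* per-CPU svm_data save area, for VMLOAD */
		unsigned long spec_ctrl_intercepted;	/* @spec_ctrl_intercepted, widened by the push */
		/* %_ASM_BX, the callee-saved GPRs and %_ASM_BP pushed on entry follow */
	};
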
 	/*
 	 * Use a single vmcb (vmcb01 because it's always valid) for
 	 * context switching guest state via VMLOAD/VMSAVE, that way
 	 * the state doesn't need to be copied between vmcb01 and
 	 * vmcb02 when switching vmcbs for nested virtualization.
 	 */
 	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
 1:	vmload %_ASM_AX

--- 61 unchanged lines hidden ---

 7:	vmload %_ASM_AX
 8:

 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif

+	/* Clobbers RAX, RCX, RDX. */
+	RESTORE_HOST_SPEC_CTRL
+
 	/*
 	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
 	 * untrained as soon as we exit the VM and are back to the
 	 * kernel. This should be done before re-enabling interrupts
 	 * because interrupt handlers won't sanitize 'ret' if the return is
 	 * from the kernel.
 	 */
 	UNTRAIN_RET

--- 19 unchanged lines hidden ---

 	xor %r10d, %r10d
 	xor %r11d, %r11d
 	xor %r12d, %r12d
 	xor %r13d, %r13d
 	xor %r14d, %r14d
 	xor %r15d, %r15d
 #endif

+	/* "Pop" @spec_ctrl_intercepted. */
+	pop %_ASM_BX
+
 	pop %_ASM_BX

 #ifdef CONFIG_X86_64
 	pop %r12
 	pop %r13
 	pop %r14
 	pop %r15
 #else
 	pop %esi
 	pop %edi
 #endif
 	pop %_ASM_BP
 	RET

+	RESTORE_GUEST_SPEC_CTRL_BODY
+	RESTORE_HOST_SPEC_CTRL_BODY
+
 10:	cmpb $0, kvm_rebooting
 	jne 2b
 	ud2
 30:	cmpb $0, kvm_rebooting
 	jne 4b
 	ud2
 50:	cmpb $0, kvm_rebooting
 	jne 6b

--- 7 unchanged lines hidden ---

 	_ASM_EXTABLE(5b, 50b)
 	_ASM_EXTABLE(7b, 70b)

 SYM_FUNC_END(__svm_vcpu_run)

 /**
  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
  * @svm: struct vcpu_svm *
+ * @spec_ctrl_intercepted: bool
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	push %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
 	push %r14
 	push %r13
 	push %r12
 #else
 	push %edi
 	push %esi
 #endif
 	push %_ASM_BX

+	/*
+	 * Save variables needed after vmexit on the stack, in inverse
+	 * order compared to when they are needed.
+	 */
+
+	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
+	push %_ASM_ARG2
+
+	/* Save @svm. */
+	push %_ASM_ARG1
+
+.ifnc _ASM_ARG1, _ASM_DI
+	/*
+	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
+	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
+	 */
+	mov %_ASM_ARG1, %_ASM_DI
+.endif
+
+	/* Clobbers RAX, RCX, RDX. */
+	RESTORE_GUEST_SPEC_CTRL
+
 	/* Get svm->current_vmcb->pa into RAX. */
-	mov SVM_current_vmcb(%_ASM_ARG1), %_ASM_AX
+	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
 	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

 	/* Enter guest mode */
 	sti

 1:	vmrun %_ASM_AX

 2:	cli

+	/* Pop @svm to RDI, guest registers have been saved already. */
+	pop %_ASM_DI
+
 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif

+	/* Clobbers RAX, RCX, RDX. */
+	RESTORE_HOST_SPEC_CTRL
+
 	/*
 	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
 	 * untrained as soon as we exit the VM and are back to the
 	 * kernel. This should be done before re-enabling interrupts
 	 * because interrupt handlers won't sanitize RET if the return is
 	 * from the kernel.
 	 */
 	UNTRAIN_RET

+	/* "Pop" @spec_ctrl_intercepted. */
+	pop %_ASM_BX
+
 	pop %_ASM_BX

 #ifdef CONFIG_X86_64
 	pop %r12
 	pop %r13
 	pop %r14
 	pop %r15
 #else
 	pop %esi
 	pop %edi
 #endif
 	pop %_ASM_BP
 	RET

+	RESTORE_GUEST_SPEC_CTRL_BODY
+	RESTORE_HOST_SPEC_CTRL_BODY
+
 3:	cmpb $0, kvm_rebooting
 	jne 2b
 	ud2

 	_ASM_EXTABLE(1b, 3b)

 SYM_FUNC_END(__svm_sev_es_vcpu_run)
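
Since both entry points now document a second @spec_ctrl_intercepted argument, the companion C-side change presumably looks something like the sketch below. The declarations follow directly from the comments above, while the call-site shape and the use of msr_write_intercepted() to compute the flag are assumptions about code outside this diff:

	/* Declarations matching the asm signatures documented above. */
	void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
	void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

	/* Hypothetical call site: pass whether writes to MSR_IA32_SPEC_CTRL are intercepted. */
	static void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		if (sev_es_guest(vcpu->kvm))
			__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
		else
			__svm_vcpu_run(svm, spec_ctrl_intercepted);
	}

Passing the flag down is what lets RESTORE_HOST_SPEC_CTRL skip the RDMSR when the guest could not have written MSR_IA32_SPEC_CTRL behind KVM's back.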