1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * AMD SVM support
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9 *
10 * Authors:
11 * Yaniv Kamay <yaniv@qumranet.com>
12 * Avi Kivity <avi@qumranet.com>
13 */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/kvm_types.h>
18 #include <linux/kvm_host.h>
19 #include <linux/kernel.h>
20
21 #include <asm/msr-index.h>
22 #include <asm/debugreg.h>
23
24 #include "kvm_emulate.h"
25 #include "trace.h"
26 #include "mmu.h"
27 #include "x86.h"
28 #include "smm.h"
29 #include "cpuid.h"
30 #include "lapic.h"
31 #include "svm.h"
32 #include "hyperv.h"
33
34 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
35
36 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
37 struct x86_exception *fault)
38 {
39 struct vcpu_svm *svm = to_svm(vcpu);
40 struct vmcb *vmcb = svm->vmcb;
41
42 if (vmcb->control.exit_code != SVM_EXIT_NPF) {
43 /*
44 * TODO: track the cause of the nested page fault, and
45 * correctly fill in the high bits of exit_info_1.
46 */
47 vmcb->control.exit_code = SVM_EXIT_NPF;
48 vmcb->control.exit_info_1 = (1ULL << 32);
49 vmcb->control.exit_info_2 = fault->address;
50 }
51
52 vmcb->control.exit_info_1 &= ~0xffffffffULL;
53 vmcb->control.exit_info_1 |= fault->error_code;
54
55 nested_svm_vmexit(svm);
56 }
57
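/*
 * Read a single PDPTE for the nested NPT walk from L1's nested CR3 (nCR3).
 * A failed guest memory read is reported as a zero, i.e. not-present, PDPTE.
 */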
58 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
59 {
60 struct vcpu_svm *svm = to_svm(vcpu);
61 u64 cr3 = svm->nested.ctl.nested_cr3;
62 u64 pdpte;
63 int ret;
64
65 /*
66 * Note, nCR3 is "assumed" to be 32-byte aligned, i.e. the CPU ignores
67 * nCR3[4:0] when loading PDPTEs from memory.
68 */
69 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
70 (cr3 & GENMASK(11, 5)) + index * 8, 8);
71 if (ret)
72 return 0;
73 return pdpte;
74 }
75
76 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
77 {
78 struct vcpu_svm *svm = to_svm(vcpu);
79
80 return svm->nested.ctl.nested_cr3;
81 }
82
83 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
84 {
85 struct vcpu_svm *svm = to_svm(vcpu);
86
87 WARN_ON(mmu_is_nested(vcpu));
88
89 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
90
91 /*
92 * The NPT format depends on L1's CR4 and EFER, which are in vmcb01. Note,
93 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
94 * vCPU state. CR0.WP is explicitly ignored, while CR0.PG is required.
95 */
96 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
97 svm->vmcb01.ptr->save.efer,
98 svm->nested.ctl.nested_cr3);
99 vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
100 vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
101 vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
102 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
103 }
104
105 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
106 {
107 vcpu->arch.mmu = &vcpu->arch.root_mmu;
108 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
109 }
110
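/*
 * Return true if KVM must intercept VMLOAD/VMSAVE for L2, i.e. if Virtual
 * VMLOAD/VMSAVE can't be used because the feature isn't exposed to L1, NPT
 * isn't enabled for L2, or L1 didn't enable the feature in vmcb12.
 */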
111 static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
112 {
113 if (!guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
114 return true;
115
116 if (!nested_npt_enabled(svm))
117 return true;
118
119 if (!(svm->nested.ctl.misc_ctl2 & SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE))
120 return true;
121
122 return false;
123 }
124
125 void nested_vmcb02_recalc_intercepts(struct vcpu_svm *svm)
126 {
127 struct vmcb_ctrl_area_cached *vmcb12_ctrl = &svm->nested.ctl;
128 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
129 struct vmcb *vmcb01 = svm->vmcb01.ptr;
130 unsigned int i;
131
132 if (WARN_ON_ONCE(svm->vmcb != vmcb02))
133 return;
134
135 vmcb_mark_dirty(vmcb02, VMCB_INTERCEPTS);
136
137 for (i = 0; i < MAX_INTERCEPT; i++)
138 vmcb02->control.intercepts[i] = vmcb01->control.intercepts[i];
139
140 if (vmcb12_ctrl->int_ctl & V_INTR_MASKING_MASK) {
141 /*
142 * If L2 is active and V_INTR_MASKING is enabled in vmcb12,
143 * disable intercept of CR8 writes as L2's CR8 does not affect
144 * any interrupt KVM may want to inject.
145 *
146 * Similarly, disable intercept of virtual interrupts (used to
147 * detect interrupt windows) if the saved RFLAGS.IF is '0', as
148 * the effective RFLAGS.IF for L1 interrupts will never be set
149 * while L2 is running (L2's RFLAGS.IF doesn't affect L1 IRQs).
150 */
151 vmcb_clr_intercept(&vmcb02->control, INTERCEPT_CR8_WRITE);
152 if (!(vmcb01->save.rflags & X86_EFLAGS_IF))
153 vmcb_clr_intercept(&vmcb02->control, INTERCEPT_VINTR);
154 }
155
156 for (i = 0; i < MAX_INTERCEPT; i++)
157 vmcb02->control.intercepts[i] |= vmcb12_ctrl->intercepts[i];
158
159 /* If SMI is not intercepted, ignore guest SMI intercept as well */
160 if (!intercept_smi)
161 vmcb_clr_intercept(&vmcb02->control, INTERCEPT_SMI);
162
163 if (nested_vmcb_needs_vls_intercept(svm)) {
164 /*
165 * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
166 * we must intercept these instructions to correctly
167 * emulate them in case L1 doesn't intercept them.
168 */
169 vmcb_set_intercept(&vmcb02->control, INTERCEPT_VMLOAD);
170 vmcb_set_intercept(&vmcb02->control, INTERCEPT_VMSAVE);
171 } else {
172 WARN_ON_ONCE(!(vmcb02->control.misc_ctl2 & SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE));
173 }
174 }
175
176 /*
177 * This array (and its actual size) holds the set of offsets, in units of the
178 * merge chunk size, to process when merging vmcb12's MSRPM with vmcb01's MSRPM. Note, the
179 * set of MSRs for which interception is disabled in vmcb01 is per-vCPU, e.g.
180 * based on CPUID features. This array only tracks MSRs that *might* be passed
181 * through to the guest.
182 *
183 * Hardcode the capacity of the array based on the maximum number of _offsets_.
184 * MSRs are batched together, so there are fewer offsets than MSRs.
185 */
186 static int nested_svm_msrpm_merge_offsets[10] __ro_after_init;
187 static int nested_svm_nr_msrpm_merge_offsets __ro_after_init;
188 typedef unsigned long nsvm_msrpm_merge_t;
189
190 int __init nested_svm_init_msrpm_merge_offsets(void)
191 {
192 static const u32 merge_msrs[] __initconst = {
193 MSR_STAR,
194 MSR_IA32_SYSENTER_CS,
195 MSR_IA32_SYSENTER_EIP,
196 MSR_IA32_SYSENTER_ESP,
197 #ifdef CONFIG_X86_64
198 MSR_GS_BASE,
199 MSR_FS_BASE,
200 MSR_KERNEL_GS_BASE,
201 MSR_LSTAR,
202 MSR_CSTAR,
203 MSR_SYSCALL_MASK,
204 #endif
205 MSR_IA32_SPEC_CTRL,
206 MSR_IA32_PRED_CMD,
207 MSR_IA32_FLUSH_CMD,
208 MSR_IA32_APERF,
209 MSR_IA32_MPERF,
210 MSR_IA32_LASTBRANCHFROMIP,
211 MSR_IA32_LASTBRANCHTOIP,
212 MSR_IA32_LASTINTFROMIP,
213 MSR_IA32_LASTINTTOIP,
214
215 MSR_K7_PERFCTR0,
216 MSR_K7_PERFCTR1,
217 MSR_K7_PERFCTR2,
218 MSR_K7_PERFCTR3,
219 MSR_F15H_PERF_CTR0,
220 MSR_F15H_PERF_CTR1,
221 MSR_F15H_PERF_CTR2,
222 MSR_F15H_PERF_CTR3,
223 MSR_F15H_PERF_CTR4,
224 MSR_F15H_PERF_CTR5,
225
226 MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
227 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
228 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
229 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,
230 };
231 int i, j;
232
233 for (i = 0; i < ARRAY_SIZE(merge_msrs); i++) {
234 int bit_nr = svm_msrpm_bit_nr(merge_msrs[i]);
235 u32 offset;
236
237 if (WARN_ON(bit_nr < 0))
238 return -EIO;
239
240 /*
241 * Merging is done in chunks to reduce the number of accesses
242 * to L1's bitmap.
243 */
244 offset = bit_nr / BITS_PER_BYTE / sizeof(nsvm_msrpm_merge_t);
245
246 for (j = 0; j < nested_svm_nr_msrpm_merge_offsets; j++) {
247 if (nested_svm_msrpm_merge_offsets[j] == offset)
248 break;
249 }
250
251 if (j < nested_svm_nr_msrpm_merge_offsets)
252 continue;
253
254 if (WARN_ON(j >= ARRAY_SIZE(nested_svm_msrpm_merge_offsets)))
255 return -EIO;
256
257 nested_svm_msrpm_merge_offsets[j] = offset;
258 nested_svm_nr_msrpm_merge_offsets++;
259 }
260
261 return 0;
262 }
263
264 /*
265 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
266 * is optimized in that it only merges the parts where KVM's MSR permission bitmap
267 * may contain zero bits.
268 */
269 static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
270 {
271 struct vcpu_svm *svm = to_svm(vcpu);
272 nsvm_msrpm_merge_t *msrpm02 = svm->nested.msrpm;
273 nsvm_msrpm_merge_t *msrpm01 = svm->msrpm;
274 int i;
275
276 /*
277 * MSR bitmap update can be skipped when:
278 * - MSR bitmap for L1 hasn't changed.
279 * - Nested hypervisor (L1) is attempting to launch the same L2 as
280 * before.
281 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
282 * tells KVM (L0) there were no changes in MSR bitmap for L2.
283 */
284 #ifdef CONFIG_KVM_HYPERV
285 if (!svm->nested.force_msr_bitmap_recalc) {
286 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
287
288 if (kvm_hv_hypercall_enabled(vcpu) &&
289 hve->hv_enlightenments_control.msr_bitmap &&
290 (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
291 goto set_msrpm_base_pa;
292 }
293 #endif
294
295 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
296 return true;
297
298 for (i = 0; i < nested_svm_nr_msrpm_merge_offsets; i++) {
299 const int p = nested_svm_msrpm_merge_offsets[i];
300 nsvm_msrpm_merge_t l1_val;
301 gpa_t gpa;
302
303 gpa = svm->nested.ctl.msrpm_base_pa + (p * sizeof(l1_val));
304
305 if (kvm_vcpu_read_guest(vcpu, gpa, &l1_val, sizeof(l1_val)))
306 return false;
307
308 msrpm02[p] = msrpm01[p] | l1_val;
309 }
310
311 svm->nested.force_msr_bitmap_recalc = false;
312
313 #ifdef CONFIG_KVM_HYPERV
314 set_msrpm_base_pa:
315 #endif
316 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
317
318 return true;
319 }
320
321 /*
322 * Bits 11:0 of bitmap address are ignored by hardware
323 */
324 static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
325 {
326 u64 addr = PAGE_ALIGN(pa);
327
328 return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
329 kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
330 }
331
332 static bool nested_svm_event_inj_valid_exept(struct kvm_vcpu *vcpu, u8 vector)
333 {
334 /*
335 * Vectors that do not correspond to a defined exception are invalid
336 * (including #NMI and reserved vectors). As a best effort to define
337 * valid exceptions based on the virtual CPU model, treat all exceptions as
338 * valid except those that are obviously tied to a CPU feature.
339 */
340 switch (vector) {
341 case DE_VECTOR: case DB_VECTOR: case BP_VECTOR: case OF_VECTOR:
342 case BR_VECTOR: case UD_VECTOR: case NM_VECTOR: case DF_VECTOR:
343 case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR:
344 case PF_VECTOR: case MF_VECTOR: case AC_VECTOR: case MC_VECTOR:
345 case XM_VECTOR: case HV_VECTOR: case SX_VECTOR:
346 return true;
347 case CP_VECTOR:
348 return guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
349 case VC_VECTOR:
350 return guest_cpu_cap_has(vcpu, X86_FEATURE_SEV_ES);
351 }
352 return false;
353 }
354
355 /*
356 * According to the APM, VMRUN exits with SVM_EXIT_ERR if SVM_EVTINJ_VALID is
357 * set and:
358 * - The type of event_inj is not one of the defined values.
359 * - The type is SVM_EVTINJ_TYPE_EXEPT, but the vector is not a valid exception.
360 */
361 static bool nested_svm_check_event_inj(struct kvm_vcpu *vcpu, u32 event_inj)
362 {
363 u32 type = event_inj & SVM_EVTINJ_TYPE_MASK;
364 u8 vector = event_inj & SVM_EVTINJ_VEC_MASK;
365
366 if (!(event_inj & SVM_EVTINJ_VALID))
367 return true;
368
369 if (type != SVM_EVTINJ_TYPE_INTR && type != SVM_EVTINJ_TYPE_NMI &&
370 type != SVM_EVTINJ_TYPE_EXEPT && type != SVM_EVTINJ_TYPE_SOFT)
371 return false;
372
373 if (type == SVM_EVTINJ_TYPE_EXEPT &&
374 !nested_svm_event_inj_valid_exept(vcpu, vector))
375 return false;
376
377 return true;
378 }
379
380 static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
381 struct vmcb_ctrl_area_cached *control)
382 {
383 if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
384 return false;
385
386 if (CC(control->asid == 0))
387 return false;
388
389 if (CC((control->misc_ctl & SVM_MISC_ENABLE_NP) &&
390 !kvm_vcpu_is_legal_gpa(vcpu, control->nested_cr3)))
391 return false;
392
393 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
394 MSRPM_SIZE)))
395 return false;
396 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
397 IOPM_SIZE)))
398 return false;
399
400 if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
401 !vmcb12_is_intercept(control, INTERCEPT_NMI))) {
402 return false;
403 }
404
405 if (CC(!nested_svm_check_event_inj(vcpu, control->event_inj)))
406 return false;
407
408 return true;
409 }
410
411 /* Common checks that apply to both L1 and L2 state. */
412 static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu,
413 struct vmcb_save_area_cached *save)
414 {
415 if (CC(!(save->efer & EFER_SVME)))
416 return false;
417
418 if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
419 CC(save->cr0 & ~0xffffffffULL))
420 return false;
421
422 if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
423 return false;
424
425 /*
426 * These checks are also performed by KVM_SET_SREGS,
427 * except that EFER.LMA is not checked by SVM against
428 * CR0.PG && EFER.LME.
429 */
430 if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
431 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
432 CC(!(save->cr0 & X86_CR0_PE)) ||
433 CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
434 return false;
435
436 if (CC((save->cs.attrib & SVM_SELECTOR_L_MASK) &&
437 (save->cs.attrib & SVM_SELECTOR_DB_MASK)))
438 return false;
439 }
440
441 /* Note, SVM doesn't have any additional restrictions on CR4. */
442 if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
443 return false;
444
445 if (CC(!kvm_valid_efer(vcpu, save->efer)))
446 return false;
447
448 return true;
449 }
450
451 int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu)
452 {
453 struct vcpu_svm *svm = to_svm(vcpu);
454
455 if (!nested_vmcb_check_save(vcpu, &svm->nested.save) ||
456 !nested_vmcb_check_controls(vcpu, &svm->nested.ctl))
457 return -EINVAL;
458
459 return 0;
460 }
461
462 /*
463 * If a feature is not advertised to L1, clear the corresponding vmcb12
464 * intercept.
465 */
466 #define __nested_svm_sanitize_intercept(__vcpu, __control, fname, iname) \
467 do { \
468 if (!guest_cpu_cap_has(__vcpu, X86_FEATURE_##fname)) \
469 vmcb12_clr_intercept(__control, INTERCEPT_##iname); \
470 } while (0)
471
472 #define nested_svm_sanitize_intercept(__vcpu, __control, name) \
473 __nested_svm_sanitize_intercept(__vcpu, __control, name, name)
474
475 static
476 void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
477 struct vmcb_ctrl_area_cached *to,
478 struct vmcb_control_area *from)
479 {
480 unsigned int i;
481
482 for (i = 0; i < MAX_INTERCEPT; i++)
483 to->intercepts[i] = from->intercepts[i];
484
485 __nested_svm_sanitize_intercept(vcpu, to, XSAVE, XSETBV);
486 nested_svm_sanitize_intercept(vcpu, to, INVPCID);
487 nested_svm_sanitize_intercept(vcpu, to, RDTSCP);
488 nested_svm_sanitize_intercept(vcpu, to, SKINIT);
489 nested_svm_sanitize_intercept(vcpu, to, RDPRU);
490
491 /* Always clear SVM_MISC_ENABLE_NP if the guest cannot use NPTs */
492 to->misc_ctl = from->misc_ctl;
493 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_NPT))
494 to->misc_ctl &= ~SVM_MISC_ENABLE_NP;
495
496 to->iopm_base_pa = from->iopm_base_pa & PAGE_MASK;
497 to->msrpm_base_pa = from->msrpm_base_pa & PAGE_MASK;
498 to->tsc_offset = from->tsc_offset;
499 to->tlb_ctl = from->tlb_ctl & TLB_CONTROL_MASK;
500 to->erap_ctl = from->erap_ctl;
501 to->int_ctl = from->int_ctl;
502 to->int_vector = from->int_vector & SVM_INT_VECTOR_MASK;
503 to->int_state = from->int_state & SVM_INTERRUPT_SHADOW_MASK;
504 to->exit_code = from->exit_code;
505 to->exit_info_1 = from->exit_info_1;
506 to->exit_info_2 = from->exit_info_2;
507 to->exit_int_info = from->exit_int_info;
508 to->exit_int_info_err = from->exit_int_info_err;
509 to->event_inj = from->event_inj & ~SVM_EVTINJ_RESERVED_BITS;
510 to->event_inj_err = from->event_inj_err;
511 to->next_rip = from->next_rip;
512 to->nested_cr3 = from->nested_cr3;
513 to->misc_ctl2 = from->misc_ctl2;
514 to->pause_filter_count = from->pause_filter_count;
515 to->pause_filter_thresh = from->pause_filter_thresh;
516
517 /* Copy asid here because nested_vmcb_check_controls() will check it */
518 to->asid = from->asid;
519 to->clean = from->clean;
520
521 #ifdef CONFIG_KVM_HYPERV
522 /* Hyper-V extensions (Enlightened VMCB) */
523 if (kvm_hv_hypercall_enabled(vcpu)) {
524 memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
525 sizeof(to->hv_enlightenments));
526 }
527 #endif
528 }
529
530 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
531 struct vmcb_control_area *control)
532 {
533 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
534 }
535
536 static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
537 struct vmcb_save_area *from)
538 {
539 to->es = from->es;
540 to->cs = from->cs;
541 to->ss = from->ss;
542 to->ds = from->ds;
543 to->gdtr = from->gdtr;
544 to->idtr = from->idtr;
545
546 to->cpl = from->cpl;
547
548 to->efer = from->efer;
549 to->cr4 = from->cr4;
550 to->cr3 = from->cr3;
551 to->cr0 = from->cr0;
552 to->dr7 = from->dr7;
553 to->dr6 = from->dr6;
554
555 to->rflags = from->rflags;
556 to->rip = from->rip;
557 to->rsp = from->rsp;
558
559 to->s_cet = from->s_cet;
560 to->ssp = from->ssp;
561 to->isst_addr = from->isst_addr;
562
563 to->rax = from->rax;
564 to->cr2 = from->cr2;
565
566 svm_copy_lbrs(to, from);
567 }
568
569 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
570 struct vmcb_save_area *save)
571 {
572 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
573 }
574
575 /*
576 * Synchronize fields that are written by the processor, so that
577 * they can be copied back into the vmcb12.
578 */
579 void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
580 {
581 u32 mask;
582 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
583 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
584 svm->nested.ctl.int_state = svm->vmcb->control.int_state;
585
586 /* Only a few fields of int_ctl are written by the processor. */
587 mask = V_IRQ_MASK | V_TPR_MASK;
588 /*
589 * Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting
590 * virtual interrupts in order to request an interrupt window, as KVM
591 * has usurped vmcb02's int_ctl. If an interrupt window opens before
592 * the next VM-Exit, svm_clear_vintr() will restore vmcb12's int_ctl.
593 * If no window opens, V_IRQ will be correctly preserved in vmcb12's
594 * int_ctl (because it was never recognized while L2 was running).
595 */
596 if (svm_is_intercept(svm, INTERCEPT_VINTR) &&
597 !vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_VINTR))
598 mask &= ~V_IRQ_MASK;
599
600 if (nested_vgif_enabled(svm))
601 mask |= V_GIF_MASK;
602
603 if (nested_vnmi_enabled(svm))
604 mask |= V_NMI_BLOCKING_MASK | V_NMI_PENDING_MASK;
605
606 svm->nested.ctl.int_ctl &= ~mask;
607 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
608 }
609
610 /*
611 * Transfer any event that L0 or L1 wanted to inject into L2 to
612 * EXIT_INT_INFO.
613 */
614 static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
615 struct vmcb *vmcb12)
616 {
617 struct kvm_vcpu *vcpu = &svm->vcpu;
618 u32 exit_int_info = 0;
619 unsigned int nr;
620
621 if (vcpu->arch.exception.injected) {
622 nr = vcpu->arch.exception.vector;
623 exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
624
625 if (vcpu->arch.exception.has_error_code) {
626 exit_int_info |= SVM_EVTINJ_VALID_ERR;
627 vmcb12->control.exit_int_info_err =
628 vcpu->arch.exception.error_code;
629 }
630
631 } else if (vcpu->arch.nmi_injected) {
632 exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
633
634 } else if (vcpu->arch.interrupt.injected) {
635 nr = vcpu->arch.interrupt.nr;
636 exit_int_info = nr | SVM_EVTINJ_VALID;
637
638 if (vcpu->arch.interrupt.soft)
639 exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
640 else
641 exit_int_info |= SVM_EVTINJ_TYPE_INTR;
642 }
643
644 vmcb12->control.exit_int_info = exit_int_info;
645 }
646
647 static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
648 {
649 /* Handle pending Hyper-V TLB flush requests */
650 kvm_hv_nested_transtion_tlb_flush(vcpu, npt_enabled);
651
652 /*
653 * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
654 * things to fix before this can be conditional:
655 *
656 * - Flush TLBs for both L1 and L2 remote TLB flush
657 * - Honor L1's request to flush an ASID on nested VMRUN
658 * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
659 * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
660 * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
661 *
662 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
663 * NPT guest-physical mappings on VMRUN.
664 */
665 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
666 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
667 }
668
669 /*
670 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
671 * if we are emulating VM-Entry into a guest with NPT enabled.
672 */
673 static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
674 bool nested_npt, bool reload_pdptrs)
675 {
676 if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3)))
677 return -EINVAL;
678
679 if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
680 CC(!load_pdptrs(vcpu, cr3)))
681 return -EINVAL;
682
683 vcpu->arch.cr3 = cr3;
684
685 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
686 kvm_init_mmu(vcpu);
687
688 if (!nested_npt)
689 kvm_mmu_new_pgd(vcpu, cr3);
690
691 return 0;
692 }
693
694 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
695 {
696 if (!svm->nested.vmcb02.ptr)
697 return;
698
699 /* FIXME: merge g_pat from vmcb01 and vmcb12. */
700 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
701 }
702
703 static bool nested_vmcb12_has_lbrv(struct kvm_vcpu *vcpu)
704 {
705 return guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
706 (to_svm(vcpu)->nested.ctl.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR);
707 }
708
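/*
 * Build vmcb02's save area from the cached vmcb12 state. Fields that L1
 * marked clean are skipped if this is the same vmcb12 as the previous VMRUN.
 */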
709 static void nested_vmcb02_prepare_save(struct vcpu_svm *svm)
710 {
711 struct vmcb_ctrl_area_cached *control = &svm->nested.ctl;
712 struct vmcb_save_area_cached *save = &svm->nested.save;
713 bool new_vmcb12 = false;
714 struct vmcb *vmcb01 = svm->vmcb01.ptr;
715 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
716 struct kvm_vcpu *vcpu = &svm->vcpu;
717
718 nested_vmcb02_compute_g_pat(svm);
719 vmcb_mark_dirty(vmcb02, VMCB_NPT);
720
721 /* Load the nested guest state */
722 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
723 new_vmcb12 = true;
724 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
725 svm->nested.force_msr_bitmap_recalc = true;
726 }
727
728 if (unlikely(new_vmcb12 || vmcb12_is_dirty(control, VMCB_SEG))) {
729 vmcb02->save.es = save->es;
730 vmcb02->save.cs = save->cs;
731 vmcb02->save.ss = save->ss;
732 vmcb02->save.ds = save->ds;
733 vmcb02->save.cpl = save->cpl;
734 vmcb_mark_dirty(vmcb02, VMCB_SEG);
735 }
736
737 if (unlikely(new_vmcb12 || vmcb12_is_dirty(control, VMCB_DT))) {
738 vmcb02->save.gdtr = save->gdtr;
739 vmcb02->save.idtr = save->idtr;
740 vmcb_mark_dirty(vmcb02, VMCB_DT);
741 }
742
743 if (guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
744 (unlikely(new_vmcb12 || vmcb12_is_dirty(control, VMCB_CET)))) {
745 vmcb02->save.s_cet = save->s_cet;
746 vmcb02->save.isst_addr = save->isst_addr;
747 vmcb02->save.ssp = save->ssp;
748 vmcb_mark_dirty(vmcb02, VMCB_CET);
749 }
750
751 kvm_set_rflags(vcpu, save->rflags | X86_EFLAGS_FIXED);
752
753 svm_set_efer(vcpu, svm->nested.save.efer);
754
755 svm_set_cr0(vcpu, svm->nested.save.cr0);
756 svm_set_cr4(vcpu, svm->nested.save.cr4);
757
758 svm->vcpu.arch.cr2 = save->cr2;
759
760 kvm_rax_write(vcpu, save->rax);
761 kvm_rsp_write(vcpu, save->rsp);
762 kvm_rip_write(vcpu, save->rip);
763
764 /* In case we don't even reach vcpu_run, the fields are not updated */
765 vmcb02->save.rax = save->rax;
766 vmcb02->save.rsp = save->rsp;
767 vmcb02->save.rip = save->rip;
768
769 if (unlikely(new_vmcb12 || vmcb12_is_dirty(control, VMCB_DR))) {
770 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
771 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
772 vmcb_mark_dirty(vmcb02, VMCB_DR);
773 }
774
775 if (nested_vmcb12_has_lbrv(vcpu)) {
776 /*
777 * Reserved bits of DEBUGCTL are ignored. Be consistent with
778 * svm_set_msr's definition of reserved bits.
779 */
780 svm_copy_lbrs(&vmcb02->save, save);
781 vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
782 } else {
783 svm_copy_lbrs(&vmcb02->save, &vmcb01->save);
784 }
785 vmcb_mark_dirty(vmcb02, VMCB_LBR);
786 svm_update_lbrv(&svm->vcpu);
787 }
788
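/*
 * Return true if EVENTINJ describes a software interrupt or a software
 * exception (e.g. #BP/#OF), i.e. an injection for which KVM must track the
 * associated RIP information.
 */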
789 static inline bool is_evtinj_soft(u32 evtinj)
790 {
791 u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
792 u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;
793
794 if (!(evtinj & SVM_EVTINJ_VALID))
795 return false;
796
797 if (type == SVM_EVTINJ_TYPE_SOFT)
798 return true;
799
800 return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
801 }
802
803 static bool is_evtinj_nmi(u32 evtinj)
804 {
805 u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
806
807 if (!(evtinj & SVM_EVTINJ_VALID))
808 return false;
809
810 return type == SVM_EVTINJ_TYPE_NMI;
811 }
812
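/*
 * Build vmcb02's control area by combining L1's (vmcb01) and L2's (vmcb12)
 * controls: int_ctl bits, TSC offset/ratio, PAUSE filtering, event injection
 * and, via nested_vmcb02_recalc_intercepts(), the merged intercepts.
 */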
813 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
814 {
815 u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
816 u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
817
818 struct vmcb_ctrl_area_cached *vmcb12_ctrl = &svm->nested.ctl;
819 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
820 struct vmcb *vmcb01 = svm->vmcb01.ptr;
821 struct kvm_vcpu *vcpu = &svm->vcpu;
822 u32 pause_count12, pause_thresh12;
823
824 nested_svm_transition_tlb_flush(vcpu);
825
826 /* Enter Guest-Mode */
827 enter_guest_mode(vcpu);
828
829 /*
830 * Filled at exit: exit_code, exit_info_1, exit_info_2, exit_int_info,
831 * exit_int_info_err, next_rip, insn_len, insn_bytes.
832 */
833
834 if (guest_cpu_cap_has(vcpu, X86_FEATURE_VGIF) &&
835 (vmcb12_ctrl->int_ctl & V_GIF_ENABLE_MASK))
836 int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
837 else
838 int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
839
840 if (vnmi) {
841 if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) {
842 svm->vcpu.arch.nmi_pending++;
843 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
844 }
845 if (nested_vnmi_enabled(svm))
846 int_ctl_vmcb12_bits |= (V_NMI_PENDING_MASK |
847 V_NMI_ENABLE_MASK |
848 V_NMI_BLOCKING_MASK);
849 }
850
851 /*
852 * Copied from vmcb01. msrpm_base can be overwritten later.
853 *
854 * SVM_MISC_ENABLE_NP in vmcb12 is only used for consistency checks. If
855 * L1 enables NPTs, KVM shadows L1's NPTs and uses those to run L2. If
856 * L1 disables NPT, KVM runs L2 with the same NPTs used to run L1. For
857 * the latter, L1 runs L2 with shadow page tables that translate L2 GVAs
858 * to L1 GPAs, so the same NPTs can be used for L1 and L2.
859 */
860 vmcb02->control.misc_ctl = vmcb01->control.misc_ctl & SVM_MISC_ENABLE_NP;
861 vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
862 vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
863 vmcb_mark_dirty(vmcb02, VMCB_PERM_MAP);
864
865 /*
866 * Stash vmcb02's counter if the guest hasn't moved past the guilty
867 * instruction; otherwise, reset the counter to '0'.
868 *
869 * In order to detect if L2 has made forward progress or not, track the
870 * RIP at which a bus lock has occurred on a per-vmcb12 basis. If the RIP
871 * has changed, the guest has clearly made forward progress even though
872 * bus_lock_counter remained '1', so reset bus_lock_counter to '0'. E.g. in a
873 * scenario where a bus lock happened in L1 before VMRUN, the bus lock
874 * firmly happened on an instruction in the past. Even if vmcb01's
875 * counter is still '1' (because the guilty instruction got patched),
876 * the vCPU has clearly made forward progress and so KVM should reset
877 * vmcb02's counter to '0'.
878 *
879 * If the RIP hasn't changed, stash the bus lock counter at nested VMRUN
880 * to prevent the same guilty instruction from triggering a VM-Exit. Eg.
881 * if userspace rate-limits the vCPU, then it's entirely possible that
882 * L1's tick interrupt is pending by the time userspace re-runs the
883 * vCPU. If KVM unconditionally clears the counter on VMRUN, then when
884 * L1 re-enters L2, the same instruction will trigger a VM-Exit and the
885 * entire cycle starts over.
886 */
887 if (vmcb02->save.rip && (svm->nested.last_bus_lock_rip == vmcb02->save.rip))
888 vmcb02->control.bus_lock_counter = 1;
889 else
890 vmcb02->control.bus_lock_counter = 0;
891
892 /* Done at vmrun: asid. */
893
894 /* Also overwritten later if necessary. */
895 vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
896
897 /* nested_cr3. */
898 if (nested_npt_enabled(svm))
899 nested_svm_init_mmu_context(vcpu);
900
901 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(vcpu->arch.l1_tsc_offset,
902 vmcb12_ctrl->tsc_offset,
903 svm->tsc_ratio_msr);
904
905 vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
906
907 if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
908 svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
909 nested_svm_update_tsc_ratio_msr(vcpu);
910
911 vmcb02->control.int_ctl =
912 (vmcb12_ctrl->int_ctl & int_ctl_vmcb12_bits) |
913 (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
914
915 vmcb02->control.int_vector = vmcb12_ctrl->int_vector;
916 vmcb02->control.int_state = vmcb12_ctrl->int_state;
917 vmcb02->control.event_inj = vmcb12_ctrl->event_inj;
918 vmcb02->control.event_inj_err = vmcb12_ctrl->event_inj_err;
919
920 /*
921 * If nrips is exposed to L1, take NextRIP as-is. Otherwise, L1
922 * advances L2's RIP before VMRUN instead of using NextRIP. KVM will
923 * stuff the current RIP as vmcb02's NextRIP before L2 is run. After
924 * the first run of L2 (e.g. after save+restore), NextRIP is updated by
925 * the CPU and/or KVM and should be used regardless of L1's support.
926 */
927 if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS) ||
928 !vcpu->arch.nested_run_pending)
929 vmcb02->control.next_rip = vmcb12_ctrl->next_rip;
930
931 svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
932
933 /*
934 * soft_int_csbase, soft_int_old_rip, and soft_int_next_rip (if L1
935 * doesn't have NRIPS) are initialized later, before the vCPU is run.
936 */
937 if (is_evtinj_soft(vmcb02->control.event_inj)) {
938 svm->soft_int_injected = true;
939 if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS) ||
940 !vcpu->arch.nested_run_pending)
941 svm->soft_int_next_rip = vmcb12_ctrl->next_rip;
942 }
943
944 /* SVM_MISC2_ENABLE_V_LBR is controlled by svm_update_lbrv() */
945
946 if (!nested_vmcb_needs_vls_intercept(svm))
947 vmcb02->control.misc_ctl2 |= SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE;
948
949 if (guest_cpu_cap_has(vcpu, X86_FEATURE_PAUSEFILTER))
950 pause_count12 = vmcb12_ctrl->pause_filter_count;
951 else
952 pause_count12 = 0;
953 if (guest_cpu_cap_has(vcpu, X86_FEATURE_PFTHRESHOLD))
954 pause_thresh12 = vmcb12_ctrl->pause_filter_thresh;
955 else
956 pause_thresh12 = 0;
957 if (kvm_pause_in_guest(svm->vcpu.kvm)) {
958 /* use guest values since host doesn't intercept PAUSE */
959 vmcb02->control.pause_filter_count = pause_count12;
960 vmcb02->control.pause_filter_thresh = pause_thresh12;
961
962 } else {
963 /* start from host values otherwise */
964 vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
965 vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;
966
967 /* ... but ensure filtering is disabled if so requested. */
968 if (vmcb12_is_intercept(vmcb12_ctrl, INTERCEPT_PAUSE)) {
969 if (!pause_count12)
970 vmcb02->control.pause_filter_count = 0;
971 if (!pause_thresh12)
972 vmcb02->control.pause_filter_thresh = 0;
973 }
974 }
975
976 /*
977 * Take ALLOW_LARGER_RAP from vmcb12 even though it should be safe to
978 * let L2 use a larger RAP since KVM will emulate the necessary clears,
979 * as it's possible L1 deliberately wants to restrict L2 to the legacy
980 * RAP size. Unconditionally clear the RAP on nested VMRUN, as KVM is
981 * responsible for emulating the host vs. guest tags (L1 is the "host",
982 * L2 is the "guest").
983 */
984 if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS))
985 vmcb02->control.erap_ctl = (vmcb12_ctrl->erap_ctl &
986 ERAP_CONTROL_ALLOW_LARGER_RAP) |
987 ERAP_CONTROL_CLEAR_RAP;
988
989 /*
990 * Merge guest and host intercepts - must be called with vcpu in
991 * guest-mode to take effect.
992 */
993 nested_vmcb02_recalc_intercepts(svm);
994 }
995
996 static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
997 {
998 /*
999 * Some VMCB state is shared between L1 and L2 and thus has to be
1000 * moved at the time of nested vmrun and vmexit.
1001 *
1002 * VMLOAD/VMSAVE state would also belong in this category, but KVM
1003 * always performs VMLOAD and VMSAVE from the VMCB01.
1004 */
1005 to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
1006 }
1007
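/*
 * Emulate entry into L2: switch to vmcb02, build its control and save areas
 * from the cached vmcb12 state, and load L2's CR3.
 */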
1008 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, bool from_vmrun)
1009 {
1010 struct vcpu_svm *svm = to_svm(vcpu);
1011 struct vmcb_ctrl_area_cached *control = &svm->nested.ctl;
1012 struct vmcb_save_area_cached *save = &svm->nested.save;
1013 int ret;
1014
1015 trace_kvm_nested_vmenter(svm->vmcb->save.rip,
1016 vmcb12_gpa,
1017 save->rip,
1018 control->int_ctl,
1019 control->event_inj,
1020 control->misc_ctl,
1021 control->nested_cr3,
1022 save->cr3,
1023 KVM_ISA_SVM);
1024
1025 trace_kvm_nested_intercepts(control->intercepts[INTERCEPT_CR] & 0xffff,
1026 control->intercepts[INTERCEPT_CR] >> 16,
1027 control->intercepts[INTERCEPT_EXCEPTION],
1028 control->intercepts[INTERCEPT_WORD3],
1029 control->intercepts[INTERCEPT_WORD4],
1030 control->intercepts[INTERCEPT_WORD5]);
1031
1032
1033 svm->nested.vmcb12_gpa = vmcb12_gpa;
1034
1035 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
1036
1037 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
1038
1039 svm_switch_vmcb(svm, &svm->nested.vmcb02);
1040 nested_vmcb02_prepare_control(svm);
1041 nested_vmcb02_prepare_save(svm);
1042
1043 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
1044 nested_npt_enabled(svm), from_vmrun);
1045 if (ret)
1046 return ret;
1047
1048 if (!from_vmrun)
1049 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1050
1051 svm_set_gif(svm, true);
1052
1053 if (kvm_vcpu_apicv_active(vcpu))
1054 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
1055
1056 nested_svm_hv_update_vm_vp_ids(vcpu);
1057
1058 return 0;
1059 }
1060
1061 static int nested_svm_copy_vmcb12_to_cache(struct kvm_vcpu *vcpu, u64 vmcb12_gpa)
1062 {
1063 struct vcpu_svm *svm = to_svm(vcpu);
1064 struct kvm_host_map map;
1065 struct vmcb *vmcb12;
1066 int r = 0;
1067
1068 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map))
1069 return -EFAULT;
1070
1071 vmcb12 = map.hva;
1072 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
1073 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
1074
1075 if (nested_svm_check_cached_vmcb12(vcpu) < 0) {
1076 vmcb12->control.exit_code = SVM_EXIT_ERR;
1077 vmcb12->control.exit_info_1 = 0;
1078 vmcb12->control.exit_info_2 = 0;
1079 vmcb12->control.event_inj = 0;
1080 vmcb12->control.event_inj_err = 0;
1081 svm_set_gif(svm, false);
1082 r = -EINVAL;
1083 }
1084
1085 kvm_vcpu_unmap(vcpu, &map);
1086 return r;
1087 }
1088
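/* Emulate the VMRUN instruction executed by L1. */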
1089 int nested_svm_vmrun(struct kvm_vcpu *vcpu)
1090 {
1091 struct vcpu_svm *svm = to_svm(vcpu);
1092 int ret;
1093 u64 vmcb12_gpa;
1094 struct vmcb *vmcb01 = svm->vmcb01.ptr;
1095
1096 if (!svm->nested.hsave_msr) {
1097 kvm_inject_gp(vcpu, 0);
1098 return 1;
1099 }
1100
1101 if (is_smm(vcpu)) {
1102 kvm_queue_exception(vcpu, UD_VECTOR);
1103 return 1;
1104 }
1105
1106 /* This fails when VP assist page is enabled but the supplied GPA is bogus */
1107 ret = kvm_hv_verify_vp_assist(vcpu);
1108 if (ret) {
1109 kvm_inject_gp(vcpu, 0);
1110 return ret;
1111 }
1112
1113 if (WARN_ON_ONCE(!svm->nested.initialized))
1114 return -EINVAL;
1115
1116 vmcb12_gpa = kvm_register_read(vcpu, VCPU_REGS_RAX);
1117 if (!page_address_valid(vcpu, vmcb12_gpa)) {
1118 kvm_inject_gp(vcpu, 0);
1119 return 1;
1120 }
1121
1122 ret = nested_svm_copy_vmcb12_to_cache(vcpu, vmcb12_gpa);
1123 if (ret) {
1124 if (ret == -EFAULT)
1125 return kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
1126
1127 /* Advance RIP past VMRUN as part of the nested #VMEXIT. */
1128 return kvm_skip_emulated_instruction(vcpu);
1129 }
1130
1131 /* At this point, VMRUN is guaranteed to not fault; advance RIP. */
1132 ret = kvm_skip_emulated_instruction(vcpu);
1133
1134 /*
1135 * Since vmcb01 is not in use, we can use it to store some of the L1
1136 * state.
1137 */
1138 vmcb01->save.efer = vcpu->arch.efer;
1139 vmcb01->save.cr0 = kvm_read_cr0(vcpu);
1140 vmcb01->save.cr4 = vcpu->arch.cr4;
1141 vmcb01->save.rflags = kvm_get_rflags(vcpu);
1142 vmcb01->save.rip = kvm_rip_read(vcpu);
1143
1144 if (!npt_enabled)
1145 vmcb01->save.cr3 = kvm_read_cr3(vcpu);
1146
1147 vcpu->arch.nested_run_pending = KVM_NESTED_RUN_PENDING;
1148
1149 if (enter_svm_guest_mode(vcpu, vmcb12_gpa, true) ||
1150 !nested_svm_merge_msrpm(vcpu)) {
1151 vcpu->arch.nested_run_pending = 0;
1152 svm->nmi_l1_to_l2 = false;
1153 svm->soft_int_injected = false;
1154
1155 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
1156 svm->vmcb->control.exit_info_1 = 0;
1157 svm->vmcb->control.exit_info_2 = 0;
1158
1159 nested_svm_vmexit(svm);
1160 }
1161
1162 return ret;
1163 }
1164
1165 /* Copy state save area fields which are handled by VMRUN */
1166 void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
1167 struct vmcb_save_area *from_save)
1168 {
1169 to_save->es = from_save->es;
1170 to_save->cs = from_save->cs;
1171 to_save->ss = from_save->ss;
1172 to_save->ds = from_save->ds;
1173 to_save->gdtr = from_save->gdtr;
1174 to_save->idtr = from_save->idtr;
1175 to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
1176 to_save->efer = from_save->efer;
1177 to_save->cr0 = from_save->cr0;
1178 to_save->cr3 = from_save->cr3;
1179 to_save->cr4 = from_save->cr4;
1180 to_save->rax = from_save->rax;
1181 to_save->rsp = from_save->rsp;
1182 to_save->rip = from_save->rip;
1183 to_save->cpl = 0;
1184
1185 if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
1186 to_save->s_cet = from_save->s_cet;
1187 to_save->isst_addr = from_save->isst_addr;
1188 to_save->ssp = from_save->ssp;
1189 }
1190
1191 if (kvm_cpu_cap_has(X86_FEATURE_LBRV)) {
1192 svm_copy_lbrs(to_save, from_save);
1193 to_save->dbgctl &= ~DEBUGCTL_RESERVED_BITS;
1194 }
1195 }
1196
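/* Copy the state save area fields that are handled by VMLOAD and VMSAVE. */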
1197 void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
1198 {
1199 to_vmcb->save.fs = from_vmcb->save.fs;
1200 to_vmcb->save.gs = from_vmcb->save.gs;
1201 to_vmcb->save.tr = from_vmcb->save.tr;
1202 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
1203 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
1204 to_vmcb->save.star = from_vmcb->save.star;
1205 to_vmcb->save.lstar = from_vmcb->save.lstar;
1206 to_vmcb->save.cstar = from_vmcb->save.cstar;
1207 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
1208 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
1209 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
1210 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
1211 }
1212
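/*
 * On a nested #VMEXIT, copy L2 state and exit information from vmcb02 (and
 * KVM's vCPU state) back into the guest-visible vmcb12.
 */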
1213 static int nested_svm_vmexit_update_vmcb12(struct kvm_vcpu *vcpu)
1214 {
1215 struct vcpu_svm *svm = to_svm(vcpu);
1216 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
1217 struct kvm_host_map map;
1218 struct vmcb *vmcb12;
1219 int rc;
1220
1221 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
1222 if (rc)
1223 return rc;
1224
1225 vmcb12 = map.hva;
1226
1227 vmcb12->save.es = vmcb02->save.es;
1228 vmcb12->save.cs = vmcb02->save.cs;
1229 vmcb12->save.ss = vmcb02->save.ss;
1230 vmcb12->save.ds = vmcb02->save.ds;
1231 vmcb12->save.gdtr = vmcb02->save.gdtr;
1232 vmcb12->save.idtr = vmcb02->save.idtr;
1233 vmcb12->save.efer = svm->vcpu.arch.efer;
1234 vmcb12->save.cr0 = kvm_read_cr0(vcpu);
1235 vmcb12->save.cr3 = kvm_read_cr3(vcpu);
1236 vmcb12->save.cr2 = vcpu->arch.cr2;
1237 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
1238 vmcb12->save.rflags = kvm_get_rflags(vcpu);
1239 vmcb12->save.rip = kvm_rip_read(vcpu);
1240 vmcb12->save.rsp = kvm_rsp_read(vcpu);
1241 vmcb12->save.rax = kvm_rax_read(vcpu);
1242 vmcb12->save.dr7 = vmcb02->save.dr7;
1243 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
1244 vmcb12->save.cpl = vmcb02->save.cpl;
1245
1246 if (guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK)) {
1247 vmcb12->save.s_cet = vmcb02->save.s_cet;
1248 vmcb12->save.isst_addr = vmcb02->save.isst_addr;
1249 vmcb12->save.ssp = vmcb02->save.ssp;
1250 }
1251
1252 vmcb12->control.int_state = vmcb02->control.int_state;
1253 vmcb12->control.exit_code = vmcb02->control.exit_code;
1254 vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
1255 vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;
1256
1257 if (!svm_is_vmrun_failure(vmcb12->control.exit_code))
1258 nested_save_pending_event_to_vmcb12(svm, vmcb12);
1259
1260 if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
1261 vmcb12->control.next_rip = vmcb02->control.next_rip;
1262
1263 if (nested_vmcb12_has_lbrv(vcpu))
1264 svm_copy_lbrs(&vmcb12->save, &vmcb02->save);
1265
1266 vmcb12->control.event_inj = 0;
1267 vmcb12->control.event_inj_err = 0;
1268 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
1269
1270 trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
1271 vmcb12->control.exit_info_1,
1272 vmcb12->control.exit_info_2,
1273 vmcb12->control.exit_int_info,
1274 vmcb12->control.exit_int_info_err,
1275 KVM_ISA_SVM);
1276
1277 kvm_vcpu_unmap(vcpu, &map);
1278 return 0;
1279 }
1280
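/*
 * Emulate #VMEXIT from L2 to L1: update vmcb12, switch back to vmcb01, and
 * restore the L1 state that was stashed in vmcb01 at VMRUN.
 */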
1281 void nested_svm_vmexit(struct vcpu_svm *svm)
1282 {
1283 struct kvm_vcpu *vcpu = &svm->vcpu;
1284 struct vmcb *vmcb01 = svm->vmcb01.ptr;
1285 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
1286
1287 if (nested_svm_vmexit_update_vmcb12(vcpu))
1288 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1289
1290 /* Exit Guest-Mode */
1291 leave_guest_mode(vcpu);
1292 svm->nested.vmcb12_gpa = 0;
1293
1294 kvm_warn_on_nested_run_pending(vcpu);
1295
1296 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1297
1298 /* in case we halted in L2 */
1299 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
1300
1301 if (!kvm_pause_in_guest(vcpu->kvm)) {
1302 vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
1303 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
1304
1305 }
1306
1307 /*
1308 * Invalidate last_bus_lock_rip unless KVM is still waiting for the
1309 * guest to make forward progress before re-enabling bus lock detection.
1310 */
1311 if (!vmcb02->control.bus_lock_counter)
1312 svm->nested.last_bus_lock_rip = INVALID_GPA;
1313
1314 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
1315
1316 kvm_nested_vmexit_handle_ibrs(vcpu);
1317
1318 if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS))
1319 vmcb01->control.erap_ctl |= ERAP_CONTROL_CLEAR_RAP;
1320
1321 svm_switch_vmcb(svm, &svm->vmcb01);
1322
1323 /*
1324 * Rules for synchronizing int_ctl bits from vmcb02 to vmcb01:
1325 *
1326 * V_IRQ, V_IRQ_VECTOR, V_INTR_PRIO_MASK, V_IGN_TPR: If L1 doesn't
1327 * intercept interrupts, then KVM will use vmcb02's V_IRQ (and related
1328 * flags) to detect interrupt windows for L1 IRQs (even if L1 uses
1329 * virtual interrupt masking). Raise KVM_REQ_EVENT to ensure that
1330 * KVM re-requests an interrupt window if necessary, which implicitly
1331 * copies these bits from vmcb02 to vmcb01.
1332 *
1333 * V_TPR: If L1 doesn't use virtual interrupt masking, then L1's vTPR
1334 * is stored in vmcb02, but its value doesn't need to be copied from/to
1335 * vmcb01 because it is copied from/to the virtual APIC's TPR register
1336 * on each VM entry/exit.
1337 *
1338 * V_GIF: If nested vGIF is not used, KVM uses vmcb02's V_GIF for L1's
1339 * V_GIF. However, GIF is architecturally clear on each VM exit, thus
1340 * there is no need to copy V_GIF from vmcb02 to vmcb01.
1341 */
1342 if (!nested_exit_on_intr(svm))
1343 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
1344
1345 if (!nested_vmcb12_has_lbrv(vcpu)) {
1346 svm_copy_lbrs(&vmcb01->save, &vmcb02->save);
1347 vmcb_mark_dirty(vmcb01, VMCB_LBR);
1348 }
1349
1350 svm_update_lbrv(vcpu);
1351
1352 if (vnmi) {
1353 if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
1354 vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
1355 else
1356 vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
1357
1358 if (vcpu->arch.nmi_pending) {
1359 vcpu->arch.nmi_pending--;
1360 vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
1361 } else {
1362 vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
1363 }
1364 }
1365
1366 /*
1367 * On vmexit the GIF is set to false and
1368 * no event can be injected in L1.
1369 */
1370 svm_set_gif(svm, false);
1371 vmcb01->control.exit_int_info = 0;
1372
1373 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
1374 if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1375 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1376 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
1377 }
1378
1379 if (kvm_caps.has_tsc_control &&
1380 vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
1381 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
1382 svm_write_tsc_multiplier(vcpu);
1383 }
1384
1385 svm->nested.ctl.nested_cr3 = 0;
1386
1387 /*
1388 * Restore processor state that had been saved in vmcb01
1389 */
1390 kvm_set_rflags(vcpu, vmcb01->save.rflags);
1391 svm_set_efer(vcpu, vmcb01->save.efer);
1392 svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1393 svm_set_cr4(vcpu, vmcb01->save.cr4);
1394 kvm_rax_write(vcpu, vmcb01->save.rax);
1395 kvm_rsp_write(vcpu, vmcb01->save.rsp);
1396 kvm_rip_write(vcpu, vmcb01->save.rip);
1397
1398 svm->vcpu.arch.dr7 = DR7_FIXED_1;
1399 kvm_update_dr7(&svm->vcpu);
1400
1401 nested_svm_transition_tlb_flush(vcpu);
1402
1403 nested_svm_uninit_mmu_context(vcpu);
1404
1405 if (nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true))
1406 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1407
1408 /* Drop tracking for L1->L2 injected NMIs and soft IRQs */
1409 svm->nmi_l1_to_l2 = false;
1410 svm->soft_int_injected = false;
1411
1412 /*
1413 * Drop what we picked up for L2 via svm_complete_interrupts() so it
1414 * doesn't end up in L1.
1415 */
1416 svm->vcpu.arch.nmi_injected = false;
1417 kvm_clear_exception_queue(vcpu);
1418 kvm_clear_interrupt_queue(vcpu);
1419
1420 /*
1421 * If we are here following the completion of a VMRUN that
1422 * is being single-stepped, queue the pending #DB intercept
1423 * right now so that it can be accounted for before we execute
1424 * L1's next instruction.
1425 */
1426 if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
1427 kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
1428
1429 /*
1430 * Un-inhibit the AVIC right away, so that other vCPUs can start
1431 * to benefit from it immediately.
1432 */
1433 if (kvm_apicv_activated(vcpu->kvm))
1434 __kvm_vcpu_update_apicv(vcpu);
1435 }
1436
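/*
 * Reflect a triple fault in L2 to L1 as an SVM_EXIT_SHUTDOWN #VMEXIT, but
 * only if L1 intercepts shutdown events.
 */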
1437 static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
1438 {
1439 struct vcpu_svm *svm = to_svm(vcpu);
1440
1441 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
1442 return;
1443
1444 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1445 nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
1446 }
1447
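/*
 * Lazily allocate the per-vCPU state needed to run a nested guest: the
 * vmcb02 page and the merged (vmcb01 | vmcb12) MSR permission bitmap.
 */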
1448 int svm_allocate_nested(struct vcpu_svm *svm)
1449 {
1450 struct page *vmcb02_page;
1451
1452 if (svm->nested.initialized)
1453 return 0;
1454
1455 vmcb02_page = snp_safe_alloc_page();
1456 if (!vmcb02_page)
1457 return -ENOMEM;
1458 svm->nested.vmcb02.ptr = page_address(vmcb02_page);
1459 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
1460
1461 svm->nested.msrpm = svm_vcpu_alloc_msrpm();
1462 if (!svm->nested.msrpm)
1463 goto err_free_vmcb02;
1464
1465 svm->nested.initialized = true;
1466 return 0;
1467
1468 err_free_vmcb02:
1469 __free_page(vmcb02_page);
1470 return -ENOMEM;
1471 }
1472
1473 void svm_free_nested(struct vcpu_svm *svm)
1474 {
1475 if (!svm->nested.initialized)
1476 return;
1477
1478 if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
1479 svm_switch_vmcb(svm, &svm->vmcb01);
1480
1481 svm_vcpu_free_msrpm(svm->nested.msrpm);
1482 svm->nested.msrpm = NULL;
1483
1484 __free_page(virt_to_page(svm->nested.vmcb02.ptr));
1485 svm->nested.vmcb02.ptr = NULL;
1486
1487 /*
1488 * When last_vmcb12_gpa matches the current vmcb12 gpa,
1489 * some vmcb12 fields are not loaded if they are marked clean
1490 * in the vmcb12, since in this case they are up to date already.
1491 *
1492 * When the vmcb02 is freed, this optimization becomes invalid.
1493 */
1494 svm->nested.last_vmcb12_gpa = INVALID_GPA;
1495
1496 svm->nested.initialized = false;
1497 }
1498
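/*
 * Forcibly leave guest (L2) mode without emulating a #VMEXIT: switch back to
 * vmcb01 and tear down the nested MMU context.
 */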
1499 void svm_leave_nested(struct kvm_vcpu *vcpu)
1500 {
1501 struct vcpu_svm *svm = to_svm(vcpu);
1502
1503 if (is_guest_mode(vcpu)) {
1504 vcpu->arch.nested_run_pending = 0;
1505 svm->nested.vmcb12_gpa = INVALID_GPA;
1506
1507 leave_guest_mode(vcpu);
1508
1509 svm_switch_vmcb(svm, &svm->vmcb01);
1510
1511 nested_svm_uninit_mmu_context(vcpu);
1512 vmcb_mark_all_dirty(svm->vmcb);
1513
1514 svm_set_gif(svm, true);
1515
1516 if (kvm_apicv_activated(vcpu->kvm))
1517 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
1518 }
1519
1520 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1521 }
1522
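/*
 * Consult L1's MSR permission bitmap to decide whether an MSR access that
 * exited from L2 should be forwarded to L1 (NESTED_EXIT_DONE) or handled by
 * KVM (NESTED_EXIT_HOST).
 */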
1523 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1524 {
1525 gpa_t base = svm->nested.ctl.msrpm_base_pa;
1526 int write, bit_nr;
1527 u8 value, mask;
1528 u32 msr;
1529
1530 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1531 return NESTED_EXIT_HOST;
1532
1533 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1534 bit_nr = svm_msrpm_bit_nr(msr);
1535 write = svm->vmcb->control.exit_info_1 & 1;
1536
1537 if (bit_nr < 0)
1538 return NESTED_EXIT_DONE;
1539
1540 if (kvm_vcpu_read_guest(&svm->vcpu, base + bit_nr / BITS_PER_BYTE,
1541 &value, sizeof(value)))
1542 return NESTED_EXIT_DONE;
1543
1544 mask = BIT(write) << (bit_nr & (BITS_PER_BYTE - 1));
1545 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1546 }
1547
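/*
 * Consult L1's I/O permission bitmap to decide whether an IN/OUT that exited
 * from L2 should be forwarded to L1.
 */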
1548 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1549 {
1550 unsigned port, size, iopm_len;
1551 u16 val, mask;
1552 u8 start_bit;
1553 u64 gpa;
1554
1555 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1556 return NESTED_EXIT_HOST;
1557
1558 port = svm->vmcb->control.exit_info_1 >> 16;
1559 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1560 SVM_IOIO_SIZE_SHIFT;
1561 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
1562 start_bit = port % 8;
1563 iopm_len = (start_bit + size > 8) ? 2 : 1;
1564 mask = (0xf >> (4 - size)) << start_bit;
1565 val = 0;
1566
1567 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1568 return NESTED_EXIT_DONE;
1569
1570 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1571 }
1572
1573 static int nested_svm_intercept(struct vcpu_svm *svm)
1574 {
1575 u64 exit_code = svm->vmcb->control.exit_code;
1576 int vmexit = NESTED_EXIT_HOST;
1577
1578 if (svm_is_vmrun_failure(exit_code))
1579 return NESTED_EXIT_DONE;
1580
1581 switch (exit_code) {
1582 case SVM_EXIT_MSR:
1583 vmexit = nested_svm_exit_handled_msr(svm);
1584 break;
1585 case SVM_EXIT_IOIO:
1586 vmexit = nested_svm_intercept_ioio(svm);
1587 break;
1588 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f:
1589 /*
1590 * Host-intercepted exceptions have been checked already in
1591 * nested_svm_exit_special. There is nothing to do here;
1592 * the vmexit is injected by svm_check_nested_events.
1593 */
1594 vmexit = NESTED_EXIT_DONE;
1595 break;
1596 default:
1597 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1598 vmexit = NESTED_EXIT_DONE;
1599 break;
1600 }
1601
1602 return vmexit;
1603 }
1604
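/*
 * Determine whether the current exit from L2 is owned by L1 and, if so,
 * perform the nested #VMEXIT immediately.
 */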
1605 int nested_svm_exit_handled(struct vcpu_svm *svm)
1606 {
1607 int vmexit;
1608
1609 vmexit = nested_svm_intercept(svm);
1610
1611 if (vmexit == NESTED_EXIT_DONE)
1612 nested_svm_vmexit(svm);
1613
1614 return vmexit;
1615 }
1616
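/*
 * Common permission checks for SVM instructions: #UD if EFER.SVME is clear
 * or paging is disabled, #GP if not executed at CPL0.
 */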
1617 int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1618 {
1619 if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
1620 kvm_queue_exception(vcpu, UD_VECTOR);
1621 return 1;
1622 }
1623
1624 if (to_svm(vcpu)->vmcb->save.cpl) {
1625 kvm_inject_gp(vcpu, 0);
1626 return 1;
1627 }
1628
1629 return 0;
1630 }
1631
1632 static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
1633 u32 error_code)
1634 {
1635 struct vcpu_svm *svm = to_svm(vcpu);
1636
1637 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
1638 }
1639
1640 static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
1641 {
1642 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
1643 struct vcpu_svm *svm = to_svm(vcpu);
1644 struct vmcb *vmcb = svm->vmcb;
1645
1646 vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
1647
1648 if (ex->has_error_code)
1649 vmcb->control.exit_info_1 = ex->error_code;
1650
1651 /*
1652 * EXITINFO2 is undefined for all exception intercepts other
1653 * than #PF.
1654 */
1655 if (ex->vector == PF_VECTOR) {
1656 if (ex->has_payload)
1657 vmcb->control.exit_info_2 = ex->payload;
1658 else
1659 vmcb->control.exit_info_2 = vcpu->arch.cr2;
1660 } else if (ex->vector == DB_VECTOR) {
1661 /* See kvm_check_and_inject_events(). */
1662 kvm_deliver_exception_payload(vcpu, ex);
1663
1664 if (vcpu->arch.dr7 & DR7_GD) {
1665 vcpu->arch.dr7 &= ~DR7_GD;
1666 kvm_update_dr7(vcpu);
1667 }
1668 } else {
1669 WARN_ON(ex->has_payload);
1670 }
1671
1672 nested_svm_vmexit(svm);
1673 }
1674
1675 static inline bool nested_exit_on_init(struct vcpu_svm *svm)
1676 {
1677 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
1678 }
1679
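/*
 * Check whether a pending event (INIT, exception, SMI, NMI or IRQ) should
 * result in a nested #VMEXIT to L1, or must wait because nested events are
 * currently blocked.
 */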
1680 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1681 {
1682 struct kvm_lapic *apic = vcpu->arch.apic;
1683 struct vcpu_svm *svm = to_svm(vcpu);
1684 /*
1685 * Only a pending nested run blocks a pending exception. If there is a
1686 * previously injected event, the pending exception occurred while said
1687 * event was being delivered and thus needs to be handled.
1688 */
1689 bool block_nested_exceptions = vcpu->arch.nested_run_pending;
1690 /*
1691 * New events (not exceptions) are only recognized at instruction
1692 * boundaries. If an event needs reinjection, then KVM is handling a
1693 * VM-Exit that occurred _during_ instruction execution; new events are
1694 * blocked until the instruction completes.
1695 */
1696 bool block_nested_events = block_nested_exceptions ||
1697 kvm_event_needs_reinjection(vcpu);
1698
1699 if (lapic_in_kernel(vcpu) &&
1700 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
1701 if (block_nested_events)
1702 return -EBUSY;
1703 if (!nested_exit_on_init(svm))
1704 return 0;
1705 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
1706 return 0;
1707 }
1708
1709 if (vcpu->arch.exception_vmexit.pending) {
1710 if (block_nested_exceptions)
1711 return -EBUSY;
1712 nested_svm_inject_exception_vmexit(vcpu);
1713 return 0;
1714 }
1715
1716 if (vcpu->arch.exception.pending) {
1717 if (block_nested_exceptions)
1718 return -EBUSY;
1719 return 0;
1720 }
1721
1722 #ifdef CONFIG_KVM_SMM
1723 if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
1724 if (block_nested_events)
1725 return -EBUSY;
1726 if (!nested_exit_on_smi(svm))
1727 return 0;
1728 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
1729 return 0;
1730 }
1731 #endif
1732
1733 if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
1734 if (block_nested_events)
1735 return -EBUSY;
1736 if (!nested_exit_on_nmi(svm))
1737 return 0;
1738 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
1739 return 0;
1740 }
1741
1742 if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1743 if (block_nested_events)
1744 return -EBUSY;
1745 if (!nested_exit_on_intr(svm))
1746 return 0;
1747 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1748 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1749 return 0;
1750 }
1751
1752 return 0;
1753 }
1754
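/*
 * Identify exits that KVM (L0) must handle regardless of vmcb12 intercepts,
 * e.g. physical interrupts, NMIs and NPT faults.  Returns NESTED_EXIT_HOST
 * for such exits and NESTED_EXIT_CONTINUE otherwise.
 */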
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	case SVM_EXIT_VMMCALL:
		/* Hyper-V L2 TLB flush hypercall is handled by L0 */
		if (nested_svm_is_l2_tlb_flush_hcall(vcpu))
			return NESTED_EXIT_HOST;
		break;
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

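/*
 * Recompute the effective TSC scaling ratio from L1's ratio and the guest's
 * TSC_RATIO MSR value, then propagate it via svm_write_tsc_multiplier().
 */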
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->arch.tsc_scaling_ratio =
		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
					       svm->tsc_ratio_msr);
	svm_write_tsc_multiplier(vcpu);
}

/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
					      struct vmcb_ctrl_area_cached *from)
{
	unsigned int i;

	memset(dst, 0, sizeof(struct vmcb_control_area));

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	dst->asid                 = from->asid;
	dst->tlb_ctl              = from->tlb_ctl;
	dst->erap_ctl             = from->erap_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->misc_ctl             = from->misc_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->next_rip             = from->next_rip;
	dst->nested_cr3           = from->nested_cr3;
	dst->misc_ctl2            = from->misc_ctl2;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
	/* 'clean' and 'hv_enlightenments' are not changed by KVM */
}

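/*
 * KVM_GET_NESTED_STATE: fill in the header flags and, if L2 is active, the
 * vmcb12 GPA, the cached vmcb12 control area and the L1 save area kept in
 * vmcb01.  A NULL vcpu queries only the required buffer size.
 */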
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct vmcb_control_area *ctl;
	unsigned long r;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out. */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (vcpu->arch.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;

	ctl = kzalloc_obj(*ctl);
	if (!ctl)
		return -ENOMEM;

	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
	r = copy_to_user(&user_vmcb->control, ctl,
			 sizeof(user_vmcb->control));
	kfree(ctl);
	if (r)
		return -EFAULT;

	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;
out:
	return kvm_state.size;
}

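/*
 * KVM_SET_NESTED_STATE: validate the header flags and, when (re)entering
 * guest mode, the vmcb12 control and save areas before switching to vmcb02,
 * mirroring the consistency checks performed by a real VMRUN.
 */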
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	struct vmcb_save_area_cached save_cached;
	struct vmcb_ctrl_area_cached ctl_cached;
	unsigned long cr0;
	int ret;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 * If SVME is disabled, the only valid states are "none" and GIF=1
	 * (clearing SVME does NOT set GIF, i.e. GIF=0 is allowed).
	 */
	if (!(vcpu->arch.efer & EFER_SVME) && kvm_state->flags &&
	    kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
		return -EINVAL;

	/* SMM temporarily disables SVM, so we cannot be in guest mode. */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(vcpu);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ctl = memdup_user(&user_vmcb->control, sizeof(*ctl));
	if (IS_ERR(ctl))
		return PTR_ERR(ctl);

	save = memdup_user(&user_vmcb->save, sizeof(*save));
	if (IS_ERR(save)) {
		kfree(ctl);
		return PTR_ERR(save);
	}

	ret = -EINVAL;
	__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
	if (!nested_vmcb_check_controls(vcpu, &ctl_cached))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_check_save()).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 */
	__nested_copy_vmcb_save_to_cache(&save_cached, save);
	if (!(save->cr0 & X86_CR0_PG) ||
	    !(save->cr0 & X86_CR0_PE) ||
	    (save->rflags & X86_EFLAGS_VM) ||
	    !nested_vmcb_check_save(vcpu, &save_cached))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode.  Userspace provides
	 * vmcb12.control, which will be combined with L1 and stored into
	 * vmcb02, and the L1 save state which we store in vmcb01.
	 * L2 registers, if needed, are moved from the current VMCB to vmcb02.
	 */

	if (is_guest_mode(vcpu))
		svm_leave_nested(vcpu);
	else
		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;

	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));

	if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
		vcpu->arch.nested_run_pending = KVM_NESTED_RUN_PENDING_UNTRUSTED;
	else
		vcpu->arch.nested_run_pending = 0;

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;

	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
	nested_copy_vmcb_control_to_cache(svm, ctl);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);

	/*
	 * Any previously restored state (e.g. KVM_SET_SREGS) would mark fields
	 * dirty in vmcb01 instead of vmcb02, so mark all of vmcb02 dirty here.
	 */
	vmcb_mark_all_dirty(svm->vmcb);

	/*
	 * While the nested guest CR3 is already checked and set by
	 * KVM_SET_SREGS, it was set before the nested state was loaded, so
	 * the MMU might not be initialized correctly.  Set it again to fix
	 * this.
	 */
	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
				  nested_npt_enabled(svm), false);
	if (ret)
		goto out_free;

	svm->nested.force_msr_bitmap_recalc = true;

	if (kvm_vcpu_apicv_active(vcpu))
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

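/*
 * (Re)load guest pages that could not be handled while nested state was being
 * restored, e.g. PDPTRs when NPT is disabled, and rebuild the merged MSR
 * permission bitmap.  Returns false on failure.
 */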
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!vcpu->arch.pdptrs_from_userspace &&
	    !nested_npt_enabled(to_svm(vcpu)) && is_pae_paging(vcpu))
		/*
		 * Reload the guest's PDPTRs since after a migration
		 * the guest CR3 might be restored prior to setting the
		 * nested state, which can lead to loading the wrong PDPTRs.
		 */
		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
			return false;

	if (!nested_svm_merge_msrpm(vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	if (kvm_hv_verify_vp_assist(vcpu))
		return false;

	return true;
}

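/* Nested SVM callbacks invoked from the common x86 nested virtualization code. */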
struct kvm_x86_nested_ops svm_nested_ops = {
	.leave_nested = svm_leave_nested,
	.is_exception_vmexit = nested_svm_is_exception_vmexit,
	.check_events = svm_check_nested_events,
	.triple_fault = nested_svm_triple_fault,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
	.hv_inject_synthetic_vmexit_post_tlb_flush = svm_hv_inject_synthetic_vmexit_post_tlb_flush,
};