1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * AMD SVM support
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9 *
10 * Authors:
11 * Yaniv Kamay <yaniv@qumranet.com>
12 * Avi Kivity <avi@qumranet.com>
13 */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/kvm_types.h>
18 #include <linux/kvm_host.h>
19 #include <linux/kernel.h>
20
21 #include <asm/msr-index.h>
22 #include <asm/debugreg.h>
23
24 #include "kvm_emulate.h"
25 #include "trace.h"
26 #include "mmu.h"
27 #include "x86.h"
28 #include "smm.h"
29 #include "cpuid.h"
30 #include "lapic.h"
31 #include "svm.h"
32 #include "hyperv.h"
33
34 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
35
36 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
37 struct x86_exception *fault)
38 {
39 struct vcpu_svm *svm = to_svm(vcpu);
40 struct vmcb *vmcb = svm->vmcb;
41
42 if (vmcb->control.exit_code != SVM_EXIT_NPF) {
43 /*
44 * TODO: track the cause of the nested page fault, and
45 * correctly fill in the high bits of exit_info_1.
46 */
47 vmcb->control.exit_code = SVM_EXIT_NPF;
48 vmcb->control.exit_info_1 = (1ULL << 32);
49 vmcb->control.exit_info_2 = fault->address;
50 }
51
52 vmcb->control.exit_info_1 &= ~0xffffffffULL;
53 vmcb->control.exit_info_1 |= fault->error_code;
54
55 nested_svm_vmexit(svm);
56 }
57
58 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
59 {
60 struct vcpu_svm *svm = to_svm(vcpu);
61 u64 cr3 = svm->nested.ctl.nested_cr3;
62 u64 pdpte;
63 int ret;
64
65 /*
66 * Note, nCR3 is "assumed" to be 32-byte aligned, i.e. the CPU ignores
67 * nCR3[4:0] when loading PDPTEs from memory.
68 */
69 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
70 (cr3 & GENMASK(11, 5)) + index * 8, 8);
71 if (ret)
72 return 0;
73 return pdpte;
74 }
75
76 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
77 {
78 struct vcpu_svm *svm = to_svm(vcpu);
79
80 return svm->nested.ctl.nested_cr3;
81 }
82
83 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
84 {
85 struct vcpu_svm *svm = to_svm(vcpu);
86
87 WARN_ON(mmu_is_nested(vcpu));
88
89 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
90
91 /*
92 * The NPT format depends on L1's CR4 and EFER, which are in vmcb01. Note,
93 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
94 * vCPU state. CR0.WP is explicitly ignored, while CR0.PG is required.
95 */
96 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
97 svm->vmcb01.ptr->save.efer,
98 svm->nested.ctl.nested_cr3);
99 vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
100 vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
101 vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
102 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
103 }
104
105 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
106 {
107 vcpu->arch.mmu = &vcpu->arch.root_mmu;
108 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
109 }
110
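/*
 * Return true if KVM needs to intercept VMLOAD/VMSAVE for L2, i.e. if virtual
 * VMLOAD/VMSAVE can't be used: the feature isn't exposed to L1, nested NPT is
 * disabled, or vmcb12 doesn't enable it.
 */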
111 static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
112 {
113 if (!guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
114 return true;
115
116 if (!nested_npt_enabled(svm))
117 return true;
118
119 if (!(svm->nested.ctl.misc_ctl2 & SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE))
120 return true;
121
122 return false;
123 }
124
125 void nested_vmcb02_recalc_intercepts(struct vcpu_svm *svm)
126 {
127 struct vmcb_ctrl_area_cached *vmcb12_ctrl = &svm->nested.ctl;
128 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
129 struct vmcb *vmcb01 = svm->vmcb01.ptr;
130 unsigned int i;
131
132 if (WARN_ON_ONCE(svm->vmcb != vmcb02))
133 return;
134
135 vmcb_mark_dirty(vmcb02, VMCB_INTERCEPTS);
136
137 for (i = 0; i < MAX_INTERCEPT; i++)
138 vmcb02->control.intercepts[i] = vmcb01->control.intercepts[i];
139
140 if (vmcb12_ctrl->int_ctl & V_INTR_MASKING_MASK) {
141 /*
142 * If L2 is active and V_INTR_MASKING is enabled in vmcb12,
143 * disable intercept of CR8 writes as L2's CR8 does not affect
144 * any interrupt KVM may want to inject.
145 *
146 * Similarly, disable intercept of virtual interrupts (used to
147 * detect interrupt windows) if the saved RFLAGS.IF is '0', as
148 * the effective RFLAGS.IF for L1 interrupts will never be set
149 * while L2 is running (L2's RFLAGS.IF doesn't affect L1 IRQs).
150 */
151 vmcb_clr_intercept(&vmcb02->control, INTERCEPT_CR8_WRITE);
152 if (!(vmcb01->save.rflags & X86_EFLAGS_IF))
153 vmcb_clr_intercept(&vmcb02->control, INTERCEPT_VINTR);
154 }
155
156 for (i = 0; i < MAX_INTERCEPT; i++)
157 vmcb02->control.intercepts[i] |= vmcb12_ctrl->intercepts[i];
158
159 /* If SMI is not intercepted, ignore guest SMI intercept as well */
160 if (!intercept_smi)
161 vmcb_clr_intercept(&vmcb02->control, INTERCEPT_SMI);
162
163 /*
164 * Intercept PAUSE if and only if L1 wants to. KVM intercepts PAUSE so
165 * that a vCPU that may be spinning waiting for a lock can be scheduled
166 * out in favor of the vCPU that holds said lock. KVM doesn't support
167 * yielding across L2 vCPUs, as KVM has limited visibility into which
168 * L2 vCPUs are in the same L2 VM, i.e. may be contending for locks.
169 */
170 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE))
171 vmcb_clr_intercept(&vmcb02->control, INTERCEPT_PAUSE);
172
173 if (nested_vmcb_needs_vls_intercept(svm)) {
174 /*
175 * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
176 * we must intercept these instructions to correctly
177 * emulate them in case L1 doesn't intercept them.
178 */
179 vmcb_set_intercept(&vmcb02->control, INTERCEPT_VMLOAD);
180 vmcb_set_intercept(&vmcb02->control, INTERCEPT_VMSAVE);
181 } else {
182 WARN_ON_ONCE(!(vmcb02->control.misc_ctl2 & SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE));
183 }
184 }
185
186 /*
187 * This array (and its actual size) holds the set of offsets (indexing by chunk
188 * size) to process when merging vmcb12's MSRPM with vmcb01's MSRPM. Note, the
189 * set of MSRs for which interception is disabled in vmcb01 is per-vCPU, e.g.
190 * based on CPUID features. This array only tracks MSRs that *might* be passed
191 * through to the guest.
192 *
193 * Hardcode the capacity of the array based on the maximum number of _offsets_.
194 * MSRs are batched together, so there are fewer offsets than MSRs.
195 */
196 static int nested_svm_msrpm_merge_offsets[10] __ro_after_init;
197 static int nested_svm_nr_msrpm_merge_offsets __ro_after_init;
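/* The MSRPMs are merged in chunks of this type; the offsets above index the bitmaps as arrays of these chunks. */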
198 typedef unsigned long nsvm_msrpm_merge_t;
199
200 int __init nested_svm_init_msrpm_merge_offsets(void)
201 {
202 static const u32 merge_msrs[] __initconst = {
203 MSR_STAR,
204 MSR_IA32_SYSENTER_CS,
205 MSR_IA32_SYSENTER_EIP,
206 MSR_IA32_SYSENTER_ESP,
207 #ifdef CONFIG_X86_64
208 MSR_GS_BASE,
209 MSR_FS_BASE,
210 MSR_KERNEL_GS_BASE,
211 MSR_LSTAR,
212 MSR_CSTAR,
213 MSR_SYSCALL_MASK,
214 #endif
215 MSR_IA32_SPEC_CTRL,
216 MSR_IA32_PRED_CMD,
217 MSR_IA32_FLUSH_CMD,
218 MSR_IA32_APERF,
219 MSR_IA32_MPERF,
220 MSR_IA32_LASTBRANCHFROMIP,
221 MSR_IA32_LASTBRANCHTOIP,
222 MSR_IA32_LASTINTFROMIP,
223 MSR_IA32_LASTINTTOIP,
224
225 MSR_K7_PERFCTR0,
226 MSR_K7_PERFCTR1,
227 MSR_K7_PERFCTR2,
228 MSR_K7_PERFCTR3,
229 MSR_F15H_PERF_CTR0,
230 MSR_F15H_PERF_CTR1,
231 MSR_F15H_PERF_CTR2,
232 MSR_F15H_PERF_CTR3,
233 MSR_F15H_PERF_CTR4,
234 MSR_F15H_PERF_CTR5,
235
236 MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
237 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
238 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
239 MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,
240 };
241 int i, j;
242
243 for (i = 0; i < ARRAY_SIZE(merge_msrs); i++) {
244 int bit_nr = svm_msrpm_bit_nr(merge_msrs[i]);
245 u32 offset;
246
247 if (WARN_ON(bit_nr < 0))
248 return -EIO;
249
250 /*
251 * Merging is done in chunks to reduce the number of accesses
252 * to L1's bitmap.
253 */
254 offset = bit_nr / BITS_PER_BYTE / sizeof(nsvm_msrpm_merge_t);
255
256 for (j = 0; j < nested_svm_nr_msrpm_merge_offsets; j++) {
257 if (nested_svm_msrpm_merge_offsets[j] == offset)
258 break;
259 }
260
261 if (j < nested_svm_nr_msrpm_merge_offsets)
262 continue;
263
264 if (WARN_ON(j >= ARRAY_SIZE(nested_svm_msrpm_merge_offsets)))
265 return -EIO;
266
267 nested_svm_msrpm_merge_offsets[j] = offset;
268 nested_svm_nr_msrpm_merge_offsets++;
269 }
270
271 return 0;
272 }
273
274 /*
275 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
276 * is optimized in that it only merges the parts where the KVM MSR permission bitmap
277 * may contain zero bits.
278 */
279 static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
280 {
281 struct vcpu_svm *svm = to_svm(vcpu);
282 nsvm_msrpm_merge_t *msrpm02 = svm->nested.msrpm;
283 nsvm_msrpm_merge_t *msrpm01 = svm->msrpm;
284 int i;
285
286 /*
287 * MSR bitmap update can be skipped when:
288 * - MSR bitmap for L1 hasn't changed.
289 * - Nested hypervisor (L1) is attempting to launch the same L2 as
290 * before.
291 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
292 * tells KVM (L0) there were no changes in MSR bitmap for L2.
293 */
294 #ifdef CONFIG_KVM_HYPERV
295 if (!svm->nested.force_msr_bitmap_recalc) {
296 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
297
298 if (kvm_hv_hypercall_enabled(vcpu) &&
299 hve->hv_enlightenments_control.msr_bitmap &&
300 (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
301 goto set_msrpm_base_pa;
302 }
303 #endif
304
305 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
306 return true;
307
308 for (i = 0; i < nested_svm_nr_msrpm_merge_offsets; i++) {
309 const int p = nested_svm_msrpm_merge_offsets[i];
310 nsvm_msrpm_merge_t l1_val;
311 gpa_t gpa;
312
313 gpa = svm->nested.ctl.msrpm_base_pa + (p * sizeof(l1_val));
314
315 if (kvm_vcpu_read_guest(vcpu, gpa, &l1_val, sizeof(l1_val)))
316 return false;
317
318 msrpm02[p] = msrpm01[p] | l1_val;
319 }
320
321 svm->nested.force_msr_bitmap_recalc = false;
322
323 #ifdef CONFIG_KVM_HYPERV
324 set_msrpm_base_pa:
325 #endif
326 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
327
328 return true;
329 }
330
331 /*
332 * Bits 11:0 of bitmap address are ignored by hardware
333 */
334 static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
335 {
336 u64 addr = PAGE_ALIGN(pa);
337
338 return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
339 kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
340 }
341
342 static bool nested_svm_event_inj_valid_exept(struct kvm_vcpu *vcpu, u8 vector)
343 {
344 /*
345 * Vectors that do not correspond to a defined exception are invalid
346 * (including #NMI and reserved vectors). In a best effort to define
347 * valid exceptions based on the virtual CPU, make all exceptions always
348 * valid except those obviously tied to a CPU feature.
349 */
350 switch (vector) {
351 case DE_VECTOR: case DB_VECTOR: case BP_VECTOR: case OF_VECTOR:
352 case BR_VECTOR: case UD_VECTOR: case NM_VECTOR: case DF_VECTOR:
353 case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR:
354 case PF_VECTOR: case MF_VECTOR: case AC_VECTOR: case MC_VECTOR:
355 case XM_VECTOR: case HV_VECTOR: case SX_VECTOR:
356 return true;
357 case CP_VECTOR:
358 return guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
359 case VC_VECTOR:
360 return guest_cpu_cap_has(vcpu, X86_FEATURE_SEV_ES);
361 }
362 return false;
363 }
364
365 /*
366 * According to the APM, VMRUN exits with SVM_EXIT_ERR if SVM_EVTINJ_VALID is
367 * set and:
368 * - The type of event_inj is not one of the defined values.
369 * - The type is SVM_EVTINJ_TYPE_EXEPT, but the vector is not a valid exception.
370 */
371 static bool nested_svm_check_event_inj(struct kvm_vcpu *vcpu, u32 event_inj)
372 {
373 u32 type = event_inj & SVM_EVTINJ_TYPE_MASK;
374 u8 vector = event_inj & SVM_EVTINJ_VEC_MASK;
375
376 if (!(event_inj & SVM_EVTINJ_VALID))
377 return true;
378
379 if (type != SVM_EVTINJ_TYPE_INTR && type != SVM_EVTINJ_TYPE_NMI &&
380 type != SVM_EVTINJ_TYPE_EXEPT && type != SVM_EVTINJ_TYPE_SOFT)
381 return false;
382
383 if (type == SVM_EVTINJ_TYPE_EXEPT &&
384 !nested_svm_event_inj_valid_exept(vcpu, vector))
385 return false;
386
387 return true;
388 }
389
390 static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
391 struct vmcb_ctrl_area_cached *control)
392 {
393 if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
394 return false;
395
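/* ASID 0 is reserved for the host; VMRUN with a zero ASID is illegal. */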
396 if (CC(control->asid == 0))
397 return false;
398
399 if (CC((control->misc_ctl & SVM_MISC_ENABLE_NP) &&
400 !kvm_vcpu_is_legal_gpa(vcpu, control->nested_cr3)))
401 return false;
402
403 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
404 MSRPM_SIZE)))
405 return false;
406 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
407 IOPM_SIZE)))
408 return false;
409
410 if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
411 !vmcb12_is_intercept(control, INTERCEPT_NMI))) {
412 return false;
413 }
414
415 if (CC(!nested_svm_check_event_inj(vcpu, control->event_inj)))
416 return false;
417
418 return true;
419 }
420
421 /* Common checks that apply to both L1 and L2 state. */
422 static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu,
423 struct vmcb_save_area_cached *save)
424 {
425 if (CC(!(save->efer & EFER_SVME)))
426 return false;
427
428 if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
429 CC(save->cr0 & ~0xffffffffULL))
430 return false;
431
432 if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
433 return false;
434
435 /*
436 * These checks are also performed by KVM_SET_SREGS,
437 * except that EFER.LMA is not checked by SVM against
438 * CR0.PG && EFER.LME.
439 */
440 if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
441 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
442 CC(!(save->cr0 & X86_CR0_PE)) ||
443 CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
444 return false;
445
446 if (CC((save->cs.attrib & SVM_SELECTOR_L_MASK) &&
447 (save->cs.attrib & SVM_SELECTOR_DB_MASK)))
448 return false;
449 }
450
451 /* Note, SVM doesn't have any additional restrictions on CR4. */
452 if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
453 return false;
454
455 if (CC(!kvm_valid_efer(vcpu, save->efer)))
456 return false;
457
458 return true;
459 }
460
461 int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu)
462 {
463 struct vcpu_svm *svm = to_svm(vcpu);
464
465 if (!nested_vmcb_check_save(vcpu, &svm->nested.save) ||
466 !nested_vmcb_check_controls(vcpu, &svm->nested.ctl))
467 return -EINVAL;
468
469 return 0;
470 }
471
472 /*
473 * If a feature is not advertised to L1, clear the corresponding vmcb12
474 * intercept.
475 */
476 #define __nested_svm_sanitize_intercept(__vcpu, __control, fname, iname) \
477 do { \
478 if (!guest_cpu_cap_has(__vcpu, X86_FEATURE_##fname)) \
479 vmcb12_clr_intercept(__control, INTERCEPT_##iname); \
480 } while (0)
481
482 #define nested_svm_sanitize_intercept(__vcpu, __control, name) \
483 __nested_svm_sanitize_intercept(__vcpu, __control, name, name)
484
485 static
486 void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
487 struct vmcb_ctrl_area_cached *to,
488 struct vmcb_control_area *from)
489 {
490 unsigned int i;
491
492 for (i = 0; i < MAX_INTERCEPT; i++)
493 to->intercepts[i] = from->intercepts[i];
494
495 __nested_svm_sanitize_intercept(vcpu, to, XSAVE, XSETBV);
496 nested_svm_sanitize_intercept(vcpu, to, INVPCID);
497 nested_svm_sanitize_intercept(vcpu, to, RDTSCP);
498 nested_svm_sanitize_intercept(vcpu, to, SKINIT);
499 nested_svm_sanitize_intercept(vcpu, to, RDPRU);
500
501 /* Always clear SVM_MISC_ENABLE_NP if the guest cannot use NPTs */
502 to->misc_ctl = from->misc_ctl;
503 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_NPT))
504 to->misc_ctl &= ~SVM_MISC_ENABLE_NP;
505
506 to->iopm_base_pa = from->iopm_base_pa & PAGE_MASK;
507 to->msrpm_base_pa = from->msrpm_base_pa & PAGE_MASK;
508 to->tsc_offset = from->tsc_offset;
509 to->tlb_ctl = from->tlb_ctl & TLB_CONTROL_MASK;
510 to->erap_ctl = from->erap_ctl;
511 to->int_ctl = from->int_ctl;
512 to->int_vector = from->int_vector & SVM_INT_VECTOR_MASK;
513 to->int_state = from->int_state & SVM_INTERRUPT_SHADOW_MASK;
514 to->exit_code = from->exit_code;
515 to->exit_info_1 = from->exit_info_1;
516 to->exit_info_2 = from->exit_info_2;
517 to->exit_int_info = from->exit_int_info;
518 to->exit_int_info_err = from->exit_int_info_err;
519 to->event_inj = from->event_inj & ~SVM_EVTINJ_RESERVED_BITS;
520 to->event_inj_err = from->event_inj_err;
521 to->next_rip = from->next_rip;
522 to->nested_cr3 = from->nested_cr3;
523 to->misc_ctl2 = from->misc_ctl2;
524 to->pause_filter_count = from->pause_filter_count;
525 to->pause_filter_thresh = from->pause_filter_thresh;
526
527 /* Copy asid here because nested_vmcb_check_controls() will check it */
528 to->asid = from->asid;
529 to->clean = from->clean;
530
531 #ifdef CONFIG_KVM_HYPERV
532 /* Hyper-V extensions (Enlightened VMCB) */
533 if (kvm_hv_hypercall_enabled(vcpu)) {
534 memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
535 sizeof(to->hv_enlightenments));
536 }
537 #endif
538 }
539
540 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
541 struct vmcb_control_area *control)
542 {
543 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
544 }
545
546 static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
547 struct vmcb_save_area *from)
548 {
549 to->es = from->es;
550 to->cs = from->cs;
551 to->ss = from->ss;
552 to->ds = from->ds;
553 to->gdtr = from->gdtr;
554 to->idtr = from->idtr;
555
556 to->cpl = from->cpl;
557
558 to->efer = from->efer;
559 to->cr4 = from->cr4;
560 to->cr3 = from->cr3;
561 to->cr0 = from->cr0;
562 to->dr7 = from->dr7;
563 to->dr6 = from->dr6;
564
565 to->rflags = from->rflags;
566 to->rip = from->rip;
567 to->rsp = from->rsp;
568
569 to->s_cet = from->s_cet;
570 to->ssp = from->ssp;
571 to->isst_addr = from->isst_addr;
572
573 to->rax = from->rax;
574 to->cr2 = from->cr2;
575
576 svm_copy_lbrs(to, from);
577 }
578
579 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
580 struct vmcb_save_area *save)
581 {
582 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
583 }
584
585 /*
586 * Synchronize fields that are written by the processor, so that
587 * they can be copied back into the vmcb12.
588 */
589 void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
590 {
591 u32 mask;
592 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
593 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
594 svm->nested.ctl.int_state = svm->vmcb->control.int_state;
595
596 /* Only a few fields of int_ctl are written by the processor. */
597 mask = V_IRQ_MASK | V_TPR_MASK;
598 /*
599 * Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting
600 * virtual interrupts in order to request an interrupt window, as KVM
601 * has usurped vmcb02's int_ctl. If an interrupt window opens before
602 * the next VM-Exit, svm_clear_vintr() will restore vmcb12's int_ctl.
603 * If no window opens, V_IRQ will be correctly preserved in vmcb12's
604 * int_ctl (because it was never recognized while L2 was running).
605 */
606 if (svm_is_intercept(svm, INTERCEPT_VINTR) &&
607 !vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_VINTR))
608 mask &= ~V_IRQ_MASK;
609
610 if (nested_vgif_enabled(svm))
611 mask |= V_GIF_MASK;
612
613 if (nested_vnmi_enabled(svm))
614 mask |= V_NMI_BLOCKING_MASK | V_NMI_PENDING_MASK;
615
616 svm->nested.ctl.int_ctl &= ~mask;
617 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
618 }
619
620 /*
621 * Transfer any event that L0 or L1 wanted to inject into L2 to
622 * EXIT_INT_INFO.
623 */
624 static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
625 struct vmcb *vmcb12)
626 {
627 struct kvm_vcpu *vcpu = &svm->vcpu;
628 u32 exit_int_info = 0;
629 unsigned int nr;
630
631 if (vcpu->arch.exception.injected) {
632 nr = vcpu->arch.exception.vector;
633 exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
634
635 if (vcpu->arch.exception.has_error_code) {
636 exit_int_info |= SVM_EVTINJ_VALID_ERR;
637 vmcb12->control.exit_int_info_err =
638 vcpu->arch.exception.error_code;
639 }
640
641 } else if (vcpu->arch.nmi_injected) {
642 exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
643
644 } else if (vcpu->arch.interrupt.injected) {
645 nr = vcpu->arch.interrupt.nr;
646 exit_int_info = nr | SVM_EVTINJ_VALID;
647
648 if (vcpu->arch.interrupt.soft)
649 exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
650 else
651 exit_int_info |= SVM_EVTINJ_TYPE_INTR;
652 }
653
654 vmcb12->control.exit_int_info = exit_int_info;
655 }
656
657 static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
658 {
659 /* Handle pending Hyper-V TLB flush requests */
660 kvm_hv_nested_transtion_tlb_flush(vcpu, npt_enabled);
661
662 /*
663 * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
664 * things to fix before this can be conditional:
665 *
666 * - Flush TLBs for both L1 and L2 remote TLB flush
667 * - Honor L1's request to flush an ASID on nested VMRUN
668 * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
669 * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
670 * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
671 *
672 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
673 * NPT guest-physical mappings on VMRUN.
674 */
675 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
676 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
677 }
678
679 /*
680 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
681 * if we are emulating VM-Entry into a guest with NPT enabled.
682 */
683 static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
684 bool nested_npt, bool reload_pdptrs)
685 {
686 if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3)))
687 return -EINVAL;
688
689 if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
690 CC(!load_pdptrs(vcpu, cr3)))
691 return -EINVAL;
692
693 vcpu->arch.cr3 = cr3;
694
695 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
696 kvm_init_mmu(vcpu);
697
698 if (!nested_npt)
699 kvm_mmu_new_pgd(vcpu, cr3);
700
701 return 0;
702 }
703
704 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
705 {
706 if (!svm->nested.vmcb02.ptr)
707 return;
708
709 /* FIXME: merge g_pat from vmcb01 and vmcb12. */
710 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
711 }
712
713 static bool nested_vmcb12_has_lbrv(struct kvm_vcpu *vcpu)
714 {
715 return guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
716 (to_svm(vcpu)->nested.ctl.misc_ctl2 & SVM_MISC2_ENABLE_V_LBR);
717 }
718
719 static void nested_vmcb02_prepare_save(struct vcpu_svm *svm)
720 {
721 struct vmcb_ctrl_area_cached *control = &svm->nested.ctl;
722 struct vmcb_save_area_cached *save = &svm->nested.save;
723 bool new_vmcb12 = false;
724 struct vmcb *vmcb01 = svm->vmcb01.ptr;
725 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
726 struct kvm_vcpu *vcpu = &svm->vcpu;
727
728 nested_vmcb02_compute_g_pat(svm);
729 vmcb_mark_dirty(vmcb02, VMCB_NPT);
730
731 /* Load the nested guest state */
732 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
733 new_vmcb12 = true;
734 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
735 svm->nested.force_msr_bitmap_recalc = true;
736 }
737
738 if (unlikely(new_vmcb12 || vmcb12_is_dirty(control, VMCB_SEG))) {
739 vmcb02->save.es = save->es;
740 vmcb02->save.cs = save->cs;
741 vmcb02->save.ss = save->ss;
742 vmcb02->save.ds = save->ds;
743 vmcb02->save.cpl = save->cpl;
744 vmcb_mark_dirty(vmcb02, VMCB_SEG);
745 }
746
747 if (unlikely(new_vmcb12 || vmcb12_is_dirty(control, VMCB_DT))) {
748 vmcb02->save.gdtr = save->gdtr;
749 vmcb02->save.idtr = save->idtr;
750 vmcb_mark_dirty(vmcb02, VMCB_DT);
751 }
752
753 if (guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
754 (unlikely(new_vmcb12 || vmcb12_is_dirty(control, VMCB_CET)))) {
755 vmcb02->save.s_cet = save->s_cet;
756 vmcb02->save.isst_addr = save->isst_addr;
757 vmcb02->save.ssp = save->ssp;
758 vmcb_mark_dirty(vmcb02, VMCB_CET);
759 }
760
761 kvm_set_rflags(vcpu, save->rflags | X86_EFLAGS_FIXED);
762
763 svm_set_efer(vcpu, svm->nested.save.efer);
764
765 svm_set_cr0(vcpu, svm->nested.save.cr0);
766 svm_set_cr4(vcpu, svm->nested.save.cr4);
767
768 svm->vcpu.arch.cr2 = save->cr2;
769
770 kvm_rax_write(vcpu, save->rax);
771 kvm_rsp_write(vcpu, save->rsp);
772 kvm_rip_write(vcpu, save->rip);
773
774 /* In case we don't even reach vcpu_run, the fields are not updated */
775 vmcb02->save.rax = save->rax;
776 vmcb02->save.rsp = save->rsp;
777 vmcb02->save.rip = save->rip;
778
779 if (unlikely(new_vmcb12 || vmcb12_is_dirty(control, VMCB_DR))) {
780 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
781 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
782 vmcb_mark_dirty(vmcb02, VMCB_DR);
783 }
784
785 if (nested_vmcb12_has_lbrv(vcpu)) {
786 /*
787 * Reserved bits of DEBUGCTL are ignored. Be consistent with
788 * svm_set_msr's definition of reserved bits.
789 */
790 svm_copy_lbrs(&vmcb02->save, save);
791 vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
792 } else {
793 svm_copy_lbrs(&vmcb02->save, &vmcb01->save);
794 }
795 vmcb_mark_dirty(vmcb02, VMCB_LBR);
796 svm_update_lbrv(&svm->vcpu);
797 }
798
799 static inline bool is_evtinj_soft(u32 evtinj)
800 {
801 u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
802 u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;
803
804 if (!(evtinj & SVM_EVTINJ_VALID))
805 return false;
806
807 if (type == SVM_EVTINJ_TYPE_SOFT)
808 return true;
809
810 return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
811 }
812
813 static bool is_evtinj_nmi(u32 evtinj)
814 {
815 u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
816
817 if (!(evtinj & SVM_EVTINJ_VALID))
818 return false;
819
820 return type == SVM_EVTINJ_TYPE_NMI;
821 }
822
823 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
824 {
825 u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
826 u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
827
828 struct vmcb_ctrl_area_cached *vmcb12_ctrl = &svm->nested.ctl;
829 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
830 struct vmcb *vmcb01 = svm->vmcb01.ptr;
831 struct kvm_vcpu *vcpu = &svm->vcpu;
832
833 nested_svm_transition_tlb_flush(vcpu);
834
835 /* Enter Guest-Mode */
836 enter_guest_mode(vcpu);
837
838 /*
839 * Filled at exit: exit_code, exit_info_1, exit_info_2, exit_int_info,
840 * exit_int_info_err, next_rip, insn_len, insn_bytes.
841 */
842
843 if (guest_cpu_cap_has(vcpu, X86_FEATURE_VGIF) &&
844 (vmcb12_ctrl->int_ctl & V_GIF_ENABLE_MASK))
845 int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
846 else
847 int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
848
849 if (vnmi) {
850 if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) {
851 svm->vcpu.arch.nmi_pending++;
852 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
853 }
854 if (nested_vnmi_enabled(svm))
855 int_ctl_vmcb12_bits |= (V_NMI_PENDING_MASK |
856 V_NMI_ENABLE_MASK |
857 V_NMI_BLOCKING_MASK);
858 }
859
860 /*
861 * Copied from vmcb01. msrpm_base can be overwritten later.
862 *
863 * SVM_MISC_ENABLE_NP in vmcb12 is only used for consistency checks. If
864 * L1 enables NPTs, KVM shadows L1's NPTs and uses those to run L2. If
865 * L1 disables NPT, KVM runs L2 with the same NPTs used to run L1. For
866 * the latter, L1 runs L2 with shadow page tables that translate L2 GVAs
867 * to L1 GPAs, so the same NPTs can be used for L1 and L2.
868 */
869 vmcb02->control.misc_ctl = vmcb01->control.misc_ctl & SVM_MISC_ENABLE_NP;
870 vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
871 vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
872 vmcb_mark_dirty(vmcb02, VMCB_PERM_MAP);
873
874 /*
875 * Stash vmcb02's counter if the guest hasn't moved past the guilty
876 * instruction; otherwise, reset the counter to '0'.
877 *
878 * In order to detect if L2 has made forward progress or not, track the
879 * RIP at which a bus lock has occurred on a per-vmcb12 basis. If RIP
880 * has changed, the guest has clearly made forward progress even if
881 * bus_lock_counter remained '1', so reset bus_lock_counter to '0'. E.g. in the
882 * scenario where a bus lock happened in L1 before VMRUN, the bus lock
883 * firmly happened on an instruction in the past. Even if vmcb01's
884 * counter is still '1', (because the guilty instruction got patched),
885 * the vCPU has clearly made forward progress and so KVM should reset
886 * vmcb02's counter to '0'.
887 *
888 * If the RIP hasn't changed, stash the bus lock counter at nested VMRUN
889 * to prevent the same guilty instruction from triggering a VM-Exit. Eg.
890 * if userspace rate-limits the vCPU, then it's entirely possible that
891 * L1's tick interrupt is pending by the time userspace re-runs the
892 * vCPU. If KVM unconditionally clears the counter on VMRUN, then when
893 * L1 re-enters L2, the same instruction will trigger a VM-Exit and the
894 * entire cycle start over.
895 */
896 if (vmcb02->save.rip && (svm->nested.last_bus_lock_rip == vmcb02->save.rip))
897 vmcb02->control.bus_lock_counter = 1;
898 else
899 vmcb02->control.bus_lock_counter = 0;
900
901 /* Done at vmrun: asid. */
902
903 /* Also overwritten later if necessary. */
904 vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
905
906 /* nested_cr3. */
907 if (nested_npt_enabled(svm))
908 nested_svm_init_mmu_context(vcpu);
909
910 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(vcpu->arch.l1_tsc_offset,
911 vmcb12_ctrl->tsc_offset,
912 svm->tsc_ratio_msr);
913
914 vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
915
916 if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
917 svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
918 nested_svm_update_tsc_ratio_msr(vcpu);
919
920 vmcb02->control.int_ctl =
921 (vmcb12_ctrl->int_ctl & int_ctl_vmcb12_bits) |
922 (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
923
924 vmcb02->control.int_vector = vmcb12_ctrl->int_vector;
925 vmcb02->control.int_state = vmcb12_ctrl->int_state;
926 vmcb02->control.event_inj = vmcb12_ctrl->event_inj;
927 vmcb02->control.event_inj_err = vmcb12_ctrl->event_inj_err;
928
929 /*
930 * If nrips is exposed to L1, take NextRIP as-is. Otherwise, L1
931 * advances L2's RIP before VMRUN instead of using NextRIP. KVM will
932 * stuff the current RIP as vmcb02's NextRIP before L2 is run. After
933 * the first run of L2 (e.g. after save+restore), NextRIP is updated by
934 * the CPU and/or KVM and should be used regardless of L1's support.
935 */
936 if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS) ||
937 !vcpu->arch.nested_run_pending)
938 vmcb02->control.next_rip = vmcb12_ctrl->next_rip;
939
940 svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
941
942 /*
943 * soft_int_csbase, soft_int_old_rip, and soft_int_next_rip (if L1
944 * doesn't have NRIPS) are initialized later, before the vCPU is run.
945 */
946 if (is_evtinj_soft(vmcb02->control.event_inj)) {
947 svm->soft_int_injected = true;
948 if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS) ||
949 !vcpu->arch.nested_run_pending)
950 svm->soft_int_next_rip = vmcb12_ctrl->next_rip;
951 }
952
953 /* SVM_MISC2_ENABLE_V_LBR is controlled by svm_update_lbrv() */
954
955 if (!nested_vmcb_needs_vls_intercept(svm))
956 vmcb02->control.misc_ctl2 |= SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE;
957
958 if (guest_cpu_cap_has(vcpu, X86_FEATURE_PAUSEFILTER))
959 vmcb02->control.pause_filter_count = vmcb12_ctrl->pause_filter_count;
960 else
961 vmcb02->control.pause_filter_count = 0;
962 if (guest_cpu_cap_has(vcpu, X86_FEATURE_PFTHRESHOLD))
963 vmcb02->control.pause_filter_thresh = vmcb12_ctrl->pause_filter_thresh;
964 else
965 vmcb02->control.pause_filter_thresh = 0;
966
967 /*
968 * Take ALLOW_LARGER_RAP from vmcb12 even though it should be safe to
969 * let L2 use a larger RAP since KVM will emulate the necessary clears,
970 * as it's possible L1 deliberately wants to restrict L2 to the legacy
971 * RAP size. Unconditionally clear the RAP on nested VMRUN, as KVM is
972 * responsible for emulating the host vs. guest tags (L1 is the "host",
973 * L2 is the "guest").
974 */
975 if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS))
976 vmcb02->control.erap_ctl = (vmcb12_ctrl->erap_ctl &
977 ERAP_CONTROL_ALLOW_LARGER_RAP) |
978 ERAP_CONTROL_CLEAR_RAP;
979
980 /*
981 * Merge guest and host intercepts - must be called with vcpu in
982 * guest-mode to take effect.
983 */
984 nested_vmcb02_recalc_intercepts(svm);
985 }
986
987 static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
988 {
989 /*
990 * Some VMCB state is shared between L1 and L2 and thus has to be
991 * moved at the time of nested vmrun and vmexit.
992 *
993 * VMLOAD/VMSAVE state would also belong in this category, but KVM
994 * always performs VMLOAD and VMSAVE from the VMCB01.
995 */
996 to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
997 }
998
999 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, bool from_vmrun)
1000 {
1001 struct vcpu_svm *svm = to_svm(vcpu);
1002 struct vmcb_ctrl_area_cached *control = &svm->nested.ctl;
1003 struct vmcb_save_area_cached *save = &svm->nested.save;
1004 int ret;
1005
1006 trace_kvm_nested_vmenter(svm->vmcb->save.rip,
1007 vmcb12_gpa,
1008 save->rip,
1009 control->int_ctl,
1010 control->event_inj,
1011 control->misc_ctl,
1012 control->nested_cr3,
1013 save->cr3,
1014 KVM_ISA_SVM);
1015
1016 trace_kvm_nested_intercepts(control->intercepts[INTERCEPT_CR] & 0xffff,
1017 control->intercepts[INTERCEPT_CR] >> 16,
1018 control->intercepts[INTERCEPT_EXCEPTION],
1019 control->intercepts[INTERCEPT_WORD3],
1020 control->intercepts[INTERCEPT_WORD4],
1021 control->intercepts[INTERCEPT_WORD5]);
1022
1023
1024 svm->nested.vmcb12_gpa = vmcb12_gpa;
1025
1026 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
1027
1028 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
1029
1030 svm_switch_vmcb(svm, &svm->nested.vmcb02);
1031 nested_vmcb02_prepare_control(svm);
1032 nested_vmcb02_prepare_save(svm);
1033
1034 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
1035 nested_npt_enabled(svm), from_vmrun);
1036 if (ret)
1037 return ret;
1038
1039 if (!from_vmrun)
1040 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1041
1042 svm_set_gif(svm, true);
1043
1044 if (kvm_vcpu_apicv_active(vcpu))
1045 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
1046
1047 nested_svm_hv_update_vm_vp_ids(vcpu);
1048
1049 return 0;
1050 }
1051
1052 static int nested_svm_copy_vmcb12_to_cache(struct kvm_vcpu *vcpu, u64 vmcb12_gpa)
1053 {
1054 struct vcpu_svm *svm = to_svm(vcpu);
1055 struct kvm_host_map map;
1056 struct vmcb *vmcb12;
1057 int r = 0;
1058
1059 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map))
1060 return -EFAULT;
1061
1062 vmcb12 = map.hva;
1063 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
1064 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
1065
1066 if (nested_svm_check_cached_vmcb12(vcpu) < 0) {
1067 vmcb12->control.exit_code = SVM_EXIT_ERR;
1068 vmcb12->control.exit_info_1 = 0;
1069 vmcb12->control.exit_info_2 = 0;
1070 vmcb12->control.event_inj = 0;
1071 vmcb12->control.event_inj_err = 0;
1072 svm_set_gif(svm, false);
1073 r = -EINVAL;
1074 }
1075
1076 kvm_vcpu_unmap(vcpu, &map);
1077 return r;
1078 }
1079
1080 int nested_svm_vmrun(struct kvm_vcpu *vcpu)
1081 {
1082 struct vcpu_svm *svm = to_svm(vcpu);
1083 int ret;
1084 u64 vmcb12_gpa;
1085 struct vmcb *vmcb01 = svm->vmcb01.ptr;
1086
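/* Require L1 to have set up a host save area (MSR_VM_HSAVE_PA) before VMRUN. */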
1087 if (!svm->nested.hsave_msr) {
1088 kvm_inject_gp(vcpu, 0);
1089 return 1;
1090 }
1091
1092 if (is_smm(vcpu)) {
1093 kvm_queue_exception(vcpu, UD_VECTOR);
1094 return 1;
1095 }
1096
1097 /* This fails when VP assist page is enabled but the supplied GPA is bogus */
1098 ret = kvm_hv_verify_vp_assist(vcpu);
1099 if (ret) {
1100 kvm_inject_gp(vcpu, 0);
1101 return ret;
1102 }
1103
1104 if (WARN_ON_ONCE(!svm->nested.initialized))
1105 return -EINVAL;
1106
1107 vmcb12_gpa = kvm_register_read(vcpu, VCPU_REGS_RAX);
1108 if (!page_address_valid(vcpu, vmcb12_gpa)) {
1109 kvm_inject_gp(vcpu, 0);
1110 return 1;
1111 }
1112
1113 ret = nested_svm_copy_vmcb12_to_cache(vcpu, vmcb12_gpa);
1114 if (ret) {
1115 if (ret == -EFAULT)
1116 return kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
1117
1118 /* Advance RIP past VMRUN as part of the nested #VMEXIT. */
1119 return kvm_skip_emulated_instruction(vcpu);
1120 }
1121
1122 /* At this point, VMRUN is guaranteed to not fault; advance RIP. */
1123 ret = kvm_skip_emulated_instruction(vcpu);
1124
1125 /*
1126 * Since vmcb01 is not in use, we can use it to store some of the L1
1127 * state.
1128 */
1129 vmcb01->save.efer = vcpu->arch.efer;
1130 vmcb01->save.cr0 = kvm_read_cr0(vcpu);
1131 vmcb01->save.cr4 = vcpu->arch.cr4;
1132 vmcb01->save.rflags = kvm_get_rflags(vcpu);
1133 vmcb01->save.rip = kvm_rip_read(vcpu);
1134
1135 if (!npt_enabled)
1136 vmcb01->save.cr3 = kvm_read_cr3(vcpu);
1137
1138 vcpu->arch.nested_run_pending = KVM_NESTED_RUN_PENDING;
1139
1140 if (enter_svm_guest_mode(vcpu, vmcb12_gpa, true) ||
1141 !nested_svm_merge_msrpm(vcpu)) {
1142 vcpu->arch.nested_run_pending = 0;
1143 svm->nmi_l1_to_l2 = false;
1144 svm->soft_int_injected = false;
1145
1146 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
1147 svm->vmcb->control.exit_info_1 = 0;
1148 svm->vmcb->control.exit_info_2 = 0;
1149
1150 nested_svm_vmexit(svm);
1151 }
1152
1153 return ret;
1154 }
1155
1156 /* Copy state save area fields which are handled by VMRUN */
1157 void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
1158 struct vmcb_save_area *from_save)
1159 {
1160 to_save->es = from_save->es;
1161 to_save->cs = from_save->cs;
1162 to_save->ss = from_save->ss;
1163 to_save->ds = from_save->ds;
1164 to_save->gdtr = from_save->gdtr;
1165 to_save->idtr = from_save->idtr;
1166 to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
1167 to_save->efer = from_save->efer;
1168 to_save->cr0 = from_save->cr0;
1169 to_save->cr3 = from_save->cr3;
1170 to_save->cr4 = from_save->cr4;
1171 to_save->rax = from_save->rax;
1172 to_save->rsp = from_save->rsp;
1173 to_save->rip = from_save->rip;
1174 to_save->cpl = 0;
1175
1176 if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
1177 to_save->s_cet = from_save->s_cet;
1178 to_save->isst_addr = from_save->isst_addr;
1179 to_save->ssp = from_save->ssp;
1180 }
1181
1182 if (kvm_cpu_cap_has(X86_FEATURE_LBRV)) {
1183 svm_copy_lbrs(to_save, from_save);
1184 to_save->dbgctl &= ~DEBUGCTL_RESERVED_BITS;
1185 }
1186 }
1187
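/*
 * Copy the save area fields handled by VMLOAD/VMSAVE: FS, GS, TR and LDTR
 * (including hidden state), KernelGsBase, STAR/LSTAR/CSTAR/SFMASK, and the
 * SYSENTER MSRs.
 */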
1188 void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
1189 {
1190 to_vmcb->save.fs = from_vmcb->save.fs;
1191 to_vmcb->save.gs = from_vmcb->save.gs;
1192 to_vmcb->save.tr = from_vmcb->save.tr;
1193 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
1194 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
1195 to_vmcb->save.star = from_vmcb->save.star;
1196 to_vmcb->save.lstar = from_vmcb->save.lstar;
1197 to_vmcb->save.cstar = from_vmcb->save.cstar;
1198 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
1199 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
1200 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
1201 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
1202 }
1203
1204 static int nested_svm_vmexit_update_vmcb12(struct kvm_vcpu *vcpu)
1205 {
1206 struct vcpu_svm *svm = to_svm(vcpu);
1207 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
1208 struct kvm_host_map map;
1209 struct vmcb *vmcb12;
1210 int rc;
1211
1212 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
1213 if (rc)
1214 return rc;
1215
1216 vmcb12 = map.hva;
1217
1218 vmcb12->save.es = vmcb02->save.es;
1219 vmcb12->save.cs = vmcb02->save.cs;
1220 vmcb12->save.ss = vmcb02->save.ss;
1221 vmcb12->save.ds = vmcb02->save.ds;
1222 vmcb12->save.gdtr = vmcb02->save.gdtr;
1223 vmcb12->save.idtr = vmcb02->save.idtr;
1224 vmcb12->save.efer = svm->vcpu.arch.efer;
1225 vmcb12->save.cr0 = kvm_read_cr0(vcpu);
1226 vmcb12->save.cr3 = kvm_read_cr3(vcpu);
1227 vmcb12->save.cr2 = vcpu->arch.cr2;
1228 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
1229 vmcb12->save.rflags = kvm_get_rflags(vcpu);
1230 vmcb12->save.rip = kvm_rip_read(vcpu);
1231 vmcb12->save.rsp = kvm_rsp_read(vcpu);
1232 vmcb12->save.rax = kvm_rax_read(vcpu);
1233 vmcb12->save.dr7 = vmcb02->save.dr7;
1234 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
1235 vmcb12->save.cpl = vmcb02->save.cpl;
1236
1237 if (guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK)) {
1238 vmcb12->save.s_cet = vmcb02->save.s_cet;
1239 vmcb12->save.isst_addr = vmcb02->save.isst_addr;
1240 vmcb12->save.ssp = vmcb02->save.ssp;
1241 }
1242
1243 vmcb12->control.int_state = vmcb02->control.int_state;
1244 vmcb12->control.exit_code = vmcb02->control.exit_code;
1245 vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
1246 vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;
1247
1248 if (!svm_is_vmrun_failure(vmcb12->control.exit_code))
1249 nested_save_pending_event_to_vmcb12(svm, vmcb12);
1250
1251 if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
1252 vmcb12->control.next_rip = vmcb02->control.next_rip;
1253
1254 if (nested_vmcb12_has_lbrv(vcpu))
1255 svm_copy_lbrs(&vmcb12->save, &vmcb02->save);
1256
1257 vmcb12->control.event_inj = 0;
1258 vmcb12->control.event_inj_err = 0;
1259 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
1260
1261 trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
1262 vmcb12->control.exit_info_1,
1263 vmcb12->control.exit_info_2,
1264 vmcb12->control.exit_int_info,
1265 vmcb12->control.exit_int_info_err,
1266 KVM_ISA_SVM);
1267
1268 kvm_vcpu_unmap(vcpu, &map);
1269 return 0;
1270 }
1271
1272 void nested_svm_vmexit(struct vcpu_svm *svm)
1273 {
1274 struct kvm_vcpu *vcpu = &svm->vcpu;
1275 struct vmcb *vmcb01 = svm->vmcb01.ptr;
1276 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
1277
1278 if (nested_svm_vmexit_update_vmcb12(vcpu))
1279 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1280
1281 /* Exit Guest-Mode */
1282 leave_guest_mode(vcpu);
1283 svm->nested.vmcb12_gpa = 0;
1284
1285 kvm_warn_on_nested_run_pending(vcpu);
1286
1287 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1288
1289 /* in case we halted in L2 */
1290 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
1291
1292 /*
1293 * Invalidate last_bus_lock_rip unless KVM is still waiting for the
1294 * guest to make forward progress before re-enabling bus lock detection.
1295 */
1296 if (!vmcb02->control.bus_lock_counter)
1297 svm->nested.last_bus_lock_rip = INVALID_GPA;
1298
1299 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
1300
1301 kvm_nested_vmexit_handle_ibrs(vcpu);
1302
1303 if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS))
1304 vmcb01->control.erap_ctl |= ERAP_CONTROL_CLEAR_RAP;
1305
1306 svm_switch_vmcb(svm, &svm->vmcb01);
1307
1308 /*
1309 * Rules for synchronizing int_ctl bits from vmcb02 to vmcb01:
1310 *
1311 * V_IRQ, V_IRQ_VECTOR, V_INTR_PRIO_MASK, V_IGN_TPR: If L1 doesn't
1312 * intercept interrupts, then KVM will use vmcb02's V_IRQ (and related
1313 * flags) to detect interrupt windows for L1 IRQs (even if L1 uses
1314 * virtual interrupt masking). Raise KVM_REQ_EVENT to ensure that
1315 * KVM re-requests an interrupt window if necessary, which implicitly
1316 * copies these bits from vmcb02 to vmcb01.
1317 *
1318 * V_TPR: If L1 doesn't use virtual interrupt masking, then L1's vTPR
1319 * is stored in vmcb02, but its value doesn't need to be copied from/to
1320 * vmcb01 because it is copied from/to the virtual APIC's TPR register
1321 * on each VM entry/exit.
1322 *
1323 * V_GIF: If nested vGIF is not used, KVM uses vmcb02's V_GIF for L1's
1324 * V_GIF. However, GIF is architecturally cleared on each VM exit, thus
1325 * there is no need to copy V_GIF from vmcb02 to vmcb01.
1326 */
1327 if (!nested_exit_on_intr(svm))
1328 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
1329
1330 if (!nested_vmcb12_has_lbrv(vcpu)) {
1331 svm_copy_lbrs(&vmcb01->save, &vmcb02->save);
1332 vmcb_mark_dirty(vmcb01, VMCB_LBR);
1333 }
1334
1335 svm_update_lbrv(vcpu);
1336
1337 if (vnmi) {
1338 if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
1339 vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
1340 else
1341 vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
1342
1343 if (vcpu->arch.nmi_pending) {
1344 vcpu->arch.nmi_pending--;
1345 vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
1346 } else {
1347 vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
1348 }
1349 }
1350
1351 /*
1352 * On vmexit the GIF is set to false and
1353 * no event can be injected in L1.
1354 */
1355 svm_set_gif(svm, false);
1356 vmcb01->control.exit_int_info = 0;
1357
1358 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
1359 if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1360 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1361 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
1362 }
1363
1364 if (kvm_caps.has_tsc_control &&
1365 vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
1366 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
1367 svm_write_tsc_multiplier(vcpu);
1368 }
1369
1370 svm->nested.ctl.nested_cr3 = 0;
1371
1372 /*
1373 * Restore processor state that had been saved in vmcb01
1374 */
1375 kvm_set_rflags(vcpu, vmcb01->save.rflags);
1376 svm_set_efer(vcpu, vmcb01->save.efer);
1377 svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1378 svm_set_cr4(vcpu, vmcb01->save.cr4);
1379 kvm_rax_write(vcpu, vmcb01->save.rax);
1380 kvm_rsp_write(vcpu, vmcb01->save.rsp);
1381 kvm_rip_write(vcpu, vmcb01->save.rip);
1382
1383 svm->vcpu.arch.dr7 = DR7_FIXED_1;
1384 kvm_update_dr7(&svm->vcpu);
1385
1386 nested_svm_transition_tlb_flush(vcpu);
1387
1388 nested_svm_uninit_mmu_context(vcpu);
1389
1390 if (nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true))
1391 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1392
1393 /* Drop tracking for L1->L2 injected NMIs and soft IRQs */
1394 svm->nmi_l1_to_l2 = false;
1395 svm->soft_int_injected = false;
1396
1397 /*
1398 * Drop what we picked up for L2 via svm_complete_interrupts() so it
1399 * doesn't end up in L1.
1400 */
1401 svm->vcpu.arch.nmi_injected = false;
1402 kvm_clear_exception_queue(vcpu);
1403 kvm_clear_interrupt_queue(vcpu);
1404
1405 /*
1406 * If we are here following the completion of a VMRUN that
1407 * is being single-stepped, queue the pending #DB intercept
1408 * right now so that it can be accounted for before we execute
1409 * L1's next instruction.
1410 */
1411 if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
1412 kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
1413
1414 /*
1415 * Un-inhibit the AVIC right away, so that other vCPUs can start
1416 * to benefit from it immediately.
1417 */
1418 if (kvm_apicv_activated(vcpu->kvm))
1419 __kvm_vcpu_update_apicv(vcpu);
1420 }
1421
1422 static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
1423 {
1424 struct vcpu_svm *svm = to_svm(vcpu);
1425
1426 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
1427 return;
1428
1429 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1430 nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
1431 }
1432
1433 int svm_allocate_nested(struct vcpu_svm *svm)
1434 {
1435 struct page *vmcb02_page;
1436
1437 if (svm->nested.initialized)
1438 return 0;
1439
1440 vmcb02_page = snp_safe_alloc_page();
1441 if (!vmcb02_page)
1442 return -ENOMEM;
1443 svm->nested.vmcb02.ptr = page_address(vmcb02_page);
1444 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
1445
1446 svm->nested.msrpm = svm_vcpu_alloc_msrpm();
1447 if (!svm->nested.msrpm)
1448 goto err_free_vmcb02;
1449
1450 svm->nested.initialized = true;
1451 return 0;
1452
1453 err_free_vmcb02:
1454 __free_page(vmcb02_page);
1455 return -ENOMEM;
1456 }
1457
1458 void svm_free_nested(struct vcpu_svm *svm)
1459 {
1460 if (!svm->nested.initialized)
1461 return;
1462
1463 if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
1464 svm_switch_vmcb(svm, &svm->vmcb01);
1465
1466 svm_vcpu_free_msrpm(svm->nested.msrpm);
1467 svm->nested.msrpm = NULL;
1468
1469 __free_page(virt_to_page(svm->nested.vmcb02.ptr));
1470 svm->nested.vmcb02.ptr = NULL;
1471
1472 /*
1473 * When last_vmcb12_gpa matches the current vmcb12 gpa,
1474 * some vmcb12 fields are not loaded if they are marked clean
1475 * in the vmcb12, since in this case they are up to date already.
1476 *
1477 * When the vmcb02 is freed, this optimization becomes invalid.
1478 */
1479 svm->nested.last_vmcb12_gpa = INVALID_GPA;
1480
1481 svm->nested.initialized = false;
1482 }
1483
1484 void svm_leave_nested(struct kvm_vcpu *vcpu)
1485 {
1486 struct vcpu_svm *svm = to_svm(vcpu);
1487
1488 if (is_guest_mode(vcpu)) {
1489 vcpu->arch.nested_run_pending = 0;
1490 svm->nested.vmcb12_gpa = INVALID_GPA;
1491
1492 leave_guest_mode(vcpu);
1493
1494 svm_switch_vmcb(svm, &svm->vmcb01);
1495
1496 nested_svm_uninit_mmu_context(vcpu);
1497 vmcb_mark_all_dirty(svm->vmcb);
1498
1499 svm_set_gif(svm, true);
1500
1501 if (kvm_apicv_activated(vcpu->kvm))
1502 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
1503 }
1504
1505 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1506 }
1507
1508 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1509 {
1510 gpa_t base = svm->nested.ctl.msrpm_base_pa;
1511 int write, bit_nr;
1512 u8 value, mask;
1513 u32 msr;
1514
1515 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1516 return NESTED_EXIT_HOST;
1517
1518 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1519 bit_nr = svm_msrpm_bit_nr(msr);
1520 write = svm->vmcb->control.exit_info_1 & 1;
1521
1522 if (bit_nr < 0)
1523 return NESTED_EXIT_DONE;
1524
1525 if (kvm_vcpu_read_guest(&svm->vcpu, base + bit_nr / BITS_PER_BYTE,
1526 &value, sizeof(value)))
1527 return NESTED_EXIT_DONE;
1528
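/*
 * Each MSR has two adjacent bits in the MSRPM, read then write; pick the
 * relevant bit within the byte read from L1's bitmap.
 */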
1529 mask = BIT(write) << (bit_nr & (BITS_PER_BYTE - 1));
1530 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1531 }
1532
1533 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1534 {
1535 unsigned port, size, iopm_len;
1536 u16 val, mask;
1537 u8 start_bit;
1538 u64 gpa;
1539
1540 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1541 return NESTED_EXIT_HOST;
1542
1543 port = svm->vmcb->control.exit_info_1 >> 16;
1544 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1545 SVM_IOIO_SIZE_SHIFT;
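/*
 * The IOPM has one bit per I/O port; an access of 'size' bytes covers
 * 'size' consecutive bits, which may straddle a byte boundary, hence the
 * read of up to two bytes from L1's IOPM.
 */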
1546 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
1547 start_bit = port % 8;
1548 iopm_len = (start_bit + size > 8) ? 2 : 1;
1549 mask = (0xf >> (4 - size)) << start_bit;
1550 val = 0;
1551
1552 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1553 return NESTED_EXIT_DONE;
1554
1555 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1556 }
1557
1558 static int nested_svm_intercept(struct vcpu_svm *svm)
1559 {
1560 u64 exit_code = svm->vmcb->control.exit_code;
1561 int vmexit = NESTED_EXIT_HOST;
1562
1563 if (svm_is_vmrun_failure(exit_code))
1564 return NESTED_EXIT_DONE;
1565
1566 switch (exit_code) {
1567 case SVM_EXIT_MSR:
1568 vmexit = nested_svm_exit_handled_msr(svm);
1569 break;
1570 case SVM_EXIT_IOIO:
1571 vmexit = nested_svm_intercept_ioio(svm);
1572 break;
1573 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f:
1574 /*
1575 * Host-intercepted exceptions have been checked already in
1576 * nested_svm_exit_special. There is nothing to do here,
1577 * the vmexit is injected by svm_check_nested_events.
1578 */
1579 vmexit = NESTED_EXIT_DONE;
1580 break;
1581 default:
1582 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1583 vmexit = NESTED_EXIT_DONE;
1584 break;
1585 }
1586
1587 return vmexit;
1588 }
1589
1590 int nested_svm_exit_handled(struct vcpu_svm *svm)
1591 {
1592 int vmexit;
1593
1594 vmexit = nested_svm_intercept(svm);
1595
1596 if (vmexit == NESTED_EXIT_DONE)
1597 nested_svm_vmexit(svm);
1598
1599 return vmexit;
1600 }
1601
1602 int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1603 {
1604 if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
1605 kvm_queue_exception(vcpu, UD_VECTOR);
1606 return 1;
1607 }
1608
1609 if (to_svm(vcpu)->vmcb->save.cpl) {
1610 kvm_inject_gp(vcpu, 0);
1611 return 1;
1612 }
1613
1614 return 0;
1615 }
1616
1617 static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
1618 u32 error_code)
1619 {
1620 struct vcpu_svm *svm = to_svm(vcpu);
1621
1622 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
1623 }
1624
1625 static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
1626 {
1627 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
1628 struct vcpu_svm *svm = to_svm(vcpu);
1629 struct vmcb *vmcb = svm->vmcb;
1630
1631 vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
1632
1633 if (ex->has_error_code)
1634 vmcb->control.exit_info_1 = ex->error_code;
1635
1636 /*
1637 * EXITINFO2 is undefined for all exception intercepts other
1638 * than #PF.
1639 */
1640 if (ex->vector == PF_VECTOR) {
1641 if (ex->has_payload)
1642 vmcb->control.exit_info_2 = ex->payload;
1643 else
1644 vmcb->control.exit_info_2 = vcpu->arch.cr2;
1645 } else if (ex->vector == DB_VECTOR) {
1646 /* See kvm_check_and_inject_events(). */
1647 kvm_deliver_exception_payload(vcpu, ex);
1648
1649 if (vcpu->arch.dr7 & DR7_GD) {
1650 vcpu->arch.dr7 &= ~DR7_GD;
1651 kvm_update_dr7(vcpu);
1652 }
1653 } else {
1654 WARN_ON(ex->has_payload);
1655 }
1656
1657 nested_svm_vmexit(svm);
1658 }
1659
1660 static inline bool nested_exit_on_init(struct vcpu_svm *svm)
1661 {
1662 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
1663 }
1664
1665 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1666 {
1667 struct kvm_lapic *apic = vcpu->arch.apic;
1668 struct vcpu_svm *svm = to_svm(vcpu);
1669 /*
1670 * Only a pending nested run blocks a pending exception. If there is a
1671 * previously injected event, the pending exception occurred while said
1672 * event was being delivered and thus needs to be handled.
1673 */
1674 bool block_nested_exceptions = vcpu->arch.nested_run_pending;
1675 /*
1676 * New events (not exceptions) are only recognized at instruction
1677 * boundaries. If an event needs reinjection, then KVM is handling a
1678 * VM-Exit that occurred _during_ instruction execution; new events are
1679 * blocked until the instruction completes.
1680 */
1681 bool block_nested_events = block_nested_exceptions ||
1682 kvm_event_needs_reinjection(vcpu);
1683
1684 if (lapic_in_kernel(vcpu) &&
1685 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
1686 if (block_nested_events)
1687 return -EBUSY;
1688 if (!nested_exit_on_init(svm))
1689 return 0;
1690 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
1691 return 0;
1692 }
1693
1694 if (vcpu->arch.exception_vmexit.pending) {
1695 if (block_nested_exceptions)
1696 return -EBUSY;
1697 nested_svm_inject_exception_vmexit(vcpu);
1698 return 0;
1699 }
1700
1701 if (vcpu->arch.exception.pending) {
1702 if (block_nested_exceptions)
1703 return -EBUSY;
1704 return 0;
1705 }
1706
1707 #ifdef CONFIG_KVM_SMM
1708 if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
1709 if (block_nested_events)
1710 return -EBUSY;
1711 if (!nested_exit_on_smi(svm))
1712 return 0;
1713 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
1714 return 0;
1715 }
1716 #endif
1717
1718 if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
1719 if (block_nested_events)
1720 return -EBUSY;
1721 if (!nested_exit_on_nmi(svm))
1722 return 0;
1723 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
1724 return 0;
1725 }
1726
1727 if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1728 if (block_nested_events)
1729 return -EBUSY;
1730 if (!nested_exit_on_intr(svm))
1731 return 0;
1732 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1733 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1734 return 0;
1735 }
1736
1737 return 0;
1738 }
1739
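/*
 * Identify #VMEXITs that KVM (L0) must handle before considering whether to
 * reflect the exit to L1: physical INTR and NMI, NPT faults, async page
 * faults, exceptions that KVM itself intercepts via vmcb01, and the Hyper-V
 * L2 TLB flush hypercall.  Returns NESTED_EXIT_HOST for such exits and
 * NESTED_EXIT_CONTINUE otherwise.
 */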
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	case SVM_EXIT_VMMCALL:
		/* Hyper-V L2 TLB flush hypercall is handled by L0 */
		if (nested_svm_is_l2_tlb_flush_hcall(vcpu))
			return NESTED_EXIT_HOST;
		break;
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

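/*
 * Recompute the effective TSC scaling ratio for L2, i.e. the combination of
 * L1's scaling ratio and the ratio L1 has programmed for L2 via the
 * TSC_RATIO MSR, and propagate it to hardware.
 */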
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->arch.tsc_scaling_ratio =
		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
					       svm->tsc_ratio_msr);
	svm_write_tsc_multiplier(vcpu);
}

/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
					      struct vmcb_ctrl_area_cached *from)
{
	unsigned int i;

	memset(dst, 0, sizeof(struct vmcb_control_area));

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	dst->asid = from->asid;
	dst->tlb_ctl = from->tlb_ctl;
	dst->erap_ctl = from->erap_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->misc_ctl = from->misc_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->next_rip = from->next_rip;
	dst->nested_cr3 = from->nested_cr3;
	dst->misc_ctl2 = from->misc_ctl2;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
	/* 'clean' and 'hv_enlightenments' are not changed by KVM */
}

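/*
 * Save nested state for userspace (KVM_GET_NESTED_STATE): the fixed-size
 * header is followed, when the vCPU is in guest mode, by vmcb12's control
 * area (rebuilt from the cached copy) and the L1 save state from vmcb01.
 */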
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct vmcb_control_area *ctl;
	unsigned long r;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out. */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (vcpu->arch.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;

	ctl = kzalloc_obj(*ctl);
	if (!ctl)
		return -ENOMEM;

	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
	r = copy_to_user(&user_vmcb->control, ctl,
			 sizeof(user_vmcb->control));
	kfree(ctl);
	if (r)
		return -EFAULT;

	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;
out:
	return kvm_state.size;
}

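/*
 * Restore nested state from userspace (KVM_SET_NESTED_STATE): validate the
 * flags and the supplied vmcb12 control/save areas, then (re)enter guest
 * mode by switching to vmcb02, roughly mirroring an emulated VMRUN.
 */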
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	struct vmcb_save_area_cached save_cached;
	struct vmcb_ctrl_area_cached ctl_cached;
	unsigned long cr0;
	int ret;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 * If SVME is disabled, the only valid states are "none" and GIF=1
	 * (clearing SVME does NOT set GIF, i.e. GIF=0 is allowed).
	 */
	if (!(vcpu->arch.efer & EFER_SVME) && kvm_state->flags &&
	    kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
		return -EINVAL;

	/* SMM temporarily disables SVM, so we cannot be in guest mode. */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(vcpu);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ctl = memdup_user(&user_vmcb->control, sizeof(*ctl));
	if (IS_ERR(ctl))
		return PTR_ERR(ctl);

	save = memdup_user(&user_vmcb->save, sizeof(*save));
	if (IS_ERR(save)) {
		kfree(ctl);
		return PTR_ERR(save);
	}

	ret = -EINVAL;
	__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
	if (!nested_vmcb_check_controls(vcpu, &ctl_cached))
		goto out_free;

	/*
	 * Processor state contains L2 state. Check that it is
	 * valid for guest mode (see nested_vmcb_check_save()).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 */
	__nested_copy_vmcb_save_to_cache(&save_cached, save);
	if (!(save->cr0 & X86_CR0_PG) ||
	    !(save->cr0 & X86_CR0_PE) ||
	    (save->rflags & X86_EFLAGS_VM) ||
	    !nested_vmcb_check_save(vcpu, &save_cached))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode. Userspace provides
	 * vmcb12.control, which will be combined with L1 and stored into
	 * vmcb02, and the L1 save state which we store in vmcb01.
	 * L2 registers if needed are moved from the current VMCB to VMCB02.
	 */

	if (is_guest_mode(vcpu))
		svm_leave_nested(vcpu);
	else
		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;

	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));

	if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
		vcpu->arch.nested_run_pending = KVM_NESTED_RUN_PENDING_UNTRUSTED;
	else
		vcpu->arch.nested_run_pending = 0;

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;

	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
	nested_copy_vmcb_control_to_cache(svm, ctl);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);

	/*
	 * Any previously restored state (e.g. KVM_SET_SREGS) would mark fields
	 * dirty in vmcb01 instead of vmcb02, so mark all of vmcb02 dirty here.
	 */
	vmcb_mark_all_dirty(svm->vmcb);

	/*
	 * While the nested guest CR3 is already checked and set by
	 * KVM_SET_SREGS, it was set before the nested state was loaded, and
	 * thus the MMU might not have been initialized correctly.
	 * Set CR3 again to fix this.
	 */
	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
				  nested_npt_enabled(svm), false);
	if (ret)
		goto out_free;

	svm->nested.force_msr_bitmap_recalc = true;

	if (kvm_vcpu_apicv_active(vcpu))
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

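/*
 * Finish loading nested state that depends on guest memory, e.g. after
 * migration: reload the PDPTRs when shadow paging with PAE is in use, merge
 * L0's and L1's MSR permission bitmaps, and verify the Hyper-V VP assist
 * page.  Returns false on failure.
 */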
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!vcpu->arch.pdptrs_from_userspace &&
	    !nested_npt_enabled(to_svm(vcpu)) && is_pae_paging(vcpu))
		/*
		 * Reload the guest's PDPTRs since after a migration
		 * the guest CR3 might be restored prior to setting the nested
		 * state which can lead to a load of wrong PDPTRs.
		 */
		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
			return false;

	if (!nested_svm_merge_msrpm(vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	if (kvm_hv_verify_vp_assist(vcpu))
		return false;

	return true;
}

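/* Callbacks used by common x86 code to drive nested SVM operation. */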
struct kvm_x86_nested_ops svm_nested_ops = {
	.leave_nested = svm_leave_nested,
	.is_exception_vmexit = nested_svm_is_exception_vmexit,
	.check_events = svm_check_nested_events,
	.triple_fault = nested_svm_triple_fault,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
	.hv_inject_synthetic_vmexit_post_tlb_flush = svm_hv_inject_synthetic_vmexit_post_tlb_flush,
};