// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity  <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"

/*
 * Helpers to convert to/from physical addresses for pages whose address is
 * consumed directly by hardware. Even though it's a physical address, SVM
 * often restricts the address to the natural width, hence 'unsigned long'
 * instead of 'hpa_t'.
 */
static inline unsigned long __sme_page_pa(struct page *page)
{
	return __sme_set(page_to_pfn(page) << PAGE_SHIFT);
}

static inline struct page *__sme_pa_to_page(unsigned long pa)
{
	return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT);
}
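
/*
 * Illustrative round trip (a sketch, not part of this header's API): the
 * C-bit applied by __sme_set() must be stripped again before the address
 * can be converted back to a struct page:
 *
 *	struct page *page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 *	unsigned long pa = __sme_page_pa(page);
 *
 *	... hand 'pa' to hardware, e.g. as an IOPM or MSRPM base ...
 *
 *	__free_page(__sme_pa_to_page(pa));
 */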

#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

extern bool npt_enabled;
extern int nrips;
extern int vgif;
extern bool intercept_smi;
extern bool vnmi;
extern int lbrv;

extern int tsc_aux_uret_slot __ro_after_init;

extern struct kvm_x86_ops svm_x86_ops __initdata;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_CET,	 /* S_CET, SSP, ISST_ADDR */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (						\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |		\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |				\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |		\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |		\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) | (1U << VMCB_CET) |	\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
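
/*
 * Illustrative usage (a sketch; real call sites live in svm.c): after
 * directly modifying a field that hardware may have cached, the matching
 * clean bit must be cleared via vmcb_mark_dirty() (defined below) so the
 * CPU reloads it on the next VMRUN:
 *
 *	svm->vmcb->save.cr4 = cr4;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 */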

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	bool need_init;		/* waiting for SEV_INIT2 */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long policy;
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	u64 vmsa_features;
	u16 ghcb_version;	/* Highest guest GHCB protocol version allowed */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
	void *snp_context;	/* SNP guest context page */
	void *guest_req_buf;	/* Bounce buffer for SNP Guest Request input */
	void *guest_resp_buf;	/* Bounce buffer for SNP Guest Request output */
	struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
	cpumask_var_t have_run_cpus; /* CPUs that have done VMRUN for this VM. */
	bool snp_certs_enabled;	/* SNP certificate-fetching support. */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	u32 *avic_logical_id_table;
	u64 *avic_physical_id_table;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u8 erap_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u64 exit_code;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	u64 bus_lock_rip;
	union {
#if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
		struct hv_vmcb_enlightenments hv_enlightenments;
#endif
		u8 reserved_sw[32];
	};
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/*
	 * The MSR permissions map used for vmcb02, which is the merged result
	 * of vmcb01 and vmcb12.
	 */
	void *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * Note, this flag can only be used reliably in conjunction with a
	 * paravirt L1 which informs L0 whether any changes to the MSR bitmap
	 * for L2 were done on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;
	unsigned int ap_reset_hold_type;

	/* SEV-ES scratch area support */
	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	/* SNP Page-State-Change buffer entries currently being processed */
	u16 psc_idx;
	u16 psc_inflight;
	bool psc_2m;

	u64 ghcb_registered_gpa;

	struct mutex snp_vmsa_mutex; /* Used to handle concurrent updates of VMSA. */
	gpa_t snp_vmsa_gpa;
	bool snp_ap_waiting_for_reset;
	bool snp_has_guest_vmsa;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr; it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	void *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;

	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;

	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection). KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	u32 ldr_reg;
	u32 dfr_reg;

	/*
	 * This is essentially a shadow of the vCPU's actual entry in the
	 * Physical ID table that is programmed into the VMCB, i.e. that is
	 * seen by the CPU. If IPI virtualization is disabled, IsRunning is
	 * only ever set in the shadow, i.e. is never propagated to the "real"
	 * table, so that hardware never sees IsRunning=1.
	 */
	u64 avic_physical_id_entry;

	/*
	 * Per-vCPU list of irqfds that are eligible to post IRQs directly to
	 * the vCPU (a.k.a. device posted IRQs, a.k.a. IRQ bypass). The list
	 * is used to reconfigure IRTEs when the vCPU is loaded/put (to set the
	 * target pCPU), when AVIC is toggled on/off (to (de)activate bypass),
	 * and if the irqfd becomes ineligible for posting (to put the IRTE
	 * back into remapped mode).
	 */
	struct list_head ir_list;
	raw_spinlock_t ir_list_lock;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;
	bool lbr_msrs_intercepted;

	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};

struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	bool bp_spec_reduce_set;

	struct vmcb *save_area;
	unsigned long save_area_pa;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline struct kvm_sev_info *to_kvm_sev_info(struct kvm *kvm)
{
	return &to_kvm_svm(kvm)->sev_info;
}

#ifdef CONFIG_KVM_AMD_SEV
static __always_inline bool sev_guest(struct kvm *kvm)
{
	return to_kvm_sev_info(kvm)->active;
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
}

static __always_inline bool sev_snp_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
	       !WARN_ON_ONCE(!sev_es_guest(kvm));
}
#else
#define sev_guest(kvm) false
#define sev_es_guest(kvm) false
#define sev_snp_guest(kvm) false
#endif

static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
{
	return svm->sev_es.ghcb_registered_gpa == val;
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline bool svm_is_vmrun_failure(u64 exit_code)
{
	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return (u32)exit_code == (u32)SVM_EXIT_ERR;

	return exit_code == SVM_EXIT_ERR;
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU. All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed. svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)
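
/*
 * Illustrative flow (a sketch, not the definitive call chain): when KVM
 * changes the cached CR3 it requests a PGD load rather than writing the
 * VMCB directly; svm_load_mmu_pgd() later performs the actual sync:
 *
 *	vcpu->arch.cr3 = new_cr3;
 *	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 */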

static inline void __vmcb_set_intercept(unsigned long *intercepts, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, intercepts);
}

static inline void __vmcb_clr_intercept(unsigned long *intercepts, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, intercepts);
}

static inline bool __vmcb_is_intercept(unsigned long *intercepts, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, intercepts);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	__vmcb_set_intercept((unsigned long *)&control->intercepts, bit);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	__vmcb_clr_intercept((unsigned long *)&control->intercepts, bit);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	return __vmcb_is_intercept((unsigned long *)&control->intercepts, bit);
}

static inline void vmcb12_clr_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	__vmcb_clr_intercept((unsigned long *)&control->intercepts, bit);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	return __vmcb_is_intercept((unsigned long *)&control->intercepts, bit);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VGIF) &&
	       (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 MSRs per u8, and 4 u8s per u32, i.e. 16 MSRs per u32 offset. */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}
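
/*
 * Worked example: offset 0x80 covers MSRs 0x80 * 16 = 0x800 through 0x80f;
 * since APIC_BASE_MSR is 0x800, is_x2apic_msrpm_offset(0x80) returns true,
 * while offset 0x90 maps to MSR 0x900 and falls outside the x2APIC range.
 */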

static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}

static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}

static inline void svm_vmgexit_set_return_code(struct vcpu_svm *svm,
					       u64 response, u64 data)
{
	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, response);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, data);
}

static inline void svm_vmgexit_inject_exception(struct vcpu_svm *svm, u8 vector)
{
	u64 data = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT | vector;

	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_ISSUE_EXCEPTION, data);
}

static inline void svm_vmgexit_bad_input(struct vcpu_svm *svm, u64 suberror)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_MALFORMED_INPUT, suberror);
}

static inline void svm_vmgexit_success(struct vcpu_svm *svm, u64 data)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}

static inline void svm_vmgexit_no_action(struct vcpu_svm *svm, u64 data)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}
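
/*
 * Illustrative usage (a sketch; the real callers are the VMGEXIT handlers
 * in sev.c): rejecting a request whose required GHCB field was not
 * supplied by the guest:
 *
 *	if (!kvm_ghcb_sw_scratch_is_valid(svm))
 *		svm_vmgexit_bad_input(svm, GHCB_ERR_MISSING_INPUT);
 */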

/*
 * The MSRPM is 8KiB in size, divided into four 2KiB ranges (the fourth range
 * is reserved). Each MSR within a range is covered by two bits, one each for
 * read (bit 0) and write (bit 1), where a bit value of '1' means intercepted.
 */
#define SVM_MSRPM_BYTES_PER_RANGE 2048
#define SVM_BITS_PER_MSR 2
#define SVM_MSRS_PER_BYTE (BITS_PER_BYTE / SVM_BITS_PER_MSR)
#define SVM_MSRS_PER_RANGE (SVM_MSRPM_BYTES_PER_RANGE * SVM_MSRS_PER_BYTE)
static_assert(SVM_MSRS_PER_RANGE == 8192);
#define SVM_MSRPM_OFFSET_MASK (SVM_MSRS_PER_RANGE - 1)

static __always_inline int svm_msrpm_bit_nr(u32 msr)
{
	int range_nr;

	switch (msr & ~SVM_MSRPM_OFFSET_MASK) {
	case 0:
		range_nr = 0;
		break;
	case 0xc0000000:
		range_nr = 1;
		break;
	case 0xc0010000:
		range_nr = 2;
		break;
	default:
		return -EINVAL;
	}

	return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +
	       (msr & SVM_MSRPM_OFFSET_MASK) * SVM_BITS_PER_MSR;
}
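
/*
 * Worked example: MSR_STAR (0xc0000081) masks to 0xc0000000 and thus lands
 * in range 1, so its read-intercept bit is
 *	1 * 2048 * 8 + 0x81 * 2 = 16642
 * and the corresponding write-intercept bit is 16643.
 */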

#define __BUILD_SVM_MSR_BITMAP_HELPER(rtype, action, bitop, access, bit_rw)	\
static inline rtype svm_##action##_msr_bitmap_##access(unsigned long *bitmap,	\
						       u32 msr)			\
{										\
	int bit_nr;								\
										\
	bit_nr = svm_msrpm_bit_nr(msr);						\
	if (bit_nr < 0)								\
		return (rtype)true;						\
										\
	return bitop##_bit(bit_nr + bit_rw, bitmap);				\
}

#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop)			\
	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0)	\
	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 1)

BUILD_SVM_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
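
/*
 * The helpers generated above are svm_test_msr_bitmap_{read,write}(),
 * svm_clear_msr_bitmap_{read,write}() and svm_set_msr_bitmap_{read,write}().
 * A sketch of intercepting writes to an MSR while passing reads through:
 *
 *	svm_clear_msr_bitmap_read(msrpm, MSR_STAR);
 *	svm_set_msr_bitmap_write(msrpm, MSR_STAR);
 */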

#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)

/* svm.c */
extern bool dump_invalid_vmcb;

void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask);

static inline void *svm_vcpu_alloc_msrpm(void)
{
	return svm_alloc_permissions_map(MSRPM_SIZE, GFP_KERNEL_ACCOUNT);
}

void svm_vcpu_free_msrpm(void *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);

static inline void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
						 u32 msr, int type)
{
	svm_set_intercept_for_msr(vcpu, msr, type, false);
}

static inline void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
						u32 msr, int type)
{
	svm_set_intercept_for_msr(vcpu, msr, type, true);
}

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int __init nested_svm_init_msrpm_merge_offsets(void);

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */
#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLED) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV) |			\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG)	\
)

bool __init avic_hardware_setup(void);
void avic_hardware_unsetup(void);
int avic_alloc_physical_id_table(struct kvm *kvm);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
			unsigned int host_irq, uint32_t guest_irq,
			struct kvm_vcpu *vcpu, u32 vector);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

int pre_sev_run(struct vcpu_svm *svm, int cpu);
void sev_init_vmcb(struct vcpu_svm *svm, bool init_event);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

#ifdef CONFIG_KVM_AMD_SEV
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);

/* These symbols are used in common code and are stubbed below. */

struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);
static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

int sev_vcpu_create(struct kvm_vcpu *vcpu);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
void sev_vm_destroy(struct kvm *kvm);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
extern unsigned int max_sev_asid;
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
#else
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
	return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
}

static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

static inline int sev_vcpu_create(struct kvm_vcpu *vcpu) { return 0; }
static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
static inline void sev_vm_destroy(struct kvm *kvm) {}
static inline void __init sev_set_cpu_caps(void) {}
static inline void __init sev_hardware_setup(void) {}
static inline void sev_hardware_unsetup(void) {}
static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
#define max_sev_asid 0
static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
	return 0;
}
static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
static inline int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private)
{
	return 0;
}

static inline struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
{
	return NULL;
}
static inline void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa) {}
#endif

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
			   struct sev_es_save_area *hostsa);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#define DEFINE_KVM_GHCB_ACCESSORS(field)					\
	static __always_inline u64 kvm_ghcb_get_##field(struct vcpu_svm *svm)	\
	{									\
		return READ_ONCE(svm->sev_es.ghcb->save.field);			\
	}									\
										\
	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
	{									\
		return test_bit(GHCB_BITMAP_IDX(field),				\
				(unsigned long *)&svm->sev_es.valid_bitmap);	\
	}									\
										\
	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm) \
	{									\
		return kvm_ghcb_##field##_is_valid(svm) ? kvm_ghcb_get_##field(svm) : 0; \
	}

DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)
DEFINE_KVM_GHCB_ACCESSORS(xss)
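
/*
 * Illustrative use of the generated accessors (a sketch; the real users
 * are the VMGEXIT handlers in sev.c): copy a GHCB register into vCPU
 * state only if the guest marked it valid:
 *
 *	if (kvm_ghcb_rax_is_valid(svm))
 *		vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax(svm);
 */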

#endif