// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"

/*
 * Helpers to convert to/from physical addresses for pages whose address is
 * consumed directly by hardware.  Even though it's a physical address, SVM
 * often restricts the address to the natural width, hence 'unsigned long'
 * instead of 'hpa_t'.
 */
static inline unsigned long __sme_page_pa(struct page *page)
{
	return __sme_set(page_to_pfn(page) << PAGE_SHIFT);
}

static inline struct page *__sme_pa_to_page(unsigned long pa)
{
	return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT);
}
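
/*
 * Usage sketch (illustrative, not a kernel call site): the C-bit added by
 * __sme_set() must be stripped via __sme_clr() before the address can be
 * converted back, so the round trip below holds on SME-enabled hosts too.
 *
 *	unsigned long pa = __sme_page_pa(page);
 *	WARN_ON(__sme_pa_to_page(pa) != page);
 */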

#define	IOPM_SIZE (PAGE_SIZE * 3)
#define	MSRPM_SIZE (PAGE_SIZE * 2)

extern bool npt_enabled;
extern int nrips;
extern int vgif;
extern bool intercept_smi;
extern bool vnmi;
extern int lbrv;

extern int tsc_aux_uret_slot __ro_after_init;

extern struct kvm_x86_ops svm_x86_ops __initdata;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_CET,	 /* S_CET, SSP, ISST_ADDR */
	VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) | (1U << VMCB_CET) | \
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	bool need_init;		/* waiting for SEV_INIT2 */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long policy;
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	u64 vmsa_features;
	u16 ghcb_version;	/* Highest guest GHCB protocol version allowed */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
	void *snp_context;      /* SNP guest context page */
	void *guest_req_buf;    /* Bounce buffer for SNP Guest Request input */
	void *guest_resp_buf;   /* Bounce buffer for SNP Guest Request output */
	struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
	cpumask_var_t have_run_cpus; /* CPUs that have done VMRUN for this VM. */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	u32 *avic_logical_id_table;
	u64 *avic_physical_id_table;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	u64 bus_lock_rip;
	union {
#if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
		struct hv_vmcb_enlightenments hv_enlightenments;
#endif
		u8 reserved_sw[32];
	};
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/*
	 * The MSR permissions map used for vmcb02, i.e. the merge of the
	 * vmcb01 and vmcb12 permissions maps.
	 */
	void *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so we cannot
	 * inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * Note, this flag can only be used reliably in conjunction with a
	 * paravirt L1 which informs L0 whether any changes to the MSR bitmap
	 * for L2 were done on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;
	unsigned int ap_reset_hold_type;

	/* SEV-ES scratch area support */
	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	/* SNP Page-State-Change buffer entries currently being processed */
	u16 psc_idx;
	u16 psc_inflight;
	bool psc_2m;

	u64 ghcb_registered_gpa;

	struct mutex snp_vmsa_mutex; /* Used to handle concurrent updates of VMSA. */
	gpa_t snp_vmsa_gpa;
	bool snp_ap_waiting_for_reset;
	bool snp_has_guest_vmsa;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	void *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;

	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;

	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection).  KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	u32 ldr_reg;
	u32 dfr_reg;

	/*
	 * This is essentially a shadow of the vCPU's actual entry in the
	 * Physical ID table that is programmed into the VMCB, i.e. that is
	 * seen by the CPU.  If IPI virtualization is disabled, IsRunning is
	 * only ever set in the shadow, i.e. is never propagated to the "real"
	 * table, so that hardware never sees IsRunning=1.
	 */
	u64 avic_physical_id_entry;

	/*
	 * Per-vCPU list of irqfds that are eligible to post IRQs directly to
	 * the vCPU (a.k.a. device posted IRQs, a.k.a. IRQ bypass).  The list
	 * is used to reconfigure IRTEs when the vCPU is loaded/put (to set the
	 * target pCPU), when AVIC is toggled on/off (to (de)activate bypass),
	 * and if the irqfd becomes ineligible for posting (to put the IRTE
	 * back into remapped mode).
	 */
	struct list_head ir_list;
	raw_spinlock_t ir_list_lock;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;
	bool lbr_msrs_intercepted;

	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};

struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	bool bp_spec_reduce_set;

	struct vmcb *save_area;
	unsigned long save_area_pa;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline struct kvm_sev_info *to_kvm_sev_info(struct kvm *kvm)
{
	return &to_kvm_svm(kvm)->sev_info;
}

#ifdef CONFIG_KVM_AMD_SEV
static __always_inline bool sev_guest(struct kvm *kvm)
{
	return to_kvm_sev_info(kvm)->active;
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
}

static __always_inline bool sev_snp_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
	       !WARN_ON_ONCE(!sev_es_guest(kvm));
}
#else
#define sev_guest(kvm) false
#define sev_es_guest(kvm) false
#define sev_snp_guest(kvm) false
#endif

static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
{
	return svm->sev_es.ghcb_registered_gpa == val;
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
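
/*
 * Usage sketch for the clean-bit helpers (illustrative; real call sites
 * live in svm.c and nested.c): after software changes a VMCB field, the
 * matching clean bit must be cleared so hardware reloads that field on the
 * next VMRUN.
 *
 *	svm->vmcb->control.asid = asid;		(hypothetical update)
 *	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 */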

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}
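
/*
 * Usage sketch (assumed call-site pattern): intercepts are always written
 * to vmcb01, and recalc_intercepts() re-merges them into vmcb02 when L2 is
 * active, e.g.
 *
 *	svm_set_intercept(svm, INTERCEPT_RDTSC);
 *	if (svm_is_intercept(svm, INTERCEPT_RDTSC))
 *		svm_clr_intercept(svm, INTERCEPT_RDTSC);
 */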

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VGIF) &&
	       (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}
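
/*
 * Usage sketch (illustrative): these helpers back KVM's tracking of the
 * guest's GIF, either via the hardware vGIF bit or the software guest_gif
 * flag, e.g. when emulating the guest's CLGI and STGI instructions.
 *
 *	disable_gif(svm);	(CLGI: block IRQs, NMIs and SMIs)
 *	enable_gif(svm);	(STGI: re-enable them)
 *	WARN_ON(!gif_set(svm));
 */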

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/*
	 * The MSRPM uses 2 bits per MSR, i.e. 4 MSRs per u8 and 16 MSRs per
	 * u32; 'offset' is in units of u32 chunks.
	 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}

static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}

static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}

static inline void svm_vmgexit_set_return_code(struct vcpu_svm *svm,
						u64 response, u64 data)
{
	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, response);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, data);
}

static inline void svm_vmgexit_inject_exception(struct vcpu_svm *svm, u8 vector)
{
	u64 data = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT | vector;

	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_ISSUE_EXCEPTION, data);
}

static inline void svm_vmgexit_bad_input(struct vcpu_svm *svm, u64 suberror)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_MALFORMED_INPUT, suberror);
}

static inline void svm_vmgexit_success(struct vcpu_svm *svm, u64 data)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}

static inline void svm_vmgexit_no_action(struct vcpu_svm *svm, u64 data)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}
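
/*
 * Usage sketch (assumed call-site pattern): a VMGEXIT handler reports its
 * result to the guest through the GHCB, e.g. flag malformed input
 *
 *	svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_USAGE);
 *
 * or ask the guest to raise an exception (GP_VECTOR used here purely for
 * illustration):
 *
 *	svm_vmgexit_inject_exception(svm, GP_VECTOR);
 */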

/*
 * The MSRPM is 8KiB in size, divided into four 2KiB ranges (the fourth range
 * is reserved).  Each MSR within a range is covered by two bits, one each for
 * read (bit 0) and write (bit 1), where a bit value of '1' means intercepted.
 */
#define SVM_MSRPM_BYTES_PER_RANGE 2048
#define SVM_BITS_PER_MSR 2
#define SVM_MSRS_PER_BYTE (BITS_PER_BYTE / SVM_BITS_PER_MSR)
#define SVM_MSRS_PER_RANGE (SVM_MSRPM_BYTES_PER_RANGE * SVM_MSRS_PER_BYTE)
static_assert(SVM_MSRS_PER_RANGE == 8192);
#define SVM_MSRPM_OFFSET_MASK (SVM_MSRS_PER_RANGE - 1)

static __always_inline int svm_msrpm_bit_nr(u32 msr)
{
	int range_nr;

	switch (msr & ~SVM_MSRPM_OFFSET_MASK) {
	case 0:
		range_nr = 0;
		break;
	case 0xc0000000:
		range_nr = 1;
		break;
	case 0xc0010000:
		range_nr = 2;
		break;
	default:
		return -EINVAL;
	}

	return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +
	       (msr & SVM_MSRPM_OFFSET_MASK) * SVM_BITS_PER_MSR;
}
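
/*
 * Worked example: for MSR_EFER (0xc0000080), the base is 0xc0000000
 * (range 1) and the offset is 0x80, so the read bit lands at
 * 1 * 2048 * 8 + 0x80 * 2 = 16640 and the write bit at 16641.
 */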

#define __BUILD_SVM_MSR_BITMAP_HELPER(rtype, action, bitop, access, bit_rw)	\
static inline rtype svm_##action##_msr_bitmap_##access(unsigned long *bitmap,	\
						       u32 msr)			\
{										\
	int bit_nr;								\
										\
	bit_nr = svm_msrpm_bit_nr(msr);						\
	if (bit_nr < 0)								\
		return (rtype)true;						\
										\
	return bitop##_bit(bit_nr + bit_rw, bitmap);				\
}

#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop)			\
	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0)	\
	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 1)

BUILD_SVM_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
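
/*
 * Usage sketch for the generated helpers (illustrative; 'bitmap' is a
 * vCPU's MSR permissions map, and a '1' bit means intercepted):
 *
 *	if (svm_test_msr_bitmap_read(bitmap, MSR_EFER))
 *		(reads of MSR_EFER are intercepted)
 *	svm_clear_msr_bitmap_write(bitmap, MSR_EFER);	(pass writes through)
 */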

#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)

/* svm.c */
extern bool dump_invalid_vmcb;

void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask);

static inline void *svm_vcpu_alloc_msrpm(void)
{
	return svm_alloc_permissions_map(MSRPM_SIZE, GFP_KERNEL_ACCOUNT);
}

void svm_vcpu_free_msrpm(void *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);

static inline void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
						 u32 msr, int type)
{
	svm_set_intercept_for_msr(vcpu, msr, type, false);
}

static inline void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
						u32 msr, int type)
{
	svm_set_intercept_for_msr(vcpu, msr, type, true);
}
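
/*
 * Usage sketch (assumed call-site pattern): intercepts are toggled per
 * access type, e.g. pass through reads of TSC_AUX while still intercepting
 * writes.
 *
 *	svm_disable_intercept_for_msr(vcpu, MSR_TSC_AUX, MSR_TYPE_R);
 *	svm_enable_intercept_for_msr(vcpu, MSR_TSC_AUX, MSR_TYPE_W);
 */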

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int __init nested_svm_init_msrpm_merge_offsets(void);

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
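
/*
 * Usage sketch (assumed call-site pattern): synthesize a nested #VMEXIT
 * with no exit info, e.g. when an interrupt arrives while L2 is running
 * and L1 intercepts physical interrupts:
 *
 *	if (nested_exit_on_intr(svm))
 *		return nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */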

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */
#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLED) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV)      |		\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG)	\
)

bool __init avic_hardware_setup(void);
void avic_hardware_unsetup(void);
int avic_alloc_physical_id_table(struct kvm *kvm);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
			unsigned int host_irq, uint32_t guest_irq,
			struct kvm_vcpu *vcpu, u32 vector);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

int pre_sev_run(struct vcpu_svm *svm, int cpu);
void sev_init_vmcb(struct vcpu_svm *svm, bool init_event);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

#ifdef CONFIG_KVM_AMD_SEV
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);

/* These symbols are used in common code and are stubbed below. */

struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);
static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

int sev_vcpu_create(struct kvm_vcpu *vcpu);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
void sev_vm_destroy(struct kvm *kvm);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
extern unsigned int max_sev_asid;
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
#else
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
	return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
}

static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

static inline int sev_vcpu_create(struct kvm_vcpu *vcpu) { return 0; }
static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
static inline void sev_vm_destroy(struct kvm *kvm) {}
static inline void __init sev_set_cpu_caps(void) {}
static inline void __init sev_hardware_setup(void) {}
static inline void sev_hardware_unsetup(void) {}
static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
#define max_sev_asid 0
static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
	return 0;
}
static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
static inline int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private)
{
	return 0;
}

static inline struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
{
	return NULL;
}
static inline void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa) {}
#endif

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
			   struct sev_es_save_area *hostsa);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#define DEFINE_KVM_GHCB_ACCESSORS(field)						\
static __always_inline u64 kvm_ghcb_get_##field(struct vcpu_svm *svm)			\
{											\
	return READ_ONCE(svm->sev_es.ghcb->save.field);					\
}											\
											\
static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm)	\
{											\
	return test_bit(GHCB_BITMAP_IDX(field),						\
			(unsigned long *)&svm->sev_es.valid_bitmap);			\
}											\
											\
static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm)	\
{											\
	return kvm_ghcb_##field##_is_valid(svm) ? kvm_ghcb_get_##field(svm) : 0;	\
}

DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)
DEFINE_KVM_GHCB_ACCESSORS(xss)
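
/*
 * For example, DEFINE_KVM_GHCB_ACCESSORS(rax) generates:
 *
 *	u64  kvm_ghcb_get_rax(struct vcpu_svm *svm);
 *	bool kvm_ghcb_rax_is_valid(const struct vcpu_svm *svm);
 *	u64  kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm);
 *
 * where the _if_valid variant returns 0 unless the guest marked RAX valid
 * in the GHCB's valid_bitmap.
 */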

#endif /* __SVM_SVM_H */