// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"

/*
 * Helpers to convert to/from physical addresses for pages whose address is
 * consumed directly by hardware.  Even though it's a physical address, SVM
 * often restricts the address to the natural width, hence 'unsigned long'
 * instead of 'hpa_t'.
 */
static inline unsigned long __sme_page_pa(struct page *page)
{
	return __sme_set(page_to_pfn(page) << PAGE_SHIFT);
}

static inline struct page *__sme_pa_to_page(unsigned long pa)
{
	return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT);
}
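/*
 * Illustrative sketch (not part of the original header): a page whose physical
 * address is handed directly to hardware, e.g. a VMCB, is typically recorded
 * and later released via these helpers:
 *
 *	struct page *vmcb_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 *
 *	svm->vmcb01.pa = __sme_page_pa(vmcb_page);	// PA with the SME C-bit applied
 *	...
 *	__free_page(__sme_pa_to_page(svm->vmcb01.pa));	// round-trip back to the page
 */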

#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)
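/*
 * Per the AMD APM, the hardware I/O permission map spans 12 KiB and the MSR
 * permission map 8 KiB (two intercept bits, read and write, per covered MSR),
 * hence three and two pages respectively.
 */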

#define MAX_DIRECT_ACCESS_MSRS	48
#define MSRPM_OFFSETS		32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int nrips;
extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;
extern int lbrv;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS,	/* Intercept vectors, TSC offset,
				   pause filter count */
	VMCB_PERM_MAP,		/* IOPM Base and MSRPM Base */
	VMCB_ASID,		/* ASID */
	VMCB_INTR,		/* int_ctl, int_vector */
	VMCB_NPT,		/* npt_en, nCR3, gPAT */
	VMCB_CR,		/* CR0, CR3, CR4, EFER */
	VMCB_DR,		/* DR6, DR7 */
	VMCB_DT,		/* GDT, IDT */
	VMCB_SEG,		/* CS, DS, SS, ES, CPL */
	VMCB_CR2,		/* CR2 only */
	VMCB_LBR,		/* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,		/* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
				 * AVIC PHYSICAL_TABLE pointer,
				 * AVIC LOGICAL_TABLE pointer
				 */
	VMCB_SW = 31,		/* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
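/*
 * Illustrative usage (not part of the original header): a clear bit tells the
 * CPU the corresponding VMCB area may have changed and must be reloaded, so
 * KVM clears the bit whenever it touches that area, e.g.:
 *
 *	svm->vmcb->save.cr0 = ...;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 */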

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	bool need_init;		/* waiting for SEV_INIT2 */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	u64 vmsa_features;
	u16 ghcb_version;	/* Highest guest GHCB protocol version allowed */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring this encryption context */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
	void *snp_context;      /* SNP guest context page */
	void *guest_req_buf;    /* Bounce buffer for SNP Guest Request input */
	void *guest_resp_buf;   /* Bounce buffer for SNP Guest Request output */
	struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	union {
#if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
		struct hv_vmcb_enlightenments hv_enlightenments;
#endif
		u8 reserved_sw[32];
	};
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * Note, this flag can only be used reliably in conjunction with a
	 * paravirt L1 which informs L0 whether any changes to the MSR bitmap
	 * for L2 were done on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;
	unsigned int ap_reset_hold_type;

	/* SEV-ES scratch area support */
	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	/* SNP Page-State-Change buffer entries currently being processed */
	u16 psc_idx;
	u16 psc_inflight;
	bool psc_2m;

	u64 ghcb_registered_gpa;

	struct mutex snp_vmsa_mutex; /* Used to handle concurrent updates of VMSA. */
	gpa_t snp_vmsa_gpa;
	bool snp_ap_waiting_for_reset;
	bool snp_has_guest_vmsa;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;

	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;

	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection).  KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity.  This avoids the need to scan for
	 * the IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;

	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};

struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	struct vmcb *save_area;
	unsigned long save_area_pa;

	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline struct kvm_sev_info *to_kvm_sev_info(struct kvm *kvm)
{
	return &to_kvm_svm(kvm)->sev_info;
}

#ifdef CONFIG_KVM_AMD_SEV
static __always_inline bool sev_guest(struct kvm *kvm)
{
	return to_kvm_sev_info(kvm)->active;
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
}

static __always_inline bool sev_snp_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
	       !WARN_ON_ONCE(!sev_es_guest(kvm));
}
#else
#define sev_guest(kvm) false
#define sev_es_guest(kvm) false
#define sev_snp_guest(kvm) false
#endif

static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
{
	return svm->sev_es.ghcb_registered_gpa == val;
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)
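/*
 * Illustrative sketch (not part of the original header): when KVM changes the
 * cached CR3 it raises the request instead of writing the VMCB directly,
 * roughly:
 *
 *	vcpu->arch.cr3 = new_cr3;
 *	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 *	// ... later, svm_load_mmu_pgd() writes save.cr3 and marks VMCB_CR dirty.
 */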

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VGIF) &&
	       (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

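/*
 * Return the VMCB whose int_ctl tracks the guest's GIF: NULL when vGIF is not
 * in use (the GIF is then tracked in software via svm->guest_gif), vmcb02 when
 * L2 is running without nested vGIF, and vmcb01 otherwise.
 */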
static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}
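/*
 * Worked example: the MSR permission map uses two bits (read and write
 * intercept) per MSR, i.e. 4 MSRs per byte and 16 MSRs per u32-sized offset,
 * hence "msr = offset * 16".  The x2APIC MSRs occupy 0x800..0x8ff, so e.g.
 * offset 0x80 maps to MSR 0x800 (APIC_BASE_MSR) and is reported as an x2APIC
 * offset.
 */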

static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}

static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}

static inline void svm_vmgexit_set_return_code(struct vcpu_svm *svm,
					       u64 response, u64 data)
{
	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, response);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, data);
}

static inline void svm_vmgexit_inject_exception(struct vcpu_svm *svm, u8 vector)
{
	u64 data = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT | vector;

	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_ISSUE_EXCEPTION, data);
}

static inline void svm_vmgexit_bad_input(struct vcpu_svm *svm, u64 suberror)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_MALFORMED_INPUT, suberror);
}

static inline void svm_vmgexit_success(struct vcpu_svm *svm, u64 data)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}

static inline void svm_vmgexit_no_action(struct vcpu_svm *svm, u64 data)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}
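/*
 * Illustrative sketch (not part of the original header): a VMGEXIT handler
 * reports its result to the guest through sw_exit_info_1/2, e.g.:
 *
 *	if (!params_are_sane)
 *		svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);
 *	else
 *		svm_vmgexit_success(svm, 0);
 */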

/* svm.c */
#define MSR_INVALID			0xffffffffU

#define DEBUGCTL_RESERVED_BITS		(~DEBUGCTLMSR_LBR)

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
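/*
 * Illustrative sketch (not part of the original header): used for exits that
 * carry no exit_info payload.  Synthesizing a nested #VMEXIT for a physical
 * interrupt while L2 runs could look like:
 *
 *	if (is_guest_mode(vcpu) && nested_exit_on_intr(svm))
 *		return nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */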

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */
#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLED) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV) |			\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED)	\
)

bool avic_hardware_setup(void);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);


/* sev.c */

int pre_sev_run(struct vcpu_svm *svm, int cpu);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

#ifdef CONFIG_KVM_AMD_SEV
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);

/* These symbols are used in common code and are stubbed below. */

struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);

static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

void sev_free_vcpu(struct kvm_vcpu *vcpu);
void sev_vm_destroy(struct kvm *kvm);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
extern unsigned int max_sev_asid;
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
#else
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
	return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
}

static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
static inline void sev_vm_destroy(struct kvm *kvm) {}
static inline void __init sev_set_cpu_caps(void) {}
static inline void __init sev_hardware_setup(void) {}
static inline void sev_hardware_unsetup(void) {}
static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
#define max_sev_asid 0
static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
	return 0;
}
static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
{
	return 0;
}

#endif

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
			   struct sev_es_save_area *hostsa);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#define DEFINE_KVM_GHCB_ACCESSORS(field)						\
	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
	{									\
		return test_bit(GHCB_BITMAP_IDX(field),				\
				(unsigned long *)&svm->sev_es.valid_bitmap);	\
	}									\
										\
	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
	{									\
		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0;	\
	}									\

DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)
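/*
 * Illustrative sketch (not part of the original header): each invocation above
 * generates a validity check and a getter, e.g. for "rax":
 *
 *	struct ghcb *ghcb = svm->sev_es.ghcb;
 *
 *	vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
 *	if (kvm_ghcb_sw_scratch_is_valid(svm))
 *		...
 */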

#endif