// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"

/*
 * Helpers to convert to/from physical addresses for pages whose address is
 * consumed directly by hardware.  Even though it's a physical address, SVM
 * often restricts the address to the natural width, hence 'unsigned long'
 * instead of 'hpa_t'.
 */
static inline unsigned long __sme_page_pa(struct page *page)
{
	return __sme_set(page_to_pfn(page) << PAGE_SHIFT);
}

static inline struct page *__sme_pa_to_page(unsigned long pa)
{
	return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT);
}
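
/*
 * Worked example (hypothetical values): on an SME-enabled host with the
 * C-bit at position 47, a page at PFN 0x1234 gives __sme_page_pa() ==
 * (0x1234UL << PAGE_SHIFT) | BIT_ULL(47); __sme_pa_to_page() clears the
 * C-bit again before converting back to a struct page.
 */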

#define IOPM_SIZE  (PAGE_SIZE * 3)
#define MSRPM_SIZE (PAGE_SIZE * 2)

extern bool npt_enabled;
extern int nrips;
extern int vgif;
extern bool intercept_smi;
extern bool vnmi;
extern int lbrv;

extern int tsc_aux_uret_slot __ro_after_init;

extern struct kvm_x86_ops svm_x86_ops __initdata;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK must also be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS,	/* Intercept vectors, TSC offset,
				   pause filter count */
	VMCB_PERM_MAP,		/* IOPM Base and MSRPM Base */
	VMCB_ASID,		/* ASID */
	VMCB_INTR,		/* int_ctl, int_vector */
	VMCB_NPT,		/* npt_en, nCR3, gPAT */
	VMCB_CR,		/* CR0, CR3, CR4, EFER */
	VMCB_DR,		/* DR6, DR7 */
	VMCB_DT,		/* GDT, IDT */
	VMCB_SEG,		/* CS, DS, SS, ES, CPL */
	VMCB_CR2,		/* CR2 only */
	VMCB_LBR,		/* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,		/* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
				 * AVIC PHYSICAL_TABLE pointer,
				 * AVIC LOGICAL_TABLE pointer
				 */
	VMCB_CET,		/* S_CET, SSP, ISST_ADDR */
	VMCB_SW = 31,		/* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (						\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |		\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |				\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |		\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |		\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) | (1U << VMCB_CET) |	\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

#ifdef CONFIG_KVM_AMD_SEV
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	bool need_init;		/* waiting for SEV_INIT2 */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long policy;
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	u64 vmsa_features;
	u16 ghcb_version;	/* Highest guest GHCB protocol version allowed */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
	void *snp_context;	/* SNP guest context page */
	void *guest_req_buf;	/* Bounce buffer for SNP Guest Request input */
	void *guest_resp_buf;	/* Bounce buffer for SNP Guest Request output */
	struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
	cpumask_var_t have_run_cpus; /* CPUs that have done VMRUN for this VM */
	bool snp_certs_enabled;	/* SNP certificate-fetching support */
};
#endif

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	u32 *avic_logical_id_table;
	u64 *avic_physical_id_table;
	struct hlist_node hnode;

#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info sev_info;
#endif
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg gdtr;
	struct vmcb_seg idtr;
	u8 cpl;
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
	u64 rflags;
	u64 rip;
	u64 rsp;
	u64 s_cet;
	u64 ssp;
	u64 isst_addr;
	u64 rax;
	u64 cr2;
	u64 dbgctl;
	u64 br_from;
	u64 br_to;
	u64 last_excp_from;
	u64 last_excp_to;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u8 erap_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u64 exit_code;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 misc_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 misc_ctl2;
	u32 clean;
	union {
#if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
		struct hv_vmcb_enlightenments hv_enlightenments;
#endif
		u8 reserved_sw[32];
	};
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;
	u64 last_bus_lock_rip;

	/*
	 * The MSR permissions map used for vmcb02, which is the merge result
	 * of vmcb01 and vmcb12.
	 */
	void *msrpm;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * Note, this flag can only be used reliably in conjunction with a
	 * paravirt L1 which informs L0 whether any changes to the MSR bitmap
	 * for L2 were done on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;
	unsigned int ap_reset_hold_type;

	/* SEV-ES scratch area support */
	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	/* SNP Page-State-Change buffer entries currently being processed */
	u16 psc_idx;
	u16 psc_inflight;
	bool psc_2m;

	u64 ghcb_registered_gpa;

	struct mutex snp_vmsa_mutex; /* Used to handle concurrent updates of VMSA. */
	gpa_t snp_vmsa_gpa;
	bool snp_ap_waiting_for_reset;
	bool snp_has_guest_vmsa;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr; it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	void *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;

	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;

	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection).  KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	u32 ldr_reg;
	u32 dfr_reg;

	/*
	 * This is essentially a shadow of the vCPU's actual entry in the
	 * Physical ID table that is programmed into the VMCB, i.e. that is
	 * seen by the CPU.  If IPI virtualization is disabled, IsRunning is
	 * only ever set in the shadow, i.e. is never propagated to the "real"
	 * table, so that hardware never sees IsRunning=1.
	 */
	u64 avic_physical_id_entry;

	/*
	 * Per-vCPU list of irqfds that are eligible to post IRQs directly to
	 * the vCPU (a.k.a. device posted IRQs, a.k.a. IRQ bypass).  The list
	 * is used to reconfigure IRTEs when the vCPU is loaded/put (to set the
	 * target pCPU), when AVIC is toggled on/off (to (de)activate bypass),
	 * and if the irqfd becomes ineligible for posting (to put the IRTE
	 * back into remapped mode).
	 */
	struct list_head ir_list;
	raw_spinlock_t ir_list_lock;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool avic_irq_window;
	bool x2avic_msrs_intercepted;
	bool lbr_msrs_intercepted;

	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};

struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	bool bp_spec_reduce_set;

	struct vmcb *save_area;
	unsigned long save_area_pa;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

#ifdef CONFIG_KVM_AMD_SEV
static __always_inline struct kvm_sev_info *to_kvm_sev_info(struct kvm *kvm)
{
	return &to_kvm_svm(kvm)->sev_info;
}

static __always_inline bool ____sev_guest(struct kvm *kvm)
{
	return to_kvm_sev_info(kvm)->active;
}

static __always_inline bool ____sev_es_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
}

static __always_inline bool ____sev_snp_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
	       !WARN_ON_ONCE(!____sev_es_guest(kvm));
}

static __always_inline bool is_sev_guest(struct kvm_vcpu *vcpu)
{
	return ____sev_guest(vcpu->kvm);
}

static __always_inline bool is_sev_es_guest(struct kvm_vcpu *vcpu)
{
	return ____sev_es_guest(vcpu->kvm);
}

static __always_inline bool is_sev_snp_guest(struct kvm_vcpu *vcpu)
{
	return ____sev_snp_guest(vcpu->kvm);
}
#else
static __always_inline bool is_sev_guest(struct kvm_vcpu *vcpu)
{
	return false;
}

static __always_inline bool is_sev_es_guest(struct kvm_vcpu *vcpu)
{
	return false;
}

static __always_inline bool is_sev_snp_guest(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif

static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
{
	return svm->sev_es.ghcb_registered_gpa == val;
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}
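
/*
 * Usage sketch: after software modifies a field that hardware may have
 * cached, the corresponding clean bit must be cleared so the CPU reloads
 * that state on the next VMRUN, e.g.:
 *
 *	svm->vmcb->control.asid = asid;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 */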

static inline bool vmcb12_is_dirty(struct vmcb_ctrl_area_cached *control, int bit)
{
	return !test_bit(bit, (unsigned long *)&control->clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

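/*
 * When KVM itself runs as a guest, compare only the low 32 bits of the
 * exit code: some hypervisors are known to report VMRUN failures with
 * only the lower half of SVM_EXIT_ERR set (an assumption inferred from
 * the X86_FEATURE_HYPERVISOR check below).
 */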
static inline bool svm_is_vmrun_failure(u64 exit_code)
{
	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return (u32)exit_code == (u32)SVM_EXIT_ERR;

	return exit_code == SVM_EXIT_ERR;
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void __vmcb_set_intercept(unsigned long *intercepts, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, intercepts);
}

static inline void __vmcb_clr_intercept(unsigned long *intercepts, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, intercepts);
}

static inline bool __vmcb_is_intercept(unsigned long *intercepts, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, intercepts);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	__vmcb_set_intercept((unsigned long *)&control->intercepts, bit);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	__vmcb_clr_intercept((unsigned long *)&control->intercepts, bit);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	return __vmcb_is_intercept((unsigned long *)&control->intercepts, bit);
}

static inline void vmcb12_clr_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	__vmcb_clr_intercept((unsigned long *)&control->intercepts, bit);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	return __vmcb_is_intercept((unsigned long *)&control->intercepts, bit);
}

void nested_vmcb02_recalc_intercepts(struct vcpu_svm *svm);

static inline void svm_mark_intercepts_dirty(struct vcpu_svm *svm)
{
	vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_INTERCEPTS);

	/*
	 * If L2 is active, recalculate the intercepts for vmcb02 to account
	 * for the changes made to vmcb01.  All intercept configuration is done
	 * for vmcb01 and then propagated to vmcb02 to combine KVM's intercepts
	 * with L1's intercepts (from the vmcb12 snapshot).
	 */
	if (is_guest_mode(&svm->vcpu))
		nested_vmcb02_recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	svm_mark_intercepts_dirty(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	svm_mark_intercepts_dirty(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	svm_mark_intercepts_dirty(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	svm_mark_intercepts_dirty(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VGIF) &&
	       (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.misc_ctl & SVM_MISC_ENABLE_NP;
}

static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 MSRs per u8, and 4 u8s in a u32, i.e. 16 MSRs per offset */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}
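
/*
 * Example: offset 0x80 maps to MSR 0x800, i.e. APIC_BASE_MSR, so offsets
 * 0x80-0x8f cover the x2APIC MSR range 0x800-0x8ff (16 MSRs per offset).
 */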

static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}

static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}

static inline void svm_vmgexit_set_return_code(struct vcpu_svm *svm,
					       u64 response, u64 data)
{
	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, response);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, data);
}

static inline void svm_vmgexit_inject_exception(struct vcpu_svm *svm, u8 vector)
{
	u64 data = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT | vector;

	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_ISSUE_EXCEPTION, data);
}

static inline void svm_vmgexit_bad_input(struct vcpu_svm *svm, u64 suberror)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_MALFORMED_INPUT, suberror);
}

static inline void svm_vmgexit_success(struct vcpu_svm *svm, u64 data)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}

static inline void svm_vmgexit_no_action(struct vcpu_svm *svm, u64 data)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}

/*
 * The MSRPM is 8KiB in size, divided into four 2KiB ranges (the fourth range
 * is reserved).  Each MSR within a range is covered by two bits, one each for
 * read (bit 0) and write (bit 1), where a bit value of '1' means intercepted.
 */
#define SVM_MSRPM_BYTES_PER_RANGE 2048
#define SVM_BITS_PER_MSR 2
#define SVM_MSRS_PER_BYTE (BITS_PER_BYTE / SVM_BITS_PER_MSR)
#define SVM_MSRS_PER_RANGE (SVM_MSRPM_BYTES_PER_RANGE * SVM_MSRS_PER_BYTE)
static_assert(SVM_MSRS_PER_RANGE == 8192);
#define SVM_MSRPM_OFFSET_MASK (SVM_MSRS_PER_RANGE - 1)

static __always_inline int svm_msrpm_bit_nr(u32 msr)
{
	int range_nr;

	switch (msr & ~SVM_MSRPM_OFFSET_MASK) {
	case 0:
		range_nr = 0;
		break;
	case 0xc0000000:
		range_nr = 1;
		break;
	case 0xc0010000:
		range_nr = 2;
		break;
	default:
		return -EINVAL;
	}

	return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +
	       (msr & SVM_MSRPM_OFFSET_MASK) * SVM_BITS_PER_MSR;
}
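
/*
 * Worked example: MSR_GS_BASE (0xc0000101) selects range 1, giving
 * bit_nr = 1 * 2048 * 8 + 0x101 * 2 = 16898 for the read bit; the write
 * bit follows at 16899 (bit_rw == 1 in the helpers below).
 */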

#define __BUILD_SVM_MSR_BITMAP_HELPER(rtype, action, bitop, access, bit_rw)	\
static inline rtype svm_##action##_msr_bitmap_##access(unsigned long *bitmap,	\
						       u32 msr)			\
{										\
	int bit_nr;								\
										\
	bit_nr = svm_msrpm_bit_nr(msr);						\
	if (bit_nr < 0)								\
		return (rtype)true;						\
										\
	return bitop##_bit(bit_nr + bit_rw, bitmap);				\
}

#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop)			\
	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0)	\
	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 1)

BUILD_SVM_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
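
/*
 * The macros above expand to svm_test_msr_bitmap_{read,write}(),
 * svm_clear_msr_bitmap_{read,write}() and svm_set_msr_bitmap_{read,write}(),
 * each taking (unsigned long *bitmap, u32 msr).  For an MSR outside the
 * mapped ranges (svm_msrpm_bit_nr() returns -EINVAL), a test reports the
 * MSR as intercepted, while set/clear become no-ops.
 */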

#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)

/* svm.c */
extern bool dump_invalid_vmcb;

void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask);

static inline void *svm_vcpu_alloc_msrpm(void)
{
	return svm_alloc_permissions_map(MSRPM_SIZE, GFP_KERNEL_ACCOUNT);
}

#define svm_copy_lbrs(to, from)					\
do {								\
	(to)->dbgctl = (from)->dbgctl;				\
	(to)->br_from = (from)->br_from;			\
	(to)->br_to = (from)->br_to;				\
	(to)->last_excp_from = (from)->last_excp_from;		\
	(to)->last_excp_to = (from)->last_excp_to;		\
} while (0)
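
/*
 * svm_copy_lbrs() is a macro rather than a function so that the same copy
 * works for any pair of struct vmcb_save_area and/or
 * struct vmcb_save_area_cached, both of which carry the five LBR fields.
 */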

void svm_vcpu_free_msrpm(void *msrpm);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);

static inline void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
						 u32 msr, int type)
{
	svm_set_intercept_for_msr(vcpu, msr, type, false);
}

static inline void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
						u32 msr, int type)
{
	svm_set_intercept_for_msr(vcpu, msr, type, true);
}

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int __init nested_svm_init_msrpm_merge_offsets(void);

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void nested_svm_vmexit(struct vcpu_svm *svm);

static inline void nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */
#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLED) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV) |			\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG)	\
)

bool __init avic_hardware_setup(void);
void avic_hardware_unsetup(void);
int avic_alloc_physical_id_table(struct kvm *kvm);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
			unsigned int host_irq, uint32_t guest_irq,
			struct kvm_vcpu *vcpu, u32 vector);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

int pre_sev_run(struct vcpu_svm *svm, int cpu);
void sev_init_vmcb(struct vcpu_svm *svm, bool init_event);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

#ifdef CONFIG_KVM_AMD_SEV
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);

/* These symbols are used in common code and are stubbed below. */

struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);

static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

int sev_vcpu_create(struct kvm_vcpu *vcpu);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
void sev_vm_init(struct kvm *kvm);
void sev_vm_destroy(struct kvm *kvm);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
extern unsigned int max_sev_asid;
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
#else
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
	return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
}

static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

static inline int sev_vcpu_create(struct kvm_vcpu *vcpu) { return 0; }
static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
static inline void sev_vm_init(struct kvm *kvm) {}
static inline void sev_vm_destroy(struct kvm *kvm) {}
static inline void __init sev_set_cpu_caps(void) {}
static inline void __init sev_hardware_setup(void) {}
static inline void sev_hardware_unsetup(void) {}
static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
#define max_sev_asid 0
static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
	return 0;
}
static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
static inline int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private)
{
	return 0;
}

static inline struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
{
	return NULL;
}
static inline void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa) {}
#endif

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
			   struct sev_es_save_area *hostsa);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#define DEFINE_KVM_GHCB_ACCESSORS(field)						\
	static __always_inline u64 kvm_ghcb_get_##field(struct vcpu_svm *svm)		\
	{										\
		return READ_ONCE(svm->sev_es.ghcb->save.field);				\
	}										\
											\
	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
	{										\
		return test_bit(GHCB_BITMAP_IDX(field),					\
				(unsigned long *)&svm->sev_es.valid_bitmap);		\
	}										\
											\
	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm) \
	{										\
		return kvm_ghcb_##field##_is_valid(svm) ? kvm_ghcb_get_##field(svm) : 0; \
	}

DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)
DEFINE_KVM_GHCB_ACCESSORS(xss)
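
/*
 * Each DEFINE_KVM_GHCB_ACCESSORS(field) invocation above generates
 * kvm_ghcb_get_<field>(), kvm_ghcb_<field>_is_valid() and
 * kvm_ghcb_get_<field>_if_valid(); e.g. kvm_ghcb_get_rax_if_valid()
 * returns the GHCB's RAX only if the guest marked it valid, else 0.
 */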

#endif /* __SVM_SVM_H */