/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_X86_VMX_COMMON_H
#define __KVM_X86_VMX_COMMON_H

#include <linux/kvm_host.h>
#include <asm/posted_intr.h>

#include "mmu.h"

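/*
 * Decomposed view of the 32-bit VM-Exit reason: bits 15:0 hold the basic
 * exit reason, the upper bits are the modifier flags defined by the SDM
 * (bus lock detected, exit from enclave mode, failed VM-Entry, etc.).
 */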
union vmx_exit_reason {
        struct {
                u32 basic : 16;
                u32 reserved16 : 1;
                u32 reserved17 : 1;
                u32 reserved18 : 1;
                u32 reserved19 : 1;
                u32 reserved20 : 1;
                u32 reserved21 : 1;
                u32 reserved22 : 1;
                u32 reserved23 : 1;
                u32 reserved24 : 1;
                u32 reserved25 : 1;
                u32 bus_lock_detected : 1;
                u32 enclave_mode : 1;
                u32 smi_pending_mtf : 1;
                u32 smi_from_vmx_root : 1;
                u32 reserved30 : 1;
                u32 failed_vmentry : 1;
        };
        u32 full;
};

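/*
 * Per-vCPU state shared by VMX and TDX guests; embedded in both
 * struct vcpu_vmx and struct vcpu_tdx so that common code can operate on
 * either VM type.
 */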
struct vcpu_vt {
        /* Posted interrupt descriptor */
        struct pi_desc pi_desc;

        /* Used if this vCPU is waiting for PI notification wakeup. */
        struct list_head pi_wakeup_list;

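        /*
         * VM-Exit information cached by the VMX and TDX exit paths so that
         * code shared between the two VM types can consume it.
         */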
        union vmx_exit_reason exit_reason;

        unsigned long exit_qualification;
        u32 exit_intr_info;

        /*
         * If true, guest state has been loaded into hardware, and host state
         * saved into vcpu_{vt,vmx,tdx}.  If false, host state is loaded into
         * hardware.
         */
        bool guest_state_loaded;
        bool emulation_required;

#ifdef CONFIG_X86_64
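        /* Host value of MSR_KERNEL_GS_BASE, saved while guest state is loaded. */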
        u64 msr_host_kernel_gs_base;
#endif
};

#ifdef CONFIG_KVM_INTEL_TDX

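/* A TD (Trust Domain) is a TDX guest; these helpers identify TDX VMs and vCPUs. */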
static __always_inline bool is_td(struct kvm *kvm)
{
        return kvm->arch.vm_type == KVM_X86_TDX_VM;
}

static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
{
        return is_td(vcpu->kvm);
}

#else

static __always_inline bool is_td(struct kvm *kvm) { return false; }
static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }

#endif

static inline bool vt_is_tdx_private_gpa(struct kvm *kvm, gpa_t gpa)
{
        /* For TDX the direct mask is the shared mask. */
        return !kvm_is_addr_direct(kvm, gpa);
}

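/*
 * Convert the EPT-violation exit qualification into a synthesized page fault
 * error code (PFERR_* flags) and forward the fault to the common MMU code.
 */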
static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
                                             unsigned long exit_qualification)
{
        u64 error_code;

        /* Is it a read fault? */
        error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
                     ? PFERR_USER_MASK : 0;
        /* Is it a write fault? */
        error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
                      ? PFERR_WRITE_MASK : 0;
        /* Is it a fetch fault? */
        error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
                      ? PFERR_FETCH_MASK : 0;
        /* Is the EPT page-table entry present? */
        error_code |= (exit_qualification & EPT_VIOLATION_PROT_MASK)
                      ? PFERR_PRESENT_MASK : 0;

        if (exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
                error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
                              PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;

        if (vt_is_tdx_private_gpa(vcpu->kvm, gpa))
                error_code |= PFERR_PRIVATE_ACCESS;

        return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}

static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
                                                     int pi_vec)
{
#ifdef CONFIG_SMP
        if (vcpu->mode == IN_GUEST_MODE) {
                /*
                 * The vector of the virtual interrupt has already been set in
                 * the PIR.  Send a notification event to deliver the virtual
                 * interrupt unless the vCPU is the currently running vCPU,
                 * i.e. the event is being sent from a fastpath VM-Exit
                 * handler, in which case the PIR will be synced to the vIRR
                 * before re-entering the guest.
                 *
                 * When the target is not the running vCPU, the following
                 * possibilities emerge:
                 *
                 * Case 1: vCPU stays in non-root mode. Sending a notification
                 * event posts the interrupt to the vCPU.
                 *
                 * Case 2: vCPU exits to root mode and is still runnable. The
                 * PIR will be synced to the vIRR before re-entering the guest.
                 * Sending a notification event is ok as the host IRQ handler
                 * will ignore the spurious event.
                 *
                 * Case 3: vCPU exits to root mode and is blocked. vcpu_block()
                 * has already synced PIR to vIRR and never blocks the vCPU if
                 * the vIRR is not empty. Therefore, a blocked vCPU here does
                 * not wait for any requested interrupts in PIR, and sending a
                 * notification event also results in a benign, spurious event.
                 */

                if (vcpu != kvm_get_running_vcpu())
                        __apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
                return;
        }
#endif
        /*
         * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
         * otherwise do nothing as KVM will grab the highest priority pending
         * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
         */
        kvm_vcpu_wake_up(vcpu);
}

/*
 * Post an interrupt to a vCPU's PIR and trigger the vCPU to process the
 * interrupt if necessary.
 */
static inline void __vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu,
                                                  struct pi_desc *pi_desc, int vector)
{
        if (pi_test_and_set_pir(vector, pi_desc))
                return;

        /* If a previous notification has sent the IPI, nothing to do. */
        if (pi_test_and_set_on(pi_desc))
                return;

        /*
         * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
         * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
         * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
         * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
         */
        kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
}

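/*
 * Handle an NMI VM-Exit by invoking the host NMI handler; shared by the VMX
 * and TDX exit paths.
 */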
noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_COMMON_H */