xref: /linux/arch/x86/kvm/vmx/common.h (revision 7f9039c524a351c684149ecf1b3c5145a0dff2fe)
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_X86_VMX_COMMON_H
#define __KVM_X86_VMX_COMMON_H

#include <linux/kvm_host.h>
#include <asm/posted_intr.h>

#include "mmu.h"

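/*
 * Layout of the 32-bit VM-Exit reason field: the basic exit reason lives in
 * bits 15:0, modifier flags occupy the upper bits, and bit 31 is set when
 * VM-Entry itself failed.
 */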
union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};

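/* Per-vCPU state shared by the VMX and TDX paths. */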
struct vcpu_vt {
	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

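	/* VM-Exit information cached for the most recent exit from the guest. */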
	union vmx_exit_reason exit_reason;

	unsigned long	exit_qualification;
	u32		exit_intr_info;

	/*
	 * If true, guest state has been loaded into hardware, and host state
	 * saved into vcpu_{vt,vmx,tdx}.  If false, host state is loaded into
	 * hardware.
	 */
	bool		guest_state_loaded;
	bool		emulation_required;

#ifdef CONFIG_X86_64
	u64		msr_host_kernel_gs_base;
#endif

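	/* Host value of MSR_IA32_DEBUGCTL, restored after running the guest. */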
	unsigned long	host_debugctlmsr;
};

#ifdef CONFIG_KVM_INTEL_TDX

static __always_inline bool is_td(struct kvm *kvm)
{
	return kvm->arch.vm_type == KVM_X86_TDX_VM;
}

static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
{
	return is_td(vcpu->kvm);
}

#else

static __always_inline bool is_td(struct kvm *kvm) { return false; }
static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }

#endif

static inline bool vt_is_tdx_private_gpa(struct kvm *kvm, gpa_t gpa)
{
	/* For TDX the direct mask is the shared mask. */
	return !kvm_is_addr_direct(kvm, gpa);
}

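/*
 * Translate an EPT violation's exit qualification into a synthetic page fault
 * error code and forward the fault to the common MMU.
 */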
static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
					     unsigned long exit_qualification)
{
	u64 error_code;

	/* Is it a read fault? */
	error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
		     ? PFERR_USER_MASK : 0;
	/* Is it a write fault? */
	error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
		      ? PFERR_WRITE_MASK : 0;
	/* Is it a fetch fault? */
	error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
		      ? PFERR_FETCH_MASK : 0;
	/* Is the EPT page table entry present? */
	error_code |= (exit_qualification & EPT_VIOLATION_PROT_MASK)
		      ? PFERR_PRESENT_MASK : 0;

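	/*
	 * For example, a guest write that hits a present, read-only mapping
	 * reports ACC_WRITE plus a readable-permission bit in the exit
	 * qualification, yielding PFERR_WRITE_MASK | PFERR_PRESENT_MASK.
	 */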
	if (exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
		error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
			      PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;

	if (vt_is_tdx_private_gpa(vcpu->kvm, gpa))
		error_code |= PFERR_PRIVATE_ACCESS;

	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}

static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
						     int pi_vec)
{
#ifdef CONFIG_SMP
	if (vcpu->mode == IN_GUEST_MODE) {
		/*
		 * The vector of the virtual interrupt has already been set in
		 * the PIR.  Send a notification event to deliver the virtual
		 * interrupt unless the vCPU is the currently running vCPU,
		 * i.e. the event is being sent from a fastpath VM-Exit
		 * handler, in which case the PIR will be synced to the vIRR
		 * before re-entering the guest.
		 *
		 * When the target is not the running vCPU, the following
		 * possibilities emerge:
		 *
		 * Case 1: vCPU stays in non-root mode. Sending a notification
		 * event posts the interrupt to the vCPU.
		 *
		 * Case 2: vCPU exits to root mode and is still runnable. The
		 * PIR will be synced to the vIRR before re-entering the guest.
		 * Sending a notification event is ok as the host IRQ handler
		 * will ignore the spurious event.
		 *
		 * Case 3: vCPU exits to root mode and is blocked. vcpu_block()
		 * has already synced PIR to vIRR and never blocks the vCPU if
		 * the vIRR is not empty. Therefore, a blocked vCPU here does
		 * not wait for any requested interrupts in PIR, and sending a
		 * notification event also results in a benign, spurious event.
		 */

		if (vcpu != kvm_get_running_vcpu())
			__apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
		return;
	}
#endif
	/*
	 * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
	 * otherwise do nothing as KVM will grab the highest priority pending
	 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
	 */
	kvm_vcpu_wake_up(vcpu);
}

/*
 * Post an interrupt to a vCPU's PIR and trigger the vCPU to process the
 * interrupt if necessary.
 */
static inline void __vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu,
						  struct pi_desc *pi_desc, int vector)
{
	if (pi_test_and_set_pir(vector, pi_desc))
		return;

	/* If a previous notification has sent the IPI, nothing to do. */
	if (pi_test_and_set_on(pi_desc))
		return;

	/*
	 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
	 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
	 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
	 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
	 */
	kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
}

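/* Defined in vmx.c and shared with the TDX code for handling NMI VM-Exits. */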
noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_COMMON_H */