xref: /linux/arch/x86/kvm/vmx/common.h (revision 7f9039c524a351c684149ecf1b3c5145a0dff2fe)
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_X86_VMX_COMMON_H
#define __KVM_X86_VMX_COMMON_H

#include <linux/kvm_host.h>
#include <asm/posted_intr.h>

#include "mmu.h"

union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};
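
/*
 * Illustrative sketch (not part of the header proper): the union lets a
 * caller read the raw 32-bit exit reason from the VMCS and then test
 * individual modifier bits, along the lines of vmx.c's usage:
 *
 *	union vmx_exit_reason reason;
 *
 *	reason.full = vmcs_read32(VM_EXIT_REASON);
 *	if (reason.failed_vmentry)
 *		pr_warn("VM-Entry failed, basic reason %u\n", reason.basic);
 *
 * vmcs_read32()/VM_EXIT_REASON are the standard VMX accessors; the pr_warn()
 * consumption of "basic" is only an example.
 */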

struct vcpu_vt {
	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

	union vmx_exit_reason exit_reason;

	unsigned long	exit_qualification;
	u32		exit_intr_info;

	/*
	 * If true, guest state has been loaded into hardware, and host state
	 * saved into vcpu_{vt,vmx,tdx}.  If false, host state is loaded into
	 * hardware.
	 */
	bool		guest_state_loaded;
	bool		emulation_required;

#ifdef CONFIG_X86_64
	u64		msr_host_kernel_gs_base;
#endif

	unsigned long	host_debugctlmsr;
};
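
/*
 * Illustrative sketch: guest_state_loaded gates the lazy host/guest state
 * switch, roughly mirroring vmx_prepare_switch_to_guest() in vmx.c (the
 * save_host_state() helper here is hypothetical):
 *
 *	if (vt->guest_state_loaded)
 *		return;
 *	save_host_state(vt);
 *	vt->guest_state_loaded = true;
 *
 * On x86-64 the saved state includes MSR_KERNEL_GS_BASE, cached in
 * msr_host_kernel_gs_base above.
 */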

#ifdef CONFIG_KVM_INTEL_TDX

static __always_inline bool is_td(struct kvm *kvm)
{
	return kvm->arch.vm_type == KVM_X86_TDX_VM;
}

static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
{
	return is_td(vcpu->kvm);
}

#else

static __always_inline bool is_td(struct kvm *kvm) { return false; }
static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }

#endif
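
/*
 * Illustrative sketch: code shared between VMX and TDX branches on the VM
 * type via these helpers, e.g. a vt_* dispatch wrapper in the style of
 * main.c (the *_some_op() names are representative, not exact):
 *
 *	static void vt_some_op(struct kvm_vcpu *vcpu)
 *	{
 *		if (is_td_vcpu(vcpu)) {
 *			tdx_some_op(vcpu);
 *			return;
 *		}
 *		vmx_some_op(vcpu);
 *	}
 *
 * With CONFIG_KVM_INTEL_TDX=n the stubs are constant false, letting the
 * compiler elide the TDX paths entirely.
 */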

static inline bool vt_is_tdx_private_gpa(struct kvm *kvm, gpa_t gpa)
{
	/* For TDX the direct mask is the shared mask. */
	return !kvm_is_addr_direct(kvm, gpa);
}
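
/*
 * Illustrative sketch: for TDX, a GPA is private exactly when the shared
 * bit is clear, which is the test kvm_is_addr_direct() performs.
 * Conceptually (assuming the kvm_gfn_direct_bits() helper in mmu.h):
 *
 *	gpa_t direct_mask = gfn_to_gpa(kvm_gfn_direct_bits(kvm));
 *
 *	private = direct_mask && !(gpa & direct_mask);
 *
 * For non-TDX VMs the mask is zero, so no GPA is considered private.
 */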

static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
					     unsigned long exit_qualification)
{
	u64 error_code;

	/* Is it a read fault? */
	error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
		     ? PFERR_USER_MASK : 0;
	/* Is it a write fault? */
	error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
		      ? PFERR_WRITE_MASK : 0;
	/* Is it a fetch fault? */
	error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
		      ? PFERR_FETCH_MASK : 0;
	/* Is the EPT page-table entry present? */
	error_code |= (exit_qualification & EPT_VIOLATION_PROT_MASK)
		      ? PFERR_PRESENT_MASK : 0;

	if (exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
		error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
			      PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;

	if (vt_is_tdx_private_gpa(vcpu->kvm, gpa))
		error_code |= PFERR_PRIVATE_ACCESS;

	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}
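
/*
 * Illustrative sketch: the VMX and TDX EPT-violation exit handlers funnel
 * into the helper above, roughly:
 *
 *	gpa_t gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 *
 *	return __vmx_handle_ept_violation(vcpu, gpa,
 *					  vmx_get_exit_qual(vcpu));
 *
 * vmx_get_exit_qual() is the VMX-side accessor; TDX pulls the equivalent
 * values out of the TDX-module exit information instead.
 */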

static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
						     int pi_vec)
{
#ifdef CONFIG_SMP
	if (vcpu->mode == IN_GUEST_MODE) {
		/*
		 * The vector of the virtual interrupt has already been set in
		 * the PIR.  Send a notification event to deliver the virtual
		 * interrupt unless the vCPU is the currently running vCPU,
		 * i.e. the event is being sent from a fastpath VM-Exit
		 * handler, in which case the PIR will be synced to the vIRR
		 * before re-entering the guest.
		 *
		 * When the target is not the running vCPU, the following
		 * possibilities emerge:
		 *
		 * Case 1: vCPU stays in non-root mode. Sending a notification
		 * event posts the interrupt to the vCPU.
		 *
		 * Case 2: vCPU exits to root mode and is still runnable. The
		 * PIR will be synced to the vIRR before re-entering the guest.
		 * Sending a notification event is ok as the host IRQ handler
		 * will ignore the spurious event.
		 *
		 * Case 3: vCPU exits to root mode and is blocked. vcpu_block()
		 * has already synced PIR to vIRR and never blocks the vCPU if
		 * the vIRR is not empty. Therefore, a blocked vCPU here does
		 * not wait for any requested interrupts in PIR, and sending a
		 * notification event also results in a benign, spurious event.
		 */

		if (vcpu != kvm_get_running_vcpu())
			__apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
		return;
	}
#endif
	/*
	 * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
	 * otherwise do nothing as KVM will grab the highest priority pending
	 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
	 */
	kvm_vcpu_wake_up(vcpu);
}

/*
 * Post an interrupt to a vCPU's PIR and trigger the vCPU to process the
 * interrupt if necessary.
 */
static inline void __vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu,
						  struct pi_desc *pi_desc, int vector)
{
	if (pi_test_and_set_pir(vector, pi_desc))
		return;

	/* If a previous notification has sent the IPI, nothing to do.  */
	if (pi_test_and_set_on(pi_desc))
		return;

	/*
	 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
	 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
	 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
	 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
	 */
	kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
}
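
/*
 * Illustrative sketch: a delivery path posts a vector into the vCPU's
 * descriptor and lets the helper above handle notification, e.g. (the
 * vt accessor is assumed, not defined here):
 *
 *	__vmx_deliver_posted_interrupt(vcpu, &vt->pi_desc, vector);
 *
 * If the PIR bit or PID.ON was already set, the helper returns early and
 * relies on the notification sent for the earlier interrupt.
 */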

noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_COMMON_H */