/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32  max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8   tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64  max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64  default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;
	/* bit mask of VM types */
	u32 supported_vm_types;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;
};

struct kvm_host_values {
	u64 efer;
	u64 xcr0;
	u64 xss;
	u64 arch_capabilities;
};

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)		\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})
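
/*
 * Illustrative use (a sketch, not a real call site): nested VM-Enter code
 * wraps each architectural consistency check so that a failure traces the
 * literal check as a string, e.g.
 *
 *	if (KVM_NESTED_VMENTER_CONSISTENCY_CHECK(!aligned(addr)))
 *		return -EINVAL;
 *
 * where aligned() is a hypothetical predicate.  The macro evaluates to the
 * check's boolean result, so it can sit directly in an if () condition.
 */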

/*
 * The first...last VMX feature MSRs that are emulated by KVM.  This may or may
 * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an
 * associated feature that KVM supports for nested virtualization.
 */
#define KVM_FIRST_EMULATED_VMX_MSR	MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR	MSR_IA32_VMX_VMFUNC

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}
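
/*
 * Worked example: growing from the VMX default window of 4096 with the
 * default grow modifier of 2 yields min(4096 * 2, max) = 8192.  A modifier
 * of 0 resets the window to base, and a modifier >= base falls back to
 * additive growth; the u64 intermediate keeps the multiply from wrapping
 * before the clamp against max.
 */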

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
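
/*
 * Worked example: shrinking 8192 with a modifier of 2 yields
 * max(8192 / 2, min) = 4096.  As with __grow_ple_window(), a modifier of 0
 * resets the window to base, and a modifier >= base turns the divide into
 * a subtraction.
 */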

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
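
/*
 * Decoded one byte per PAT entry starting from the least significant byte
 * (PA0), the default value is WB (06), WT (04), UC- (07), UC (00), with the
 * same pattern repeated for PA4..PA7, i.e. the architectural power-on value
 * of IA32_PAT.
 */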

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu != -1;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return !!(vcpu->arch.efer & EFER_LMA);
#else
	return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible.  The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}
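
/*
 * E.g. #GP (vector 13) pushes an error code, so the helper returns true for
 * GP_VECTOR, whereas #BP (vector 3) and #UD (vector 6) do not and yield
 * false.
 */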

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}
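
/*
 * Example: with 48-bit virtual addresses (CR4.LA57 clear), an address is
 * canonical iff bits 63:47 are all ones or all zeros, so 0xffff800000000000
 * is canonical while 0x0000800000000000 is not.
 */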

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}
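
/*
 * The cached MMIO translation is keyed to the memslot generation: any
 * memslot update bumps the generation, so stale entries simply stop
 * matching rather than needing explicit invalidation on every memslot
 * change.
 */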

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				       int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm);
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern struct kvm_caps kvm_caps;
extern struct kvm_host_values kvm_host;

extern bool enable_pmu;

/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2; it does NOT need to be
 * used to check/restrict guest behavior, as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
	u64 permitted_xcr0 = kvm_caps.supported_xcr0;

	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
		permitted_xcr0 &= xstate_get_guest_group_perm();

		/*
		 * Treat XTILE_CFG as unsupported if the current process isn't
		 * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
		 * XCR0 without setting XTILE_DATA is architecturally illegal.
		 */
		if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
			permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
	}
	return permitted_xcr0;
}

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
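
/*
 * Worked example: n = 1, base = 1000 leaves n = 4294967 and returns 296,
 * since (1 << 32) = 4294967296 = 4294967 * 1000 + 296.  The 32-bit quotient
 * means this is only safe when n < base; otherwise divl faults with #DE.
 */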

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
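
/*
 * The final expression rejects the reserved encodings 2 and 3 in any byte:
 * an entry is invalid iff bit 1 is set while bit 2 is clear, and OR-ing
 * (bit 1 << 1) into the value changes it exactly in that case.  E.g. for an
 * entry of 6 (0b110) the OR is a no-op, while for 2 (0b010) it yields
 * 6 != 2 and the check fails.
 */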

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}
static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace
 * handles it.
 */
#define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	if (!__cpu_has(__c, X86_FEATURE_LAM))           \
		__reserved_bits |= X86_CR4_LAM_SUP;     \
	__reserved_bits;                                \
})
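
/*
 * Sketch of the intended usage (hypothetical wrapper; the real call sites
 * pass their own predicate/context pairs): __cpu_has is any two-argument
 * feature predicate and __c its first argument, so the same reserved-bit
 * computation can be evaluated against either host or guest feature sets,
 * e.g.
 *
 *	u64 host_bits  = __cr4_reserved_bits(cpu_has, &boot_cpu_data);
 *	u64 guest_bits = __cr4_reserved_bits(guest_cpuid_has, vcpu);
 */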

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

#endif