/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32  max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8   tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64  max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64  default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;
	/* bit mask of VM types */
	u32 supported_vm_types;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;
};
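
/*
 * Illustration (not part of the upstream header): the TSC scaling ratio is a
 * binary fixed-point number with tsc_scaling_ratio_frac_bits fractional bits
 * (48 on VMX, 32 on SVM), so default_tsc_scaling_ratio encodes a 1:1
 * guest/host frequency ratio.  Scaling a host TSC value then looks roughly
 * like the following sketch:
 *
 *	u64 guest_tsc = mul_u64_u64_shr(host_tsc, ratio,
 *					kvm_caps.tsc_scaling_ratio_frac_bits);
 */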

struct kvm_host_values {
	/*
	 * The host's raw MAXPHYADDR, i.e. the number of non-reserved physical
	 * address bits irrespective of features that repurpose legal bits,
	 * e.g. MKTME.
	 */
	u8 maxphyaddr;

	u64 efer;
	u64 xcr0;
	u64 xss;
	u64 arch_capabilities;
};

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)		\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})
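
/*
 * Typical usage (a sketch; the short "CC" alias mirrors how the nested code
 * wraps this macro, and the specific consistency check shown is hypothetical):
 *
 *	#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
 *
 *	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)))
 *		return -EINVAL;
 */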

/*
 * The first...last VMX feature MSRs that are emulated by KVM.  This may or may
 * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an
 * associated feature that KVM supports for nested virtualization.
 */
#define KVM_FIRST_EMULATED_VMX_MSR	MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR	MSR_IA32_VMX_VMFUNC

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
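
/*
 * Worked example (illustrative only), using the VMX defaults above: with
 * base = KVM_VMX_DEFAULT_PLE_WINDOW (4096) and the default grow modifier of
 * 2, growing val = 4096 multiplies (modifier < base) to 8192, and shrinking
 * 8192 with modifier 2 divides back to 4096.  A modifier >= base is instead
 * applied as an additive/subtractive step, and the result is always clamped
 * to the supplied max/min.
 */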

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
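
/*
 * For reference: read byte by byte from low to high, the default value above
 * decodes to the architectural power-on PAT, i.e. WB (06), WT (04), UC- (07)
 * and UC (00) for entries 0-3, repeated for entries 4-7.
 */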

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu != -1;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return !!(vcpu->arch.efer & EFER_LMA);
#else
	return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible. The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}
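
/*
 * For example, #PF (PF_VECTOR, 14) and #GP (GP_VECTOR, 13) push an error code
 * and are covered by the mask above, whereas #UD (UD_VECTOR, 6) and #DB
 * (DB_VECTOR, 1) do not.
 */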

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}
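
/*
 * Illustration (not in the upstream header): with 48 virtual address bits, a
 * canonical address sign-extends bit 47, so 0xffff800000000000 is canonical
 * while 0x0000800000000000 is not and is rejected by the check above.
 */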

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				       int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm);
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern struct kvm_caps kvm_caps;
extern struct kvm_host_values kvm_host;

extern bool enable_pmu;
/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2; it does NOT need to be
 * used to check/restrict guest behavior as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
	u64 permitted_xcr0 = kvm_caps.supported_xcr0;

	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
		permitted_xcr0 &= xstate_get_guest_group_perm();

		/*
		 * Treat XTILE_CFG as unsupported if the current process isn't
		 * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
		 * XCR0 without setting XTILE_DATA is architecturally illegal.
		 */
		if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
			permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
	}
	return permitted_xcr0;
}

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}
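
/*
 * Back-of-the-envelope check (illustrative only): virtual_tsc_mult and
 * virtual_tsc_shift encode the guest's virtual TSC frequency, so for a guest
 * TSC running at 2 GHz (2 cycles per nanosecond), nsec_to_cycles(vcpu, 1000)
 * yields roughly 2000 cycles.
 */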

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
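
/*
 * Worked example (illustrative only): with n = 1 and base = 3, the divl
 * computes (1 << 32) / 3, leaving n = 0x55555555 and returning remainder 1.
 *
 *	u32 n = 1;
 *	u32 rem = do_shl32_div32(n, 3);	// n == 0x55555555, rem == 1
 */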

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
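
/*
 * How the check above works: each PAT entry occupies the low 3 bits of a
 * byte, and only memory types 0, 1, 4, 5, 6 and 7 are defined, i.e. 2 and 3
 * are reserved.  The OR forces bit 2 on wherever bit 1 is set; only the
 * reserved values 2 (0b010) and 3 (0b011) change under that transformation,
 * so they compare unequal and are rejected.  E.g. a byte of 0x02 becomes
 * 0x06 and fails, while 0x06 is unchanged and passes.
 */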

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}
static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace
 * handles it.
 */
#define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	if (!__cpu_has(__c, X86_FEATURE_LAM))           \
		__reserved_bits |= X86_CR4_LAM_SUP;     \
	__reserved_bits;                                \
})
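
/*
 * Usage sketch (illustrative; based on how the macro is instantiated with
 * different "has" helpers, one for host capabilities and one for a vCPU's
 * guest CPUID):
 *
 *	cr4_reserved_bits = __cr4_reserved_bits(cpu_has, &boot_cpu_data);
 *	vcpu->arch.cr4_guest_rsvd_bits =
 *		__cr4_reserved_bits(guest_cpuid_has, vcpu);
 */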

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data,  unsigned int count,
			 int in);

#endif