xref: /linux/arch/x86/kvm/x86.h (revision fb3146b4dc3bc6d0c0402a75f21d628eccf9bf8c)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32  max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8   tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64  max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64  default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;
};
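
/*
 * Illustrative numbers (not defined here): VMX reports 48 fractional
 * bits, so its 1:1 default_tsc_scaling_ratio is 1ull << 48, and a guest
 * running at half the host TSC rate would use a ratio of 1ull << 47;
 * SVM uses a 32-fractional-bit (8.32) format instead.
 */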

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)		\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})
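
/*
 * Illustrative use (a sketch, not code from this header): nested
 * VM-entry consistency checks wrap each condition in this macro so a
 * failure is traced together with the stringified expression, e.g.
 *
 *	if (KVM_NESTED_VMENTER_CONSISTENCY_CHECK(!page_address_valid(vcpu, addr)))
 *		return -EINVAL;
 *
 * where page_address_valid() is only an assumed helper for the example.
 */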

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}
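
/*
 * Worked example: with the VMX defaults (val = base = 4096, modifier
 * KVM_DEFAULT_PLE_WINDOW_GROW = 2, max = UINT_MAX), modifier < base so
 * the window doubles on each grow: 4096 -> 8192 -> 16384 -> ..., capped
 * at max.  A modifier >= base is treated as an additive step instead,
 * and the 64-bit intermediate avoids overflow before the clamp.
 */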

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
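
/*
 * Shrinking mirrors growing: a small modifier divides the window (e.g.
 * 16384 / 2 = 8192), a large one subtracts, and the result never drops
 * below the caller-supplied minimum.  Note that the default shrink
 * modifier is 0, which resets the window straight back to base.
 */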

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu != -1;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}
static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible.  The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}
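
/*
 * For example, x86_exception_has_error_code(GP_VECTOR) is true (a #GP
 * pushes an error code on the stack), while
 * x86_exception_has_error_code(UD_VECTOR) is false (#UD pushes none),
 * matching the SDM's exception definitions.
 */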

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}

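/*
 * Worked example: with 4-level paging (48 virtual-address bits) an
 * address is canonical iff bits 63:47 are all copies of bit 47, so
 * 0xffff800000000000 is canonical while 0x0000800000000000 is not.
 * With CR4.LA57 set, the sign-extension boundary moves up to bit 56.
 */
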
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}
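
/*
 * Together these helpers form a one-entry MMIO fast path: once a
 * page-table walk identifies an MMIO access, vcpu_cache_mmio_info()
 * records the gva/gfn pair tagged with the memslot generation, and
 * subsequent accesses that hit vcpu_match_mmio_gva()/vcpu_match_mmio_gpa()
 * can skip the walk.  A stale generation invalidates the entry for free.
 */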

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				       int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}
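
/*
 * These wrappers mirror hardware behavior outside 64-bit mode, where
 * only the low 32 bits of a GPR are architecturally visible: reads
 * truncate the raw value and writes store the zero-extended low dword,
 * so e.g. writing 0x1234567890 in 32-bit mode stores 0x34567890.
 */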

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 host_xss;

extern struct kvm_caps kvm_caps;

extern bool enable_pmu;

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
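
/*
 * Worked example: do_shl32_div32(n, base) computes (n << 32) / base in
 * a single divl with EDX:EAX = n:0.  For n = 3, base = 4, the quotient
 * 0xC0000000 lands back in n and the remainder 0 is the expression's
 * value.  The caller must ensure the quotient fits in 32 bits, else
 * divl raises #DE.
 */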

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

enum kvm_intr_type {
	/* Values are arbitrary, but must be non-zero. */
	KVM_HANDLING_IRQ = 1,
	KVM_HANDLING_NMI,
};

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}
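
/*
 * Sketch of the intended pairing (actual callers live outside this
 * header): the vendor exit path brackets handling of a host interrupt
 * or NMI that arrived in the guest with
 * kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ or KVM_HANDLING_NMI) and
 * kvm_after_interrupt(), so code such as a PMI handler can consult
 * kvm_handling_nmi_from_guest() to attribute the event to the guest.
 */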

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
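
/*
 * How the bit trick works: after the first check each PAT byte is in
 * 0..7, and the invalid values 2 (0b010) and 3 (0b011) are exactly the
 * ones with bit 1 set but bit 2 clear.  OR-ing bit 1 shifted into bit
 * 2's position changes such a byte, so the equality fails for them.
 */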

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}
static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace
 * handles it.
 */
#define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	__reserved_bits;                                \
})
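
/*
 * Illustrative use (a sketch; the real call sites are elsewhere in
 * KVM): the macro takes a feature predicate and its context, e.g.
 *
 *	vcpu->arch.cr4_guest_rsvd_bits =
 *		__cr4_reserved_bits(guest_cpuid_has, vcpu);
 *
 * which yields the CR4 bits a guest lacking the corresponding CPUID
 * features must keep clear.
 */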

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

#endif