/* xref: /linux/arch/x86/kvm/x86.h (revision 04140b4144cd888c080cddbb2be2ec603f00d081) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

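/*
 * Scale the pause-loop exiting (PLE) window.  A modifier of 0 resets the
 * window to its base value; a modifier smaller than the base scales the
 * window multiplicatively, a larger one additively.  The result is clamped
 * to the caller-supplied max (grow) or min (shrink).
 */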
static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}
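/*
 * For example, with the VMX defaults above, __grow_ple_window(4096, 4096,
 * KVM_DEFAULT_PLE_WINDOW_GROW, UINT_MAX) doubles the window to 8192.
 */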

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
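/*
 * Shrinking mirrors the above: __shrink_ple_window(16384, 4096, 2, 4096)
 * halves the window to 8192, while a modifier of 0 (the default
 * KVM_DEFAULT_PLE_WINDOW_SHRINK) resets the window straight back to base.
 */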

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
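/*
 * MSR_IA32_CR_PAT_DEFAULT is the architectural power-on value of IA32_PAT:
 * PAT entries of WB, WT, UC- and UC, repeated for entries 4-7.
 */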

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

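/*
 * "Soft" exceptions are generated by an instruction rather than by the
 * hardware: #BP (INT3) and #OF (INTO).
 */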
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

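/*
 * Being in 64-bit mode proper requires both long mode (EFER.LMA) and a
 * code segment with CS.L set; with CS.L clear the vCPU runs in
 * compatibility mode.
 */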
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		 kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}
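/*
 * get_canonical() sign-extends bit (vaddr_bits - 1) into the upper bits,
 * e.g. with 48-bit addressing 0x0000800000000000 becomes
 * 0xffff800000000000; canonical addresses are returned unchanged.
 */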

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
#else
	return false;
#endif
}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
#else
	return false;
#endif
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}
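/*
 * The cached mmio info is tagged with the current memslot generation, so
 * it is implicitly invalidated whenever the memslots change.
 */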

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

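/*
 * The *l register accessors truncate values to 32 bits whenever the vCPU
 * is not in 64-bit mode, matching the operand width the guest sees.
 */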
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);

#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern bool enable_vmware_backdoor;

extern struct static_key kvm_no_apic_vcpu;

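/*
 * Convert a span of guest time in nanoseconds into virtual TSC cycles,
 * using the vCPU's virtual_tsc_mult/virtual_tsc_shift scaling factors.
 */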
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
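/*
 * For example, with n == 5 and base == 8 the macro divides 5 << 32 by 8:
 * n becomes 0xa0000000 and the expression evaluates to the remainder, 0.
 */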

#define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
#define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
                                              KVM_X86_DISABLE_EXITS_HTL | \
                                              KVM_X86_DISABLE_EXITS_PAUSE)
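/*
 * These bits are used with the KVM_CAP_X86_DISABLE_EXITS capability; the
 * kvm_*_in_guest() helpers below report whether MWAIT, HLT and PAUSE may
 * execute in the guest without causing an exit.
 */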

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);

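/*
 * Bracket host interrupt handling performed on behalf of a guest, so that
 * e.g. the perf NMI handler can use current_vcpu to attribute samples to
 * the guest.
 */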
static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}

#endif