xref: /linux/arch/x86/kvm/x86.h (revision 19efffa244071ccd0385b240d03adb38feaab04e)
126eef70cSAvi Kivity #ifndef ARCH_X86_KVM_X86_H
226eef70cSAvi Kivity #define ARCH_X86_KVM_X86_H
326eef70cSAvi Kivity 
426eef70cSAvi Kivity #include <linux/kvm_host.h>
53eeb3288SAvi Kivity #include "kvm_cache_regs.h"
626eef70cSAvi Kivity 
774545705SRadim Krčmář #define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
874545705SRadim Krčmář 
/*
 * Drop any exception currently queued for injection into the guest.
 */
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}
1326eef70cSAvi Kivity 
/*
 * Queue an interrupt for injection into the guest.
 *
 * @vector: interrupt vector number to deliver.
 * @soft:   true for software interrupts (e.g. INTn), which are tracked
 *          separately from external interrupts via interrupt.soft.
 */
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}
21937a7eaeSAvi Kivity 
/*
 * Drop any interrupt currently queued for injection into the guest.
 */
static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}
26937a7eaeSAvi Kivity 
/*
 * True if an exception or interrupt is pending, or an NMI has already
 * been injected — i.e. there is an event awaiting (re)injection.
 */
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}
3266fd3f7fSGleb Natapov 
3366fd3f7fSGleb Natapov static inline bool kvm_exception_is_soft(unsigned int nr)
3466fd3f7fSGleb Natapov {
3566fd3f7fSGleb Natapov 	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
3666fd3f7fSGleb Natapov }
37fc61b800SGleb Natapov 
/*
 * True if the guest is in protected mode (CR0.PE set).
 */
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}
423eeb3288SAvi Kivity 
/*
 * Non-zero if the guest is in long mode (EFER.LMA set).  Long mode is
 * impossible on 32-bit hosts, so this is constant 0 without CONFIG_X86_64.
 */
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}
51836a1b3cSAvi Kivity 
/*
 * True if the guest is executing 64-bit code: long mode is active and
 * the current code segment has CS.L set (as opposed to compatibility
 * mode, where CS.L is clear).
 */
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}
615777392eSNadav Amit 
/*
 * True if the vcpu is currently walking guest page tables through the
 * nested MMU (i.e. a nested guest is running).
 */
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
666539e738SJoerg Roedel 
/*
 * Non-zero if the guest has PAE enabled (CR4.PAE set).
 */
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}
71836a1b3cSAvi Kivity 
/*
 * Non-zero if the guest has page size extensions enabled (CR4.PSE set).
 */
static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}
76836a1b3cSAvi Kivity 
/*
 * Non-zero if the guest has paging enabled (CR0.PG set).  likely() is a
 * branch-prediction hint: guests run with paging on almost all the time.
 */
static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}
81836a1b3cSAvi Kivity 
8224d1b15fSJoerg Roedel static inline u32 bit(int bitno)
8324d1b15fSJoerg Roedel {
8424d1b15fSJoerg Roedel 	return 1 << (bitno & 31);
8524d1b15fSJoerg Roedel }
8624d1b15fSJoerg Roedel 
/*
 * Cache an MMIO translation (page-aligned gva -> gfn plus the access
 * bits used for the walk) so a repeated access can skip the page walk.
 * The entry is stamped with the current memslot generation; a memslot
 * change bumps the generation and implicitly invalidates the cache
 * (see vcpu_match_mmio_gen()).
 */
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}
9556f17dd3SDavid Matlack 
/*
 * True if the cached MMIO info was recorded under the current memslot
 * generation, i.e. it has not been invalidated by a memslot update.
 */
static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}
100bebb106aSXiao Guangrong 
101bebb106aSXiao Guangrong /*
10256f17dd3SDavid Matlack  * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
10356f17dd3SDavid Matlack  * clear all mmio cache info.
104bebb106aSXiao Guangrong  */
10556f17dd3SDavid Matlack #define MMIO_GVA_ANY (~(gva_t)0)
10656f17dd3SDavid Matlack 
107bebb106aSXiao Guangrong static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
108bebb106aSXiao Guangrong {
10956f17dd3SDavid Matlack 	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
110bebb106aSXiao Guangrong 		return;
111bebb106aSXiao Guangrong 
112bebb106aSXiao Guangrong 	vcpu->arch.mmio_gva = 0;
113bebb106aSXiao Guangrong }
114bebb106aSXiao Guangrong 
115bebb106aSXiao Guangrong static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
116bebb106aSXiao Guangrong {
11756f17dd3SDavid Matlack 	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
11856f17dd3SDavid Matlack 	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
119bebb106aSXiao Guangrong 		return true;
120bebb106aSXiao Guangrong 
121bebb106aSXiao Guangrong 	return false;
122bebb106aSXiao Guangrong }
123bebb106aSXiao Guangrong 
124bebb106aSXiao Guangrong static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
125bebb106aSXiao Guangrong {
12656f17dd3SDavid Matlack 	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
12756f17dd3SDavid Matlack 	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
128bebb106aSXiao Guangrong 		return true;
129bebb106aSXiao Guangrong 
130bebb106aSXiao Guangrong 	return false;
131bebb106aSXiao Guangrong }
132bebb106aSXiao Guangrong 
1335777392eSNadav Amit static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
1345777392eSNadav Amit 					       enum kvm_reg reg)
1355777392eSNadav Amit {
1365777392eSNadav Amit 	unsigned long val = kvm_register_read(vcpu, reg);
1375777392eSNadav Amit 
1385777392eSNadav Amit 	return is_64_bit_mode(vcpu) ? val : (u32)val;
1395777392eSNadav Amit }
1405777392eSNadav Amit 
14127e6fb5dSNadav Amit static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
14227e6fb5dSNadav Amit 				       enum kvm_reg reg,
14327e6fb5dSNadav Amit 				       unsigned long val)
14427e6fb5dSNadav Amit {
14527e6fb5dSNadav Amit 	if (!is_64_bit_mode(vcpu))
14627e6fb5dSNadav Amit 		val = (u32)val;
14727e6fb5dSNadav Amit 	return kvm_register_write(vcpu, reg, val);
14827e6fb5dSNadav Amit }
14927e6fb5dSNadav Amit 
150ff9d07a0SZhang, Yanmin void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
151ff9d07a0SZhang, Yanmin void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
152bab5bb39SNicholas Krause void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
15371f9833bSSerge E. Hallyn int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
154ff9d07a0SZhang, Yanmin 
1558fe8ab46SWill Auld void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
15699e3e30aSZachary Amsden 
157064aea77SNadav Har'El int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
158064aea77SNadav Har'El 	gva_t addr, void *val, unsigned int bytes,
159064aea77SNadav Har'El 	struct x86_exception *exception);
160064aea77SNadav Har'El 
1616a4d7550SNadav Har'El int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
1626a4d7550SNadav Har'El 	gva_t addr, void *val, unsigned int bytes,
1636a4d7550SNadav Har'El 	struct x86_exception *exception);
1646a4d7550SNadav Har'El 
165*19efffa2SXiao Guangrong void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
166ff53604bSXiao Guangrong u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
1674566654bSNadav Amit bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
168ff53604bSXiao Guangrong int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
169ff53604bSXiao Guangrong int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
1704566654bSNadav Amit 
171390bd528SLiu, Jinsong #define KVM_SUPPORTED_XCR0     (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
172612263b3SChao Peng 				| XSTATE_BNDREGS | XSTATE_BNDCSR \
173612263b3SChao Peng 				| XSTATE_AVX512)
17400b27a3eSAvi Kivity extern u64 host_xcr0;
17500b27a3eSAvi Kivity 
1764ff41732SPaolo Bonzini extern u64 kvm_supported_xcr0(void);
1774ff41732SPaolo Bonzini 
1789ed96e87SMarcelo Tosatti extern unsigned int min_timer_period_us;
1799ed96e87SMarcelo Tosatti 
180d0659d94SMarcelo Tosatti extern unsigned int lapic_timer_advance_ns;
181d0659d94SMarcelo Tosatti 
18254e9818fSGleb Natapov extern struct static_key kvm_no_apic_vcpu;
18326eef70cSAvi Kivity #endif
184