xref: /linux/arch/x86/kvm/x86.h (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"

/* Power-on default value of the IA32_PAT MSR (see the Intel SDM). */
#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}
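
/*
 * Illustrative sketch (hypothetical call site, not part of this header):
 * emulating an INTn instruction queues the vector as a *soft* interrupt,
 * whereas an external IRQ picked up from the local APIC would be queued
 * with soft == false.
 */
static inline void example_emulate_intn(struct kvm_vcpu *vcpu, u8 vector)
{
	kvm_queue_interrupt(vcpu, vector, true);
}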

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}
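
/*
 * Illustrative sketch (hypothetical helper, loosely modelled on the vendor
 * injection paths): a new event may only be injected once any interrupted
 * delivery has been replayed, and soft vectors (#BP from INT3, #OF from
 * INTO) are delivered like INTn, so injection also needs the length of the
 * trapping instruction.
 */
static inline bool example_inject_ok(struct kvm_vcpu *vcpu, unsigned int nr,
				     bool have_insn_len)
{
	if (kvm_event_needs_reinjection(vcpu))
		return false;		/* replay the pending event first */

	/* #BP/#OF push a RIP past the instruction, hence the length. */
	if (kvm_exception_is_soft(nr) && !have_insn_len)
		return false;

	return true;
}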

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}
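
/*
 * Illustrative sketch (hypothetical helper): the predicates above yield the
 * effective width of the linear addresses the guest generates, which an
 * emulator would use when truncating computed addresses.
 */
static inline unsigned int example_guest_va_bits(struct kvm_vcpu *vcpu)
{
	if (is_64_bit_mode(vcpu))
		return 64;	/* long mode with CS.L set */

	/* Compatibility, protected and real mode use 32-bit linear addresses. */
	return 32;
}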

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}
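
/*
 * Illustrative sketch (hypothetical helper): how the control-register
 * predicates above combine to name the page-table format the guest is
 * using, mirroring the distinctions KVM's MMU has to make.
 */
static inline const char *example_guest_paging_mode(struct kvm_vcpu *vcpu)
{
	if (!is_paging(vcpu))
		return "none";		/* CR0.PG clear */
	if (is_long_mode(vcpu))
		return "4-level";	/* IA-32e paging */
	if (is_pae(vcpu))
		return "PAE";		/* 3-level, 64-bit entries */
	return is_pse(vcpu) ? "32-bit, 4M pages" : "32-bit";
}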

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}
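
/*
 * Illustrative sketch: KVM's CPUID code builds feature masks with bit().
 * X86_FEATURE_* constants (from <asm/cpufeatures.h>, assumed visible via
 * the includes above) encode word * 32 + bit, and the "& 31" in bit()
 * strips the word part.  The two features below are arbitrary examples.
 */
static inline u32 example_cpuid_edx_mask(void)
{
	return bit(X86_FEATURE_FPU) | bit(X86_FEATURE_PSE);
}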

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}
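
/*
 * Illustrative sketch (hypothetical flow): the intended life cycle of the
 * MMIO cache above -- populate it when a page-table walk hits MMIO, hit it
 * on the next access to the same page to skip the walk, and clear it (see
 * vcpu_clear_mmio_info) whenever the translation may have changed.
 */
static inline bool example_mmio_lookup(struct kvm_vcpu *vcpu, gva_t gva,
				       gfn_t gfn, unsigned access)
{
	if (vcpu_match_mmio_gva(vcpu, gva))
		return true;			/* fast path: reuse last walk */

	vcpu_cache_mmio_info(vcpu, gva, gfn, access);
	return false;				/* slow path ran; cached now */
}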

static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	kvm_register_write(vcpu, reg, val);
}
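
/*
 * Illustrative sketch (hypothetical): emulating a register-to-register move
 * with the mode-aware accessors, so that outside 64-bit mode the value is
 * truncated to 32 bits exactly as the hardware would truncate it.
 */
static inline void example_emulate_mov(struct kvm_vcpu *vcpu,
				       enum kvm_reg dst, enum kvm_reg src)
{
	kvm_register_writel(vcpu, dst, kvm_register_readl(vcpu, src));
}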

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}
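
/*
 * Illustrative sketch (hypothetical call site): quirks gate legacy behaviour
 * that userspace can opt out of via KVM_CAP_DISABLE_QUIRKS; the quirk bit
 * below is one of those defined in the uapi headers (assumed to be in scope
 * here).
 */
static inline bool example_lint0_quirk_active(struct kvm *kvm)
{
	return kvm_check_has_quirk(kvm, KVM_X86_QUIRK_LINT0_REENABLED);
}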

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);

#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);
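
/*
 * Illustrative sketch (simplified; the real checks live in the XSETBV
 * emulation in x86.c): a guest XCR0 value must keep x87 state enabled and
 * may not enable any feature KVM does not expose.
 */
static inline bool example_xcr0_acceptable(u64 xcr0)
{
	if (!(xcr0 & XFEATURE_MASK_FP))		/* bit 0 is architecturally required */
		return false;

	return !(xcr0 & ~kvm_supported_xcr0());	/* no unsupported features */
}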

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}
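
/*
 * Illustrative sketch (hypothetical): converting an emulated-timer period to
 * guest TSC cycles, clamping to the module-configurable minimum first.
 * Assumes max() and NSEC_PER_USEC are available via the core headers.
 */
static inline u64 example_timer_period_cycles(struct kvm_vcpu *vcpu,
					      u64 period_ns)
{
	u64 min_ns = (u64)min_timer_period_us * NSEC_PER_USEC;

	return nsec_to_cycles(vcpu, max(period_ns, min_ns));
}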

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
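
/*
 * Illustrative sketch: do_shl32_div32() computes (n << 32) / base with a
 * single DIVL, which is useful for 32.32 fixed-point ratios.  The caller
 * must guarantee n < base, or the quotient overflows 32 bits and the
 * division faults.
 */
static inline u32 example_fixed_point_frac(u32 n, u32 base)
{
	u32 frac = n;

	do_shl32_div32(frac, base);	/* frac = (n << 32) / base */
	return frac;
}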

#endif /* ARCH_X86_KVM_X86_H */