/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/processor.h
 *
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>

#include <asm/msr-index.h>

#define X86_EFLAGS_FIXED	(1u << 1)

#define X86_CR4_VME		(1ul << 0)
#define X86_CR4_PVI		(1ul << 1)
#define X86_CR4_TSD		(1ul << 2)
#define X86_CR4_DE		(1ul << 3)
#define X86_CR4_PSE		(1ul << 4)
#define X86_CR4_PAE		(1ul << 5)
#define X86_CR4_MCE		(1ul << 6)
#define X86_CR4_PGE		(1ul << 7)
#define X86_CR4_PCE		(1ul << 8)
#define X86_CR4_OSFXSR		(1ul << 9)
#define X86_CR4_OSXMMEXCPT	(1ul << 10)
#define X86_CR4_UMIP		(1ul << 11)
#define X86_CR4_VMXE		(1ul << 13)
#define X86_CR4_SMXE		(1ul << 14)
#define X86_CR4_FSGSBASE	(1ul << 16)
#define X86_CR4_PCIDE		(1ul << 17)
#define X86_CR4_OSXSAVE		(1ul << 18)
#define X86_CR4_SMEP		(1ul << 20)
#define X86_CR4_SMAP		(1ul << 21)
#define X86_CR4_PKE		(1ul << 22)

/* General Registers in 64-Bit Mode */
struct gpr64_regs {
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 rsp;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
};
struct desc64 {
	uint16_t limit0;
	uint16_t base0;
	unsigned base1:8, type:4, s:1, dpl:2, p:1;
	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
	uint32_t base3;
	uint32_t zero1;
} __attribute__((packed));

struct desc_ptr {
	uint16_t size;
	uint64_t address;
} __attribute__((packed));

static inline uint64_t get_desc64_base(const struct desc64 *desc)
{
	return ((uint64_t)desc->base3 << 32) |
	       ((uint64_t)desc->base2 << 24) |
	       ((uint64_t)desc->base1 << 16) |
	       desc->base0;
}
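
/*
 * Illustrative worked example (made-up values): with base0 = 0x8000,
 * base1 = 0x12, base2 = 0x34 and base3 = 0x1, the fields occupy bits
 * 0-15, 16-23, 24-31 and 32-63 of the base respectively, so
 * get_desc64_base() returns 0x134128000.
 */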

static inline uint64_t rdtsc(void)
{
	uint32_t eax, edx;
	uint64_t tsc_val;
	/*
	 * The lfence is to wait (on Intel CPUs) until all previous
	 * instructions have been executed.  If software requires RDTSC to be
	 * executed prior to execution of any subsequent instruction, it can
	 * execute LFENCE immediately after RDTSC.
	 */
	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc_val = ((uint64_t)edx) << 32 | eax;
	return tsc_val;
}
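
/*
 * Illustrative usage sketch: the fences in rdtsc() keep both reads
 * from drifting across the measured region, so elapsed cycles are a
 * simple difference:
 *
 *	uint64_t start = rdtsc();
 *	... code under measurement ...
 *	uint64_t cycles = rdtsc() - start;
 */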

static inline uint64_t rdtscp(uint32_t *aux)
{
	uint32_t eax, edx;

	__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
	return ((uint64_t)edx) << 32 | eax;
}

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t a, d;

	__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

static inline void wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}
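
/*
 * Illustrative sketch (not part of the original API): MSR bits are
 * typically flipped with a read-modify-write so that unrelated bits
 * survive, e.g. msr_set_bits_example(MSR_IA32_MISC_ENABLE, bit) with
 * MSR_IA32_MISC_ENABLE coming from <asm/msr-index.h>.
 */
static inline void msr_set_bits_example(uint32_t msr, uint64_t mask)
{
	wrmsr(msr, rdmsr(msr) | mask);
}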

static inline uint16_t inw(uint16_t port)
{
	uint16_t tmp;

	__asm__ __volatile__("in %%dx, %%ax"
		: /* output */ "=a" (tmp)
		: /* input */ "d" (port));

	return tmp;
}

static inline uint16_t get_es(void)
{
	uint16_t es;

	__asm__ __volatile__("mov %%es, %[es]"
			     : /* output */ [es]"=rm"(es));
	return es;
}

static inline uint16_t get_cs(void)
{
	uint16_t cs;

	__asm__ __volatile__("mov %%cs, %[cs]"
			     : /* output */ [cs]"=rm"(cs));
	return cs;
}

static inline uint16_t get_ss(void)
{
	uint16_t ss;

	__asm__ __volatile__("mov %%ss, %[ss]"
			     : /* output */ [ss]"=rm"(ss));
	return ss;
}

static inline uint16_t get_ds(void)
{
	uint16_t ds;

	__asm__ __volatile__("mov %%ds, %[ds]"
			     : /* output */ [ds]"=rm"(ds));
	return ds;
}

static inline uint16_t get_fs(void)
{
	uint16_t fs;

	__asm__ __volatile__("mov %%fs, %[fs]"
			     : /* output */ [fs]"=rm"(fs));
	return fs;
}

static inline uint16_t get_gs(void)
{
	uint16_t gs;

	__asm__ __volatile__("mov %%gs, %[gs]"
			     : /* output */ [gs]"=rm"(gs));
	return gs;
}

static inline uint16_t get_tr(void)
{
	uint16_t tr;

	__asm__ __volatile__("str %[tr]"
			     : /* output */ [tr]"=rm"(tr));
	return tr;
}

static inline uint64_t get_cr0(void)
{
	uint64_t cr0;

	__asm__ __volatile__("mov %%cr0, %[cr0]"
			     : /* output */ [cr0]"=r"(cr0));
	return cr0;
}

static inline uint64_t get_cr3(void)
{
	uint64_t cr3;

	__asm__ __volatile__("mov %%cr3, %[cr3]"
			     : /* output */ [cr3]"=r"(cr3));
	return cr3;
}

static inline uint64_t get_cr4(void)
{
	uint64_t cr4;

	__asm__ __volatile__("mov %%cr4, %[cr4]"
			     : /* output */ [cr4]"=r"(cr4));
	return cr4;
}

static inline void set_cr4(uint64_t val)
{
	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}
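
/*
 * Illustrative sketch (not part of the original API): CR4 feature
 * bits are normally enabled with a read-modify-write, e.g.
 * CR4.OSXSAVE must be set before XGETBV/XSETBV will execute without
 * raising #UD.
 */
static inline void cr4_set_bits_example(uint64_t mask)
{
	set_cr4(get_cr4() | mask);
}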

static inline struct desc_ptr get_gdt(void)
{
	struct desc_ptr gdt;
	__asm__ __volatile__("sgdt %[gdt]"
			     : /* output */ [gdt]"=m"(gdt));
	return gdt;
}

static inline struct desc_ptr get_idt(void)
{
	struct desc_ptr idt;
	__asm__ __volatile__("sidt %[idt]"
			     : /* output */ [idt]"=m"(idt));
	return idt;
}
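
/*
 * Illustrative sketch (not part of the original API): desc_ptr.size
 * holds the table limit (bytes minus one) and desc_ptr.address the
 * linear base, so with 16-byte IDT gates in 64-bit mode:
 */
static inline uint32_t num_idt_entries_example(void)
{
	return (get_idt().size + 1) / 16;
}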

#define SET_XMM(__var, __xmm) \
	asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm)

static inline void set_xmm(int n, unsigned long val)
{
	switch (n) {
	case 0:
		SET_XMM(val, xmm0);
		break;
	case 1:
		SET_XMM(val, xmm1);
		break;
	case 2:
		SET_XMM(val, xmm2);
		break;
	case 3:
		SET_XMM(val, xmm3);
		break;
	case 4:
		SET_XMM(val, xmm4);
		break;
	case 5:
		SET_XMM(val, xmm5);
		break;
	case 6:
		SET_XMM(val, xmm6);
		break;
	case 7:
		SET_XMM(val, xmm7);
		break;
	}
}

typedef unsigned long v1di __attribute__ ((vector_size (8)));
static inline unsigned long get_xmm(int n)
{
	assert(n >= 0 && n <= 7);

	register v1di xmm0 __asm__("%xmm0");
	register v1di xmm1 __asm__("%xmm1");
	register v1di xmm2 __asm__("%xmm2");
	register v1di xmm3 __asm__("%xmm3");
	register v1di xmm4 __asm__("%xmm4");
	register v1di xmm5 __asm__("%xmm5");
	register v1di xmm6 __asm__("%xmm6");
	register v1di xmm7 __asm__("%xmm7");
	switch (n) {
	case 0:
		return (unsigned long)xmm0;
	case 1:
		return (unsigned long)xmm1;
	case 2:
		return (unsigned long)xmm2;
	case 3:
		return (unsigned long)xmm3;
	case 4:
		return (unsigned long)xmm4;
	case 5:
		return (unsigned long)xmm5;
	case 6:
		return (unsigned long)xmm6;
	case 7:
		return (unsigned long)xmm7;
	}
	return 0;
}
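
/*
 * Illustrative usage sketch: set_xmm()/get_xmm() round-trip the low
 * 64 bits of an XMM register, e.g. to check that SSE state survives
 * an event under test:
 *
 *	set_xmm(3, 0xdeadbeefcafebabeul);
 *	... save/restore or other event under test ...
 *	GUEST_ASSERT(get_xmm(3) == 0xdeadbeefcafebabeul);
 *
 * (GUEST_ASSERT is assumed from the selftests' ucall infrastructure.)
 */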

bool is_intel_cpu(void);

struct kvm_x86_state;
struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_x86_state *state);

struct kvm_msr_list *kvm_get_msr_index_list(void);

struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_cpuid2 *cpuid);

struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);

static inline struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_entry(uint32_t function)
{
	return kvm_get_supported_cpuid_index(function, 0);
}
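
/*
 * Illustrative usage sketch (assumes <linux/kvm.h> is visible for the
 * complete struct kvm_cpuid_entry2): feature tests typically fetch a
 * leaf and test a bit, e.g. XSAVE support is CPUID.01H:ECX[26]:
 *
 *	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 *
 *	if (entry->ecx & (1u << 26))
 *		... KVM reports XSAVE support ...
 */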

uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value);
void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value);
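
/*
 * Illustrative usage sketch: tests typically read an MSR through the
 * vCPU accessors, adjust it, and write it back, e.g.:
 *
 *	uint64_t val = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_TSC);
 *
 *	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_TSC, val + offset);
 *
 * where VCPU_ID and offset are test-specific.  _vcpu_set_msr() is the
 * non-asserting variant for callers that want to observe failure.
 */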

uint32_t kvm_get_cpuid_max_basic(void);
uint32_t kvm_get_cpuid_max_extended(void);
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);

/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE          (1UL<<0) /* Protection Enable */
#define X86_CR0_MP          (1UL<<1) /* Monitor Coprocessor */
#define X86_CR0_EM          (1UL<<2) /* Emulation */
#define X86_CR0_TS          (1UL<<3) /* Task Switched */
#define X86_CR0_ET          (1UL<<4) /* Extension Type */
#define X86_CR0_NE          (1UL<<5) /* Numeric Error */
#define X86_CR0_WP          (1UL<<16) /* Write Protect */
#define X86_CR0_AM          (1UL<<18) /* Alignment Mask */
#define X86_CR0_NW          (1UL<<29) /* Not Write-through */
#define X86_CR0_CD          (1UL<<30) /* Cache Disable */
#define X86_CR0_PG          (1UL<<31) /* Paging */
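
/*
 * Illustrative usage sketch: long mode requires both protection and
 * paging to be on, so a 64-bit guest can sanity-check its mode with:
 *
 *	GUEST_ASSERT(get_cr0() & X86_CR0_PE);
 *	GUEST_ASSERT(get_cr0() & X86_CR0_PG);
 *
 * (GUEST_ASSERT is assumed from the selftests' ucall infrastructure.)
 */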

#define APIC_BASE_MSR	0x800
#define X2APIC_ENABLE	(1UL << 10)
#define	APIC_ICR	0x300
#define		APIC_DEST_SELF		0x40000
#define		APIC_DEST_ALLINC	0x80000
#define		APIC_DEST_ALLBUT	0xC0000
#define		APIC_ICR_RR_MASK	0x30000
#define		APIC_ICR_RR_INVALID	0x00000
#define		APIC_ICR_RR_INPROG	0x10000
#define		APIC_ICR_RR_VALID	0x20000
#define		APIC_INT_LEVELTRIG	0x08000
#define		APIC_INT_ASSERT		0x04000
#define		APIC_ICR_BUSY		0x01000
#define		APIC_DEST_LOGICAL	0x00800
#define		APIC_DEST_PHYSICAL	0x00000
#define		APIC_DM_FIXED		0x00000
#define		APIC_DM_FIXED_MASK	0x00700
#define		APIC_DM_LOWEST		0x00100
#define		APIC_DM_SMI		0x00200
#define		APIC_DM_REMRD		0x00300
#define		APIC_DM_NMI		0x00400
#define		APIC_DM_INIT		0x00500
#define		APIC_DM_STARTUP		0x00600
#define		APIC_DM_EXTINT		0x00700
#define		APIC_VECTOR_MASK	0x000FF
#define	APIC_ICR2	0x310
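
/*
 * Illustrative usage sketch: in x2APIC mode each APIC register is an
 * MSR at APIC_BASE_MSR + (MMIO offset >> 4), so the ICR is MSR 0x830
 * and a fixed self-IPI is composed by OR-ing a destination shorthand,
 * a delivery mode and a vector:
 *
 *	wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
 *	      APIC_DEST_SELF | APIC_DM_FIXED | vector);
 */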

/* VMX_EPT_VPID_CAP bits */
#define VMX_EPT_VPID_CAP_AD_BITS       (1ULL << 21)

#endif /* SELFTEST_KVM_PROCESSOR_H */