xref: /linux/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h (revision 39f1c201b93f4ff71631bac72cff6eb155f976a4)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef SELFTEST_KVM_UTIL_ARCH_H
3 #define SELFTEST_KVM_UTIL_ARCH_H
4 
5 #include <stdbool.h>
6 #include <stdint.h>
7 
8 #include "kvm_util_types.h"
9 #include "test_util.h"
10 
11 extern bool is_forced_emulation_enabled;
12 
/*
 * Bit masks for the attributes of a page-table entry (PTE).  Each field
 * holds the mask for one attribute; (pte & mask) tests that attribute.
 * Held per-VM via struct kvm_mmu_arch.
 */
struct pte_masks {
	u64 present;	/* translation is valid */
	u64 writable;	/* writes permitted */
	u64 user;	/* user-mode access permitted */
	u64 readable;	/* reads permitted — presumably for EPT-style formats; confirm */
	u64 executable;	/* instruction fetch permitted — presumably EPT-style; confirm */
	u64 accessed;	/* hardware "accessed" bit */
	u64 dirty;	/* hardware "dirty" bit */
	u64 huge;	/* entry maps a large/huge page */
	u64 nx;		/* no-execute bit */
	u64 c;		/* encryption bit — presumably pairs with kvm_vm_arch.c_bit (SEV); confirm */
	u64 s;		/* shared/private bit — presumably pairs with kvm_vm_arch.s_bit; confirm */

	u64 always_set;	/* bits that must be set in every PTE built with these masks */
};
28 
/* x86-specific MMU state: the PTE bit masks in effect for this VM. */
struct kvm_mmu_arch {
	struct pte_masks pte_masks;
};
32 
/* Forward declaration; the full definition lives outside this header. */
struct kvm_mmu;
34 
/* Per-VM x86 architecture state. */
struct kvm_vm_arch {
	gva_t gdt;	/* guest virtual address of the GDT */
	gva_t tss;	/* guest virtual address of the TSS */
	gva_t idt;	/* guest virtual address of the IDT */

	/*
	 * Encryption/shared bit masks; either being nonzero means the VM has
	 * protected memory (see __vm_arch_has_protected_memory() below).
	 * Presumably SEV C-bit and TDX S-bit respectively — confirm.
	 */
	u64 c_bit;
	u64 s_bit;
	int sev_fd;		/* fd for SEV operations — confirm against SEV helpers */
	bool is_pt_protected;	/* NOTE(review): presumably "page tables not host-accessible" — confirm */
};
45 
46 static inline bool __vm_arch_has_protected_memory(struct kvm_vm_arch *arch)
47 {
48 	return arch->c_bit || arch->s_bit;
49 }
50 
/*
 * Convenience wrapper: query protected-memory status directly from a VM
 * object (anything with an embedded "arch" member of struct kvm_vm_arch).
 */
#define vm_arch_has_protected_memory(vm) \
	__vm_arch_has_protected_memory(&(vm)->arch)
53 
/*
 * Store @__val into guest memory at @mem.  When forced emulation is enabled,
 * randomly pick one of three store paths so tests exercise KVM's instruction
 * emulator as well as native stores:
 *   1) a plain compiler-generated store,
 *   2) a forced-emulated MOV (KVM_FEP prefix on the instruction),
 *   3) a forced-emulated LOCK CMPXCHG seeded with the current value of @mem
 *      (so the exchange succeeds; NOTE(review): assumes no concurrent writer
 *      modifies @mem between the READ_ONCE and the CMPXCHG — confirm).
 * @__val is evaluated exactly once (captured into a local); @mem may be
 * evaluated more than once.
 */
#define vcpu_arch_put_guest(mem, __val)							\
do {											\
	const typeof(mem) val = (__val);						\
											\
	if (!is_forced_emulation_enabled || guest_random_bool(&guest_rng)) {		\
		(mem) = val;								\
	} else if (guest_random_bool(&guest_rng)) {					\
		__asm__ __volatile__(KVM_FEP "mov %1, %0"				\
				     : "+m" (mem)					\
				     : "r" (val) : "memory");				\
	} else {									\
		u64 __old = READ_ONCE(mem);					\
											\
		__asm__ __volatile__(KVM_FEP LOCK_PREFIX "cmpxchg %[new], %[ptr]"	\
				     : [ptr] "+m" (mem), [old] "+a" (__old)		\
				     : [new]"r" (val) : "memory", "cc");		\
	}										\
} while (0)
72 
73 #endif  // SELFTEST_KVM_UTIL_ARCH_H
74