/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef SELFTEST_KVM_UTIL_ARCH_H
#define SELFTEST_KVM_UTIL_ARCH_H

#include <stdbool.h>
#include <stdint.h>

#include "kvm_util_types.h"
#include "test_util.h"

/* True when forced instruction emulation (FEP) is enabled; defined elsewhere. */
extern bool is_forced_emulation_enabled;

13 struct pte_masks {
14 	uint64_t present;
15 	uint64_t writable;
16 	uint64_t user;
17 	uint64_t readable;
18 	uint64_t executable;
19 	uint64_t accessed;
20 	uint64_t dirty;
21 	uint64_t huge;
22 	uint64_t nx;
23 	uint64_t c;
24 	uint64_t s;
25 
26 	uint64_t always_set;
27 };
29 struct kvm_mmu_arch {
30 	struct pte_masks pte_masks;
31 };

struct kvm_mmu;	/* forward declaration; no full definition needed here */

35 struct kvm_vm_arch {
36 	vm_vaddr_t gdt;
37 	vm_vaddr_t tss;
38 	vm_vaddr_t idt;
39 
40 	uint64_t c_bit;
41 	uint64_t s_bit;
42 	int sev_fd;
43 	bool is_pt_protected;
44 };
46 static inline bool __vm_arch_has_protected_memory(struct kvm_vm_arch *arch)
47 {
48 	return arch->c_bit || arch->s_bit;
49 }
51 #define vm_arch_has_protected_memory(vm) \
52 	__vm_arch_has_protected_memory(&(vm)->arch)
54 #define vcpu_arch_put_guest(mem, __val)							\
55 do {											\
56 	const typeof(mem) val = (__val);						\
57 											\
58 	if (!is_forced_emulation_enabled || guest_random_bool(&guest_rng)) {		\
59 		(mem) = val;								\
60 	} else if (guest_random_bool(&guest_rng)) {					\
61 		__asm__ __volatile__(KVM_FEP "mov %1, %0"				\
62 				     : "+m" (mem)					\
63 				     : "r" (val) : "memory");				\
64 	} else {									\
65 		uint64_t __old = READ_ONCE(mem);					\
66 											\
67 		__asm__ __volatile__(KVM_FEP LOCK_PREFIX "cmpxchg %[new], %[ptr]"	\
68 				     : [ptr] "+m" (mem), [old] "+a" (__old)		\
69 				     : [new]"r" (val) : "memory", "cc");		\
70 	}										\
71 } while (0)

#endif  // SELFTEST_KVM_UTIL_ARCH_H