xref: /linux/tools/testing/selftests/kvm/include/x86/svm_util.h (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2020, Red Hat, Inc.
4  */
5 
6 #ifndef SELFTEST_KVM_SVM_UTILS_H
7 #define SELFTEST_KVM_SVM_UTILS_H
8 
9 #include <asm/svm.h>
10 
11 #include <stdint.h>
12 #include "svm.h"
13 #include "processor.h"
14 
/*
 * Per-vCPU bookkeeping for nested SVM (AMD-V) tests.  Each region is
 * tracked by up to three addresses: the guest virtual address (gva) the
 * L1 guest uses, the host virtual address (hva) the test harness uses,
 * and the guest physical address (gpa) programmed into hardware
 * structures.  Presumably all three name the same underlying pages —
 * confirm against vcpu_alloc_svm() in the corresponding .c file.
 */
struct svm_test_data {
	/* VMCB (Virtual Machine Control Block) used to run the L2 guest */
	struct vmcb *vmcb; /* gva */
	void *vmcb_hva;    /* hva of the VMCB */
	uint64_t vmcb_gpa; /* gpa of the VMCB */

	/* host state-save area */
	struct vmcb_save_area *save_area; /* gva */
	void *save_area_hva;              /* hva of the save area */
	uint64_t save_area_gpa;           /* gpa of the save area */

	/* MSR-Bitmap */
	void *msr; /* gva */
	void *msr_hva;    /* hva of the MSR bitmap */
	uint64_t msr_gpa; /* gpa of the MSR bitmap */

	/* NPT: gpa of the nested CR3 (root of the nested page tables) */
	uint64_t ncr3_gpa;
};
34 
/*
 * Force a VMEXIT from L2 to L1 by executing VMMCALL.
 *
 * No return value; on success control resumes in L1's exit handler and
 * eventually comes back here after the nested VMRUN re-enters L2.
 */
static inline void vmmcall(void)
{
	/*
	 * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
	 * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
	 * use of this function is to exit to L1 from L2.  Clobber all other
	 * GPRs as L1 doesn't correctly preserve them during vmexits.
	 *
	 * RBP is preserved manually with push/pop rather than listed as a
	 * clobber, since the compiler may be using it as the frame pointer
	 * and would reject it in the clobber list.
	 */
	__asm__ __volatile__("push %%rbp; vmmcall; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");
}
48 
/*
 * Execute STGI to set the Global Interrupt Flag.  Implemented as a
 * static inline function (matching vmmcall() above) rather than a
 * macro; callers still invoke it as stgi().
 */
static inline void stgi(void)
{
	__asm__ __volatile__("stgi");
}
53 
/*
 * Execute CLGI to clear the Global Interrupt Flag.  Implemented as a
 * static inline function (matching vmmcall() above) rather than a
 * macro; callers still invoke it as clgi().
 */
static inline void clgi(void)
{
	__asm__ __volatile__("clgi");
}
58 
/*
 * Allocate the svm_test_data tracking structure for @vm; the guest
 * virtual address is returned via @p_svm_gva and the host pointer is
 * the return value (per the naming convention — confirm in the .c file).
 */
struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
/* Initialize @svm's VMCB so a subsequent VMRUN enters the L2 guest at
 * @guest_rip with stack @guest_rsp. */
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
/* Issue VMRUN on @vmcb_gpa; returns when the L2 guest exits back to L1. */
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
62 
63 static inline bool kvm_cpu_has_npt(void)
64 {
65 	return kvm_cpu_has(X86_FEATURE_NPT);
66 }
/* Enable Nested Page Tables for @vm. */
void vm_enable_npt(struct kvm_vm *vm);

/* Open the SEV device node, returning its fd; exits the test on failure
 * rather than returning an error (per the _or_exit naming convention). */
int open_sev_dev_path_or_exit(void);
70 
71 #endif /* SELFTEST_KVM_SVM_UTILS_H */
72