xref: /linux/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0
/*
 * XCR0 cpuid test
 *
 * Copyright (C) 2022, Google LLC.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"

/*
 * Assert that architectural dependency rules are satisfied, e.g. that AVX
 * isn't supported unless SSE is also supported.
 */
#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies)		\
do {											\
	uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies));	\
											\
	__GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) ||			\
		       __supported == ((xfeatures) | (dependencies)),			\
		       "supported = 0x%lx, xfeatures = 0x%llx, dependencies = 0x%llx",	\
		       __supported, (xfeatures), (dependencies));			\
} while (0)

/*
 * Assert that KVM reports a sane, usable as-is XCR0.  Architecturally, a CPU
 * isn't strictly required to _support_ all XFeatures related to a feature, but
 * at the same time XSETBV will #GP if bundled XFeatures aren't enabled and
 * disabled coherently.  E.g. a CPU can technically enumerate support for
 * XTILE_CFG but not XTILE_DATA, but attempting to enable XTILE_CFG without
 * XTILE_DATA will #GP.
 */
#define ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0, xfeatures)		\
do {									\
	uint64_t __supported = (supported_xcr0) & (xfeatures);		\
									\
	__GUEST_ASSERT(!__supported || __supported == (xfeatures),	\
		       "supported = 0x%lx, xfeatures = 0x%llx",		\
		       __supported, (xfeatures));			\
} while (0)

static void guest_code(void)
{
	uint64_t xcr0_reset;
	uint64_t supported_xcr0;
	int i, vector;

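	/* XGETBV/XSETBV #UD when CR4.OSXSAVE=0, so enable OSXSAVE before touching XCR0. */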
	set_cr4(get_cr4() | X86_CR4_OSXSAVE);

	xcr0_reset = xgetbv(0);
	supported_xcr0 = this_cpu_supported_xcr0();

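	/* XCR0 architecturally resets to 1, i.e. with only x87/FP state enabled. */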
	GUEST_ASSERT(xcr0_reset == XFEATURE_MASK_FP);

	/* Check AVX */
	ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,
				     XFEATURE_MASK_YMM,
				     XFEATURE_MASK_SSE);

	/* Check MPX */
	ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
				    XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	/* Check AVX-512 */
	ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,
				     XFEATURE_MASK_AVX512,
				     XFEATURE_MASK_SSE | XFEATURE_MASK_YMM);
	ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
				    XFEATURE_MASK_AVX512);

	/* Check AMX */
	ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
				    XFEATURE_MASK_XTILE);

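	/* Enabling the full supported mask should succeed, i.e. not fault. */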
	vector = xsetbv_safe(0, supported_xcr0);
	__GUEST_ASSERT(!vector,
		       "Expected success on XSETBV(0x%lx), got vector '0x%x'",
		       supported_xcr0, vector);

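	/* Attempting to enable any unsupported xfeature bit should #GP. */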
	for (i = 0; i < 64; i++) {
		if (supported_xcr0 & BIT_ULL(i))
			continue;

		vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
		__GUEST_ASSERT(vector == GP_VECTOR,
			       "Expected #GP on XSETBV(0x%llx), supported XCR0 = 0x%lx, got vector '0x%x'",
			       BIT_ULL(i), supported_xcr0, vector);
	}

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;

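	/* XCR0 and XSETBV are meaningless without XSAVE, require it up front. */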
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;

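	/* Run the guest until it reports completion or a failed assertion. */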
	while (1) {
		vcpu_run(vcpu);

		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Unexpected exit reason: %u (%s)",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	kvm_vm_free(vm);
	return 0;
}