// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMX tests
 *
 * Copyright (C) 2021, Intel, Inc.
 *
 * Tests for the AMX #NM exception and state save/restore.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#ifndef __x86_64__
# error This test is 64-bit only
#endif

#define NUM_TILES			8
#define TILE_SIZE			1024
#define XSAVE_SIZE			((NUM_TILES * TILE_SIZE) + PAGE_SIZE)

/* Tile configuration constants: */
#define PALETTE_TABLE_INDEX		1
#define MAX_TILES			16
#define RESERVED_BYTES			14

#define XSAVE_HDR_OFFSET		512

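/*
 * In-memory layout consumed by LDTILECFG (palette 1): byte 0 is the palette
 * id, byte 1 the start row, bytes 2-15 are reserved, then 16 two-byte
 * bytes-per-row (colsb) entries followed by 16 one-byte row counts.
 */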
struct tile_config {
	u8  palette_id;
	u8  start_row;
	u8  reserved[RESERVED_BYTES];
	u16 colsb[MAX_TILES];
	u8  rows[MAX_TILES];
};

struct tile_data {
	u8 data[NUM_TILES * TILE_SIZE];
};

struct xtile_info {
	u16 bytes_per_tile;
	u16 bytes_per_row;
	u16 max_names;
	u16 max_rows;
	u32 xsave_offset;
	u32 xsave_size;
};

static struct xtile_info xtile;

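/*
 * LDTILECFG, hand-encoded with .byte in case the assembler lacks AMX support.
 * The encoding decodes to "ldtilecfg (%rax)", i.e. it loads the 64-byte tile
 * configuration pointed at by @cfg.
 */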
static inline void __ldtilecfg(void *cfg)
{
	asm volatile(".byte 0xc4,0xe2,0x78,0x49,0x00"
		     : : "a"(cfg));
}

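/*
 * TILELOADD, hand-encoded; decodes to "tileloadd (%rax,%rdx,1), %tmm0".  RDX
 * is cleared, so TMM0 is simply loaded from the buffer at @tile.
 */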
static inline void __tileloadd(void *tile)
{
	asm volatile(".byte 0xc4,0xe2,0x7b,0x4b,0x04,0x10"
		     : : "a"(tile), "d"(0));
}

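/* TILERELEASE, hand-encoded; returns all tile registers to the INIT state. */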
static inline void __tilerelease(void)
{
	asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::);
}

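/*
 * XSAVEC with the requested-feature bitmap (RFBM) in EDX:EAX; saves the
 * components selected by @rfbm into @xstate in compacted format.
 */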
static inline void __xsavec(struct xstate *xstate, uint64_t rfbm)
{
	uint32_t rfbm_lo = rfbm;
	uint32_t rfbm_hi = rfbm >> 32;

	asm volatile("xsavec (%%rdi)"
		     : : "D" (xstate), "a" (rfbm_lo), "d" (rfbm_hi)
		     : "memory");
}

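/*
 * Sanity check the CPUID-enumerated XSTATE/AMX properties against the
 * architectural values for palette 1: 8KiB of tile data at XSAVE offset 2816,
 * eight 1KiB tiles of 16 rows x 64 bytes.
 */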
static void check_xtile_info(void)
{
	GUEST_ASSERT((xgetbv(0) & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE);

	GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0));
	GUEST_ASSERT(this_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0) <= XSAVE_SIZE);

	xtile.xsave_offset = this_cpu_property(X86_PROPERTY_XSTATE_TILE_OFFSET);
	GUEST_ASSERT(xtile.xsave_offset == 2816);
	xtile.xsave_size = this_cpu_property(X86_PROPERTY_XSTATE_TILE_SIZE);
	GUEST_ASSERT(xtile.xsave_size == 8192);
	GUEST_ASSERT(sizeof(struct tile_data) >= xtile.xsave_size);

	GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_MAX_PALETTE_TABLES));
	GUEST_ASSERT(this_cpu_property(X86_PROPERTY_AMX_MAX_PALETTE_TABLES) >=
		     PALETTE_TABLE_INDEX);

	GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_NR_TILE_REGS));
	xtile.max_names = this_cpu_property(X86_PROPERTY_AMX_NR_TILE_REGS);
	GUEST_ASSERT(xtile.max_names == 8);
	xtile.bytes_per_tile = this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_TILE);
	GUEST_ASSERT(xtile.bytes_per_tile == 1024);
	xtile.bytes_per_row = this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_ROW);
	GUEST_ASSERT(xtile.bytes_per_row == 64);
	xtile.max_rows = this_cpu_property(X86_PROPERTY_AMX_MAX_ROWS);
	GUEST_ASSERT(xtile.max_rows == 16);
}

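/* Configure every enumerated tile register for the maximum palette 1 geometry. */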
static void set_tilecfg(struct tile_config *cfg)
{
	int i;

	/* Only palette id 1 */
	cfg->palette_id = 1;
	for (i = 0; i < xtile.max_names; i++) {
		cfg->colsb[i] = xtile.bytes_per_row;
		cfg->rows[i] = xtile.max_rows;
	}
}

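/*
 * Guest flow: verify AMX enumeration, load a tile with XFD clear, check the
 * XSAVEC header semantics for XTILEDATA, then re-arm XFD so the final
 * tileloadd triggers #NM.  The host checks save/restore at syncs 4 and 10.
 */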
static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
						    struct tile_data *tiledata,
						    struct xstate *xstate)
{
	GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE) &&
		     this_cpu_has(X86_FEATURE_OSXSAVE));
	check_xtile_info();
	GUEST_SYNC(1);

	/* xfd=0, enable amx */
	wrmsr(MSR_IA32_XFD, 0);
	GUEST_SYNC(2);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == 0);
	set_tilecfg(amx_cfg);
	__ldtilecfg(amx_cfg);
	GUEST_SYNC(3);
	/* Check save/restore when trapping to userspace */
	__tileloadd(tiledata);
	GUEST_SYNC(4);
	__tilerelease();
	GUEST_SYNC(5);
	/*
	 * After XSAVEC, XTILEDATA is cleared in the xstate_bv but is set in
	 * the xcomp_bv.
	 */
	xstate->header.xstate_bv = XFEATURE_MASK_XTILE_DATA;
	__xsavec(xstate, XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(!(xstate->header.xstate_bv & XFEATURE_MASK_XTILE_DATA));
	GUEST_ASSERT(xstate->header.xcomp_bv & XFEATURE_MASK_XTILE_DATA);

	/* xfd=0x40000, disable amx tiledata */
	wrmsr(MSR_IA32_XFD, XFEATURE_MASK_XTILE_DATA);

	/*
	 * XTILEDATA is cleared in xstate_bv but set in xcomp_bv; this property
	 * remains the same even when AMX tile data is disabled by IA32_XFD.
	 */
	xstate->header.xstate_bv = XFEATURE_MASK_XTILE_DATA;
	__xsavec(xstate, XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(!(xstate->header.xstate_bv & XFEATURE_MASK_XTILE_DATA));
	GUEST_ASSERT((xstate->header.xcomp_bv & XFEATURE_MASK_XTILE_DATA));

	GUEST_SYNC(6);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
	set_tilecfg(amx_cfg);
	__ldtilecfg(amx_cfg);
	/* Trigger #NM exception */
	__tileloadd(tiledata);
	GUEST_SYNC(10);

	GUEST_DONE();
}

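/*
 * #NM handler: verify XFD_ERR identifies XTILEDATA as the blocked feature,
 * then clear XFD_ERR and XFD so the faulting tileloadd can be re-executed.
 */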
void guest_nm_handler(struct ex_regs *regs)
{
	/* Check if #NM is triggered by XFEATURE_MASK_XTILE_DATA */
	GUEST_SYNC(7);
	GUEST_ASSERT(!(get_cr0() & X86_CR0_TS));
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
	GUEST_SYNC(8);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
	/* Clear xfd_err */
	wrmsr(MSR_IA32_XFD_ERR, 0);
	/* xfd=0, enable amx */
	wrmsr(MSR_IA32_XFD, 0);
	GUEST_SYNC(9);
}

int main(int argc, char *argv[])
{
	struct kvm_regs regs1, regs2;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_x86_state *state;
	int xsave_restore_size;
	vm_vaddr_t amx_cfg, tiledata, xstate;
	struct ucall uc;
	u32 amx_offset;
	int ret;

	/*
	 * Note, all off-by-default features must be enabled before anything
	 * caches KVM_GET_SUPPORTED_CPUID, e.g. before using kvm_cpu_has().
	 */
	vm_xsave_require_permission(XFEATURE_MASK_XTILE_DATA);

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XFD));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_AMX_TILE));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILECFG));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA_XFD));

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	TEST_ASSERT(kvm_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE),
		    "KVM should enumerate max XSAVE size when XSAVE is supported");
	xsave_restore_size = kvm_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE);

	vcpu_regs_get(vcpu, &regs1);

	/* Register #NM handler */
	vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);

	/* amx cfg for guest_code */
	amx_cfg = vm_vaddr_alloc_page(vm);
	memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize());

	/* amx tiledata for guest_code */
	tiledata = vm_vaddr_alloc_pages(vm, 2);
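	/*
	 * Fill the tile data with a non-zero pattern (rand() | 1 keeps the low
	 * bit set) so the host-side memcmp against the saved XSAVE image below
	 * compares real data rather than zeros.
	 */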
	memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize());

	/* XSAVE state for guest_code */
	xstate = vm_vaddr_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
	memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
	vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate);

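	/*
	 * Main loop: run the guest until UCALL_DONE.  At syncs 4 and 10 the
	 * TMM0 contents in the saved XSAVE image are compared against the
	 * source buffer; after every exit the vCPU state is saved, the VM is
	 * recreated, and the state is restored to exercise migration-style
	 * save/restore with AMX state.
	 */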
	for (;;) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			switch (uc.args[1]) {
			case 1:
			case 2:
			case 3:
			case 5:
			case 6:
			case 7:
			case 8:
				fprintf(stderr, "GUEST_SYNC(%ld)\n", uc.args[1]);
				break;
			case 4:
			case 10:
				fprintf(stderr,
				"GUEST_SYNC(%ld), check save/restore status\n", uc.args[1]);

				/*
				 * Compacted format: XTILEDATA is the last
				 * component in the save area, so its offset is
				 * the total XSAVE size minus the 8K of AMX
				 * tile data.
				 */
				amx_offset = xsave_restore_size - NUM_TILES * TILE_SIZE;
				state = vcpu_save_state(vcpu);
				void *amx_start = (void *)state->xsave + amx_offset;
				void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
				/* Only check TMM0 register, 1 tile */
				ret = memcmp(amx_start, tiles_data, TILE_SIZE);
				TEST_ASSERT(ret == 0, "memcmp failed, ret=%d", ret);
				kvm_x86_state_cleanup(state);
				break;
			case 9:
				fprintf(stderr,
				"GUEST_SYNC(%ld), #NM exception and enable amx\n", uc.args[1]);
				break;
			}
			break;
		case UCALL_DONE:
			fprintf(stderr, "UCALL_DONE\n");
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		state = vcpu_save_state(vcpu);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vcpu, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		kvm_x86_state_cleanup(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vcpu, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}
done:
	kvm_vm_free(vm);
}