/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2022 Oxide Computer Company
 */

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stropts.h>
#include <strings.h>
#include <signal.h>
#include <setjmp.h>
#include <libgen.h>
#include <sys/debug.h>
#include <sys/fp.h>

#include <sys/vmm.h>
#include <sys/vmm_dev.h>
#include <sys/x86_archext.h>
#include <vmmapi.h>

#include "common.h"

/* Minimal xsave state area (sans any AVX storage) */
struct xsave_min {
	struct fxsave_state	legacy;
	struct xsave_header	header;
};

CTASSERT(sizeof (struct xsave_min) == MIN_XSAVE_SIZE);

struct avx_state {
	/* 16 x 128-bit: high portions of the ymm registers */
	uint64_t	ymm[32];
};

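/*
 * Read the FPU (xsave) state of the requested vCPU into the caller's buffer
 * via the VM_GET_FPU ioctl, noting any failure on stderr.
 */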
static bool
get_fpu(int fd, struct vm_fpu_state *req)
{
	int res = ioctl(fd, VM_GET_FPU, req);
	if (res != 0) {
		perror("could not read FPU for vCPU");
		return (false);
	}
	return (true);
}

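/*
 * Write the caller-provided FPU (xsave) state to the requested vCPU via the
 * VM_SET_FPU ioctl, noting any failure on stderr.
 */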
static bool
set_fpu(int fd, struct vm_fpu_state *req)
{
	int res = ioctl(fd, VM_SET_FPU, req);
	if (res != 0) {
		perror("could not write FPU for vCPU");
		return (false);
	}
	return (true);
}

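/*
 * Exercise the legacy x87/SSE portion of the guest FPU: verify that the FP
 * and SSE components are described at the expected offset and size,
 * round-trip test values through the XMM registers, and confirm that an
 * invalid MXCSR value is rejected by VM_SET_FPU.
 */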
static bool
check_sse(int fd, const struct vm_fpu_desc *desc, void *fpu_area,
    size_t fpu_size)
{
	/* Make sure the x87/MMX/SSE state is described as present */
	bool found_fp = false, found_sse = false;
	for (uint_t i = 0; i < desc->vfd_num_entries; i++) {
		const struct vm_fpu_desc_entry *ent = &desc->vfd_entry_data[i];

		switch (ent->vfde_feature) {
		case XFEATURE_LEGACY_FP:
			found_fp = true;
			if (ent->vfde_off != 0 ||
			    ent->vfde_size != sizeof (struct fxsave_state)) {
				(void) fprintf(stderr,
				    "unexpected entity for %x: "
				    "size=%x off=%x\n", ent->vfde_feature,
				    ent->vfde_size, ent->vfde_off);
				return (false);
			}
			break;
		case XFEATURE_SSE:
			found_sse = true;
			if (ent->vfde_off != 0 ||
			    ent->vfde_size != sizeof (struct fxsave_state)) {
				(void) fprintf(stderr,
				    "unexpected entity for %x: "
				    "size=%x off=%x\n", ent->vfde_feature,
				    ent->vfde_size, ent->vfde_off);
				return (false);
			}
			break;
		}
	}

	if (!found_fp || !found_sse) {
		(void) fprintf(stderr, "did not find x87 and SSE area "
		    "descriptors as expected in initial FPU\n");
		return (false);
	}

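	/* Fetch the initial FPU contents of vCPU 0 for inspection. */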
	struct vm_fpu_state req = {
		.vcpuid = 0,
		.buf = fpu_area,
		.len = fpu_size,
	};

	if (!get_fpu(fd, &req)) {
		return (false);
	}

	struct xsave_min *xs = fpu_area;
	/*
	 * Executing this test on a freshly-created instance, we expect the FPU
	 * to only have the legacy and SSE features present in its active state.
	 */
	if (xs->header.xsh_xstate_bv != (XFEATURE_LEGACY_FP | XFEATURE_SSE)) {
		(void) fprintf(stderr, "bad xstate_bv %lx, expected %lx\n",
		    xs->header.xsh_xstate_bv,
		    (uint64_t)(XFEATURE_LEGACY_FP | XFEATURE_SSE));
		return (false);
	}

	/* load some SSE values to check for a get/set cycle */
	uint64_t *xmm = (void *)&xs->legacy.fx_xmm[0];
	xmm[0] = UINT64_MAX;
	xmm[2] = 1;

	if (!set_fpu(fd, &req)) {
		return (false);
	}

	/* check that those values made it in/out of the guest FPU */
	bzero(fpu_area, fpu_size);
	if (!get_fpu(fd, &req)) {
		return (false);
	}
	if (xmm[0] != UINT64_MAX || xmm[2] != 1) {
		(void) fprintf(stderr, "SSE test registers not saved\n");
		return (false);
	}

	/* Make sure that a bogus MXCSR value is rejected */
	xs->legacy.fx_mxcsr = UINT32_MAX;
	int res = ioctl(fd, VM_SET_FPU, &req);
	if (res == 0) {
		(void) fprintf(stderr,
		    "write of invalid MXCSR erroneously allowed\n");
		return (false);
	}

	return (true);
}

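/*
 * Exercise the AVX portion of the guest FPU, if the host CPU supports it:
 * validate the size and offset of the AVX component, confirm that YMM data
 * written without XFEATURE_AVX set in xstate_bv is not applied, and then
 * confirm that it round-trips once the bit is set.
 */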
static bool
check_avx(int fd, const struct vm_fpu_desc *desc, void *fpu_area,
    size_t fpu_size)
{
	bool found_avx = false;
	size_t avx_size, avx_off;
	for (uint_t i = 0; i < desc->vfd_num_entries; i++) {
		const struct vm_fpu_desc_entry *ent = &desc->vfd_entry_data[i];

		if (ent->vfde_feature == XFEATURE_AVX) {
			found_avx = true;
			avx_size = ent->vfde_size;
			avx_off = ent->vfde_off;
			break;
		}
	}

	if (!found_avx) {
		(void) printf("AVX capability not found on host CPU, "
		    "skipping related tests\n");
		return (true);
	}

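	/*
	 * The AVX component must match the size of our avx_state struct and
	 * lie entirely within the buffer sized via VM_DESC_FPU_AREA.
	 */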
	if (avx_size != sizeof (struct avx_state)) {
		(void) fprintf(stderr, "unexpected AVX state size: %lx, "
		    "expected %lx\n", avx_size, sizeof (struct avx_state));
		return (false);
	}
	if ((avx_off + avx_size) > fpu_size) {
		(void) fprintf(stderr, "AVX data falls outside fpu size: "
		    "%lx > %lx\n", avx_off + avx_size, fpu_size);
		return (false);
	}

	struct xsave_min *xs = fpu_area;
	struct avx_state *avx = fpu_area + avx_off;

	/* do a simple data round-trip */
	struct vm_fpu_state req = {
		.vcpuid = 0,
		.buf = fpu_area,
		.len = fpu_size,
	};
	if (!get_fpu(fd, &req)) {
		return (false);
	}

	/* With AVX unused so far, we expect it to be absent from the BV */
	if (xs->header.xsh_xstate_bv != (XFEATURE_LEGACY_FP | XFEATURE_SSE)) {
		(void) fprintf(stderr, "bad xstate_bv %lx, expected %lx\n",
		    xs->header.xsh_xstate_bv,
		    (uint64_t)(XFEATURE_LEGACY_FP | XFEATURE_SSE));
		return (false);
	}

	avx->ymm[0] = UINT64_MAX;
	avx->ymm[2] = 2;

	/* first write without asserting AVX in BV */
	if (!set_fpu(fd, &req)) {
		return (false);
	}

	/* And check that the AVX state stays empty */
	bzero(fpu_area, fpu_size);
	if (!get_fpu(fd, &req)) {
		return (false);
	}
	if (xs->header.xsh_xstate_bv != (XFEATURE_LEGACY_FP | XFEATURE_SSE)) {
		(void) fprintf(stderr, "xstate_bv changed unexpectedly %lx\n",
		    xs->header.xsh_xstate_bv);
		return (false);
	}
	if (avx->ymm[0] != 0 || avx->ymm[2] != 0) {
		(void) fprintf(stderr, "YMM state changed unexpectedly "
		    "%lx %lx\n", avx->ymm[0], avx->ymm[2]);
		return (false);
	}

	/* Now write YMM and set the appropriate AVX BV state */
	avx->ymm[0] = UINT64_MAX;
	avx->ymm[2] = 2;
	xs->header.xsh_xstate_bv |= XFEATURE_AVX;
	if (!set_fpu(fd, &req)) {
		return (false);
	}

	/* ... and now check that it stuck */
	bzero(fpu_area, fpu_size);
	if (!get_fpu(fd, &req)) {
		return (false);
	}
	if ((xs->header.xsh_xstate_bv & XFEATURE_AVX) == 0) {
		(void) fprintf(stderr, "AVX missing from xstate_bv %lx\n",
		    xs->header.xsh_xstate_bv);
		return (false);
	}
	if (avx->ymm[0] != UINT64_MAX || avx->ymm[2] != 2) {
		(void) fprintf(stderr, "YMM state not preserved "
		    "%lx != %lx | %lx != %lx\n",
		    avx->ymm[0], UINT64_MAX, avx->ymm[2], (uint64_t)2);
		return (false);
	}

	return (true);
}

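/*
 * Create a throwaway test VM, query the description of its FPU (xsave)
 * area, and run the SSE and AVX get/set checks against vCPU 0.
 */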
int
main(int argc, char *argv[])
{
	struct vmctx *ctx;
	int res, fd;
	const char *suite_name = basename(argv[0]);

	ctx = create_test_vm(suite_name);
	if (ctx == NULL) {
		perror("could not open test VM");
		return (EXIT_FAILURE);
	}
	fd = vm_get_device_fd(ctx);

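	/*
	 * Ask the kernel to describe the guest FPU (xsave) area: entries for
	 * each feature (offset and size) plus the total size required to hold
	 * a copy of the state.
	 */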
	struct vm_fpu_desc_entry entries[64];
	struct vm_fpu_desc desc = {
		.vfd_entry_data = entries,
		.vfd_num_entries = 64,
	};

	res = ioctl(fd, VM_DESC_FPU_AREA, &desc);
	if (res != 0) {
		perror("could not query fpu area description");
		goto bail;
	}

	/* Make sure the XSAVE area described for this machine is reasonable */
	if (desc.vfd_num_entries == 0) {
		(void) fprintf(stderr, "no FPU description entries found\n");
		goto bail;
	}
	if (desc.vfd_req_size < MIN_XSAVE_SIZE) {
		(void) fprintf(stderr, "required XSAVE size %lu < "
		    "expected %lu\n", desc.vfd_req_size, MIN_XSAVE_SIZE);
		goto bail;
	}

	const size_t fpu_size = desc.vfd_req_size;
	void *fpu_area = malloc(fpu_size);
	if (fpu_area == NULL) {
		perror("could not allocate fpu area");
		goto bail;
	}
	bzero(fpu_area, fpu_size);

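	/* Run the SSE checks, then the (possibly skipped) AVX checks. */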
	if (!check_sse(fd, &desc, fpu_area, fpu_size)) {
		goto bail;
	}
	if (!check_avx(fd, &desc, fpu_area, fpu_size)) {
		goto bail;
	}

	/* mission accomplished */
	vm_destroy(ctx);
	(void) printf("%s\tPASS\n", suite_name);
	return (EXIT_SUCCESS);

bail:
	vm_destroy(ctx);
	(void) printf("%s\tFAIL\n", suite_name);
	return (EXIT_FAILURE);
}