xref: /illumos-gate/usr/src/test/bhyve-tests/tests/vmm/npt_ops.c (revision 9b9d39d2a32ff806d2431dbcc50968ef1e6d46b2)
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2024 Oxide Computer Company
 */

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <libgen.h>
#include <sys/stat.h>
#include <errno.h>
#include <err.h>
#include <assert.h>
#include <sys/sysmacros.h>
#include <stdbool.h>

#include <sys/vmm.h>
#include <sys/vmm_dev.h>
#include <sys/vmm_data.h>
#include <vmmapi.h>

#include "common.h"

#define	PAGESZ		4096
#define	TEST_PAGE_COUNT	256
#define	TEST_MEM_SZ	(PAGESZ * TEST_PAGE_COUNT)

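/*
 * Create and open a test VM with dirty-page tracking requested, or skip the
 * test entirely if the running VMM lacks that capability.
 */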
static struct vmctx *
check_vmm_capability(const char *tname)
{
	char vmname[VM_MAX_NAMELEN];

	name_test_vm(tname, vmname);
	int res = vm_create(vmname, VCF_TRACK_DIRTY);

	if (res != 0) {
		if (errno == ENOTSUP) {
			(void) fprintf(stderr,
			    "VMM lacks dirty page tracking capability\n");
			(void) printf("%s\tSKIP\n", tname);
			exit(EXIT_SUCCESS);
		}
		err(EXIT_FAILURE, "could not create VM");
	}
	struct vmctx *ctx = vm_open(vmname);
	if (ctx == NULL) {
		err(EXIT_FAILURE, "could not open test VM");
	}

	return (ctx);
}

static void
expect_errno(int expected)
{
	if (errno != expected) {
		errx(EXIT_FAILURE, "unexpected errno %d != %d",
		    errno, expected);
	}
}

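/* Count the bits set in a byte by repeatedly clearing the lowest set bit. */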
static uint8_t
popc8(uint8_t val)
{
	uint8_t cnt;

	for (cnt = 0; val != 0; val &= (val - 1)) {
		cnt++;
	}
	return (cnt);
}

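/*
 * Use the legacy VM_TRACK_DIRTY_PAGES ioctl to read (and clear) the dirty
 * bits for all of the test memory, returning how many pages were dirty.
 */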
static uint_t
legacy_clear_dirty(struct vmctx *ctx)
{
	uint8_t bitmap[TEST_PAGE_COUNT / 8] = { 0 };
	struct vmm_dirty_tracker req = {
		.vdt_start_gpa = 0,
		.vdt_len = TEST_MEM_SZ,
		.vdt_pfns = bitmap,
	};

	if (ioctl(vm_get_device_fd(ctx), VM_TRACK_DIRTY_PAGES, &req) != 0) {
		err(EXIT_FAILURE, "VM_TRACK_DIRTY_PAGES failed");
	}

	uint_t bits_set = 0;
	for (uint_t i = 0; i < (TEST_PAGE_COUNT / 8); i++) {
		bits_set += popc8(bitmap[i]);
	}
	return (bits_set);
}

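/* Issue a VM_NPT_OPERATION ioctl, treating any failure as fatal. */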
static void
do_npt_op(int vmfd, struct vm_npt_operation *vno)
{
	if (ioctl(vmfd, VM_NPT_OPERATION, vno) != 0) {
		err(EXIT_FAILURE, "VM_NPT_OPERATION failed");
	}
}

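/*
 * Check that a page dirtied by a direct write is visible through the legacy
 * VM_TRACK_DIRTY_PAGES mechanism, and that no other pages are reported.
 */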
static void
test_legacy(struct vmctx *ctx)
{
	const int vmfd = vm_get_device_fd(ctx);
	uint8_t *datap = vm_map_gpa(ctx, 0, PAGESZ);

	/* dirty the first page */
	*datap = 0xff;

	uint8_t bitmap[TEST_PAGE_COUNT / 8] = { 0 };
	struct vmm_dirty_tracker req = {
		.vdt_start_gpa = 0,
		.vdt_len = TEST_MEM_SZ,
		.vdt_pfns = bitmap,
	};

	if (ioctl(vmfd, VM_TRACK_DIRTY_PAGES, &req) != 0) {
		err(EXIT_FAILURE, "VM_TRACK_DIRTY_PAGES failed");
	}

	if (bitmap[0] != 1) {
		errx(EXIT_FAILURE, "first page not marked dirty");
	}
	for (uint_t i = 1; i < (TEST_PAGE_COUNT / 8); i++) {
		if (bitmap[i] != 0) {
			errx(EXIT_FAILURE,
			    "unexpected non-zero entry: bitmap[%u] = %x",
			    i, bitmap[i]);
		}
	}
}

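/*
 * Check that dirty tracking can be queried, disabled, and re-enabled via the
 * VNO_OP_*_TRACK_DIRTY operations.
 */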
static void
test_toggle_tracking(struct vmctx *ctx)
{
	const int vmfd = vm_get_device_fd(ctx);
	struct vm_npt_operation vno = {
		.vno_operation = VNO_OP_GET_TRACK_DIRTY,
		.vno_gpa = 0,
		.vno_len = 0,
	};

	/*
	 * Since the VM was created with VCF_TRACK_DIRTY set, dirty tracking
	 * should already be active.
	 */
	if (ioctl(vmfd, VM_NPT_OPERATION, &vno) != 1) {
		errx(EXIT_FAILURE, "expected dirty tracking to be active");
	}

	vno.vno_operation = VNO_OP_DIS_TRACK_DIRTY;
	if (ioctl(vmfd, VM_NPT_OPERATION, &vno) != 0) {
		err(EXIT_FAILURE, "VM_NPT_OPERATION failed");
	}

	vno.vno_operation = VNO_OP_GET_TRACK_DIRTY;
	if (ioctl(vmfd, VM_NPT_OPERATION, &vno) != 0) {
		errx(EXIT_FAILURE, "expected dirty tracking to be inactive");
	}

	vno.vno_operation = VNO_OP_EN_TRACK_DIRTY;
	if (ioctl(vmfd, VM_NPT_OPERATION, &vno) != 0) {
		err(EXIT_FAILURE, "VM_NPT_OPERATION failed");
	}

	vno.vno_operation = VNO_OP_GET_TRACK_DIRTY;
	if (ioctl(vmfd, VM_NPT_OPERATION, &vno) != 1) {
		errx(EXIT_FAILURE,
		    "expected dirty tracking to be active again");
	}
}

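/*
 * Check that VM_NPT_OPERATION rejects invalid arguments with the expected
 * errno values.
 */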
static void
test_inval_args(struct vmctx *ctx)
{
	const int vmfd = vm_get_device_fd(ctx);
	struct vm_npt_operation vno = { 0 };

	/* invalid vno_operation */
	vno.vno_operation = ~0;
	if (ioctl(vmfd, VM_NPT_OPERATION, &vno) == 0) {
		errx(EXIT_FAILURE, "unexpected VM_NPT_OPERATION success");
	}
	expect_errno(EINVAL);

	/* valid operation, but gpa is not page-aligned */
	vno.vno_operation = VNO_OP_GET_DIRTY | VNO_FLAG_BITMAP_IN;
	vno.vno_gpa = 0x100;
	vno.vno_len = PAGESZ;

	if (ioctl(vmfd, VM_NPT_OPERATION, &vno) == 0) {
		errx(EXIT_FAILURE, "unexpected VM_NPT_OPERATION success");
	}
	expect_errno(EINVAL);

	/* gpa is page-aligned, but len isn't */
	vno.vno_gpa = 0;
	vno.vno_len = PAGESZ + 0x100;

	if (ioctl(vmfd, VM_NPT_OPERATION, &vno) == 0) {
		errx(EXIT_FAILURE, "unexpected VM_NPT_OPERATION success");
	}
	expect_errno(EINVAL);

	/* overflowing region */
	vno.vno_gpa = 0xffffffffffffe000;
	vno.vno_len = 512 * PAGESZ;

	if (ioctl(vmfd, VM_NPT_OPERATION, &vno) == 0) {
		errx(EXIT_FAILURE, "unexpected VM_NPT_OPERATION success");
	}
	expect_errno(EOVERFLOW);
}

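/*
 * Check that pages dirtied by direct writes are reported by VNO_OP_GET_DIRTY,
 * and are no longer reported once the dirty bits have been cleared.
 */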
static void
test_op_get_dirty(struct vmctx *ctx)
{
	const int vmfd = vm_get_device_fd(ctx);
	uint8_t *datap = vm_map_gpa(ctx, 0, TEST_MEM_SZ);

	/* Use legacy mechanism to ensure dirty bits are clear to start */
	(void) legacy_clear_dirty(ctx);

	/* Dirty the first page out of every 8 */
	for (uint_t i = 0; i < TEST_MEM_SZ; i += (PAGESZ * 8)) {
		datap[i] = 0xff;
	}

	uint8_t bits[TEST_PAGE_COUNT / 8] = { 0 };
	struct vm_npt_operation vno = {
		.vno_gpa = 0,
		.vno_len = TEST_MEM_SZ,
		.vno_operation = VNO_OP_GET_DIRTY | VNO_FLAG_BITMAP_OUT,
		.vno_bitmap = bits,
	};
	do_npt_op(vmfd, &vno);

	for (uint_t i = 0; i < TEST_PAGE_COUNT / 8; i++) {
		if (bits[i] != 0x01) {
			errx(EXIT_FAILURE,
			    "unexpected dirty bits %02x at base gpa %08x",
			    bits[i], i * PAGESZ * 8);
		}
	}

	/* Clear those bits again */
	(void) legacy_clear_dirty(ctx);

	/* And check that they are zeroed now */
	do_npt_op(vmfd, &vno);
	for (uint_t i = 0; i < TEST_PAGE_COUNT / 8; i++) {
		if (bits[i] != 0) {
			errx(EXIT_FAILURE,
			    "unexpected dirty bits %02x at base gpa %08x",
			    bits[i], i * PAGESZ * 8);
		}
	}
}

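/*
 * Check that pages marked in the input bitmap of a VNO_OP_SET_DIRTY operation
 * are subsequently counted as dirty.
 */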
static void
test_op_set_dirty(struct vmctx *ctx)
{
	const int vmfd = vm_get_device_fd(ctx);

	/* Use legacy mechanism to ensure dirty bits are clear to start */
	(void) legacy_clear_dirty(ctx);

	/* Mark 17 pages as dirty: pages 0-15, plus page 23 */
	uint8_t bits[TEST_PAGE_COUNT / 8] = { 0xff, 0xff, 0x80 };
	struct vm_npt_operation vno = {
		.vno_gpa = 0,
		.vno_len = TEST_MEM_SZ,
		.vno_operation = VNO_OP_SET_DIRTY | VNO_FLAG_BITMAP_IN,
		.vno_bitmap = bits,
	};
	do_npt_op(vmfd, &vno);

	uint_t legacy_dirty = legacy_clear_dirty(ctx);
	if (legacy_dirty != 17) {
		errx(EXIT_FAILURE,
		    "unexpected dirty count after OP_SET_DIRTY: %u != 17",
		    legacy_dirty);
	}
}

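/*
 * Translate a gpa into its byte index and bit position within a dirty
 * bitmap, one bit per page, least-significant bit first: for example, the
 * page at gpa 0x9000 (page 9) maps to bit 1 of bitmap byte 1.
 */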
#define	BMAP_IDX(gpa)	((gpa) / (PAGESZ * 8))
#define	BMAP_BIT(gpa)	(((gpa) / PAGESZ) % 8)

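/*
 * Check that VNO_OP_RESET_DIRTY both reports and clears dirty state: for
 * pages marked in the input bitmap, the dirty bits are returned in that same
 * bitmap and reset to clean in the NPT.
 */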
static void
test_op_reset_dirty(struct vmctx *ctx)
{
	const int vmfd = vm_get_device_fd(ctx);
	uint8_t *datap = vm_map_gpa(ctx, 0, TEST_MEM_SZ);

	/* Use legacy mechanism to ensure dirty bits are clear to start */
	(void) legacy_clear_dirty(ctx);

	/* Dirty the front half of memory */
	for (uintptr_t gpa = 0; gpa < (TEST_MEM_SZ / 2); gpa += PAGESZ) {
		datap[gpa] = 0xff;
	}

	uint8_t bits[TEST_PAGE_COUNT / 8] = { 0 };
	/* Mark bitmap for every other page, starting at 0 */
	for (uintptr_t gpa = 0; gpa < TEST_MEM_SZ; gpa += (2 * PAGESZ)) {
		bits[BMAP_IDX(gpa)] |= (1 << BMAP_BIT(gpa));
	}

	struct vm_npt_operation vno = {
		.vno_gpa = 0,
		.vno_len = TEST_MEM_SZ,
		.vno_operation = VNO_OP_RESET_DIRTY |
		    VNO_FLAG_BITMAP_IN | VNO_FLAG_BITMAP_OUT,
		.vno_bitmap = bits,
	};
	do_npt_op(vmfd, &vno);

	/* Check that pages marked dirty were reported back as such */
	for (uintptr_t gpa = 0; gpa < TEST_MEM_SZ; gpa += PAGESZ) {
		const bool is_even_page = (BMAP_BIT(gpa) % 2) == 0;
		const bool is_dirty =
		    (bits[BMAP_IDX(gpa)] & (1 << BMAP_BIT(gpa))) != 0;

		/* Even pages in the first half should be set */
		if (is_even_page && gpa < (TEST_MEM_SZ / 2) && !is_dirty) {
			errx(EXIT_FAILURE,
			    "missing dirty bit set at gpa %08lx", gpa);
		}

		/* Odd pages and even pages in second half should be unset */
		if (is_dirty && (!is_even_page || gpa >= (TEST_MEM_SZ / 2))) {
			errx(EXIT_FAILURE,
			    "unexpected dirty bit set at gpa %08lx", gpa);
		}
	}

	/*
	 * With half of the pages dirtied at first, and then half of those
	 * reset from dirty by the NPT operation, we expect 1/4 of the pages
	 * to remain dirty.
	 */
	uint_t remaining_dirty = legacy_clear_dirty(ctx);
	if (remaining_dirty != (TEST_PAGE_COUNT / 4)) {
		errx(EXIT_FAILURE,
		    "expected %u pages remaining dirty, found %u",
		    TEST_PAGE_COUNT / 4, remaining_dirty);
	}
}

int
main(int argc, char *argv[])
{
	const char *suite_name = basename(argv[0]);
	struct vmctx *ctx;

	ctx = check_vmm_capability(suite_name);

	if (vm_setup_memory(ctx, TEST_MEM_SZ, VM_MMAP_ALL) != 0) {
		err(EXIT_FAILURE, "could not set up VM memory");
	}

	/* Test "legacy" VM_TRACK_DIRTY_PAGES mechanism first */
	test_legacy(ctx);

	/* Confirm that dirty tracking can be queried and toggled on/off */
	test_toggle_tracking(ctx);

	/* Check some invalid argument conditions */
	test_inval_args(ctx);

	/* Can dirty bits be queried with VNO_OP_GET_DIRTY? */
	test_op_get_dirty(ctx);

	/* Can dirty bits be set with VNO_OP_SET_DIRTY? */
	test_op_set_dirty(ctx);

	/*
	 * Can dirty bits be reset (simultaneously queried and cleared)
	 * with VNO_OP_RESET_DIRTY?
	 */
	test_op_reset_dirty(ctx);

	vm_destroy(ctx);
	(void) printf("%s\tPASS\n", suite_name);
	return (EXIT_SUCCESS);
}