// SPDX-License-Identifier: GPL-2.0
/*
 * KVM demand paging test
 * Adapted from dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2019, Google, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <time.h>
#include <poll.h>
#include <pthread.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/userfaultfd.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#ifdef __NR_userfaultfd

/* The memory slot index used for the demand paging test region */
#define TEST_MEM_SLOT_INDEX		1

/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM		0xc0000000

#define DEFAULT_GUEST_TEST_MEM_SIZE (1 << 30) /* 1G */

#ifdef PRINT_PER_PAGE_UPDATES
#define PER_PAGE_DEBUG(...) printf(__VA_ARGS__)
#else
#define PER_PAGE_DEBUG(...) _no_printf(__VA_ARGS__)
#endif

#ifdef PRINT_PER_VCPU_UPDATES
#define PER_VCPU_DEBUG(...) printf(__VA_ARGS__)
#else
#define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__)
#endif

#define MAX_VCPUS 512

/*
 * Guest/Host shared variables. Ensure addr_gva2hva() and/or
 * sync_global_to/from_guest() are used when accessing from
 * the host. READ/WRITE_ONCE() should also be used with anything
 * that may change.
 */
static uint64_t host_page_size;
static uint64_t guest_page_size;

static char *guest_data_prototype;

/*
 * Guest physical memory offset of the testing memory slot.
 * This will be set to the topmost valid physical address minus
 * the test memory size.
 */
static uint64_t guest_test_phys_mem;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

struct vcpu_args {
	uint64_t gva;
	uint64_t pages;

	/* Only used by the host userspace part of the vCPU thread */
	int vcpu_id;
	struct kvm_vm *vm;
};

static struct vcpu_args vcpu_args[MAX_VCPUS];

/*
 * Write to the first 8 bytes of each page in the demand paging memory
 * region, touching (and thereby faulting in) every test page once.
 */
static void guest_code(uint32_t vcpu_id)
{
	uint64_t gva;
	uint64_t pages;
	int i;

	/* Make sure vCPU args data structure is not corrupt. */
	GUEST_ASSERT(vcpu_args[vcpu_id].vcpu_id == vcpu_id);

	gva = vcpu_args[vcpu_id].gva;
	pages = vcpu_args[vcpu_id].pages;

	for (i = 0; i < pages; i++) {
		uint64_t addr = gva + (i * guest_page_size);

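		/*
		 * Align down to a host page boundary: the write below then
		 * lands at the start of the host page backing this guest
		 * page, even when the guest page size differs from the
		 * host's.
		 */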
		addr &= ~(host_page_size - 1);
		*(uint64_t *)addr = 0x0123456789ABCDEF;
	}

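	/* Tell the host we have touched every page in our region. */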
	GUEST_SYNC(1);
}

static void *vcpu_worker(void *data)
{
	int ret;
	struct vcpu_args *args = (struct vcpu_args *)data;
	struct kvm_vm *vm = args->vm;
	int vcpu_id = args->vcpu_id;
	struct kvm_run *run;
	struct timespec start, end, ts_diff;

	vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
	run = vcpu_state(vm, vcpu_id);

	clock_gettime(CLOCK_MONOTONIC, &start);

	/* Let the guest access its memory */
	ret = _vcpu_run(vm, vcpu_id);
	TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
	if (get_ucall(vm, vcpu_id, NULL) != UCALL_SYNC) {
		TEST_ASSERT(false,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));
	}

	clock_gettime(CLOCK_MONOTONIC, &end);
	ts_diff = timespec_sub(end, start);
	PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
		       ts_diff.tv_sec, ts_diff.tv_nsec);

	return NULL;
}

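/* Sizing helpers: a 4K page table page holds 512 8-byte entries (4096 / 8). */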
#define PAGE_SHIFT_4K  12
#define PTES_PER_4K_PT 512

static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
				uint64_t vcpu_memory_bytes)
{
	struct kvm_vm *vm;
	uint64_t pages = DEFAULT_GUEST_PHY_PAGES;

	/* Account for a few pages per-vCPU for stacks */
	pages += DEFAULT_STACK_PGS * vcpus;

	/*
	 * Reserve twice the amount of memory needed to map the test region
	 * and the page table / stacks region, at 4K, for page tables. Do the
	 * calculation with 4K page size: the smallest of all archs (e.g., a
	 * 64K page size guest will need even less memory for page tables).
	 */
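	/*
	 * For example: one vCPU demand paging 1G at 4K granularity adds
	 * ((2 * 1G) >> 12) / 512 = 1024 pages (4M) of page table overhead.
	 */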
	pages += (2 * pages) / PTES_PER_4K_PT;
	pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) /
		 PTES_PER_4K_PT;
	pages = vm_adjust_num_guest_pages(mode, pages);

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

	vm = _vm_create(mode, pages, O_RDWR);
	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
#ifdef __x86_64__
	vm_create_irqchip(vm);
#endif
	return vm;
}

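/*
 * Resolve a missing-page fault reported on @uffd by copying the prototype
 * data pattern into the faulting host page with UFFDIO_COPY, which also
 * wakes the faulting thread.
 */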
static int handle_uffd_page_request(int uffd, uint64_t addr)
{
	pid_t tid;
	struct timespec start;
	struct timespec end;
	struct uffdio_copy copy;
	int r;

	tid = syscall(__NR_gettid);

	copy.src = (uint64_t)guest_data_prototype;
	copy.dst = addr;
	copy.len = host_page_size;
	copy.mode = 0;

	clock_gettime(CLOCK_MONOTONIC, &start);

	r = ioctl(uffd, UFFDIO_COPY, &copy);
	if (r == -1) {
		pr_info("Failed to page in 0x%lx from thread %d, errno: %d\n",
			addr, tid, errno);
		return r;
	}

	clock_gettime(CLOCK_MONOTONIC, &end);

	PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid,
		       timespec_to_ns(timespec_sub(end, start)));
	PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
		       host_page_size, addr, tid);

	return 0;
}

bool quit_uffd_thread;

struct uffd_handler_args {
	int uffd;
	int pipefd;
	useconds_t delay;
};

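/*
 * Fault handler thread: polls two descriptors, the userfaultfd for page
 * fault events and a pipe the main thread writes to when the handler
 * should exit. Each fault is resolved with UFFDIO_COPY, after an optional
 * delay that simulates a slow demand paging source.
 */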
static void *uffd_handler_thread_fn(void *arg)
{
	struct uffd_handler_args *uffd_args = (struct uffd_handler_args *)arg;
	int uffd = uffd_args->uffd;
	int pipefd = uffd_args->pipefd;
	useconds_t delay = uffd_args->delay;
	int64_t pages = 0;
	struct timespec start, end, ts_diff;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!quit_uffd_thread) {
		struct uffd_msg msg;
		struct pollfd pollfd[2];
		char tmp_chr;
		int r;
		uint64_t addr;

		pollfd[0].fd = uffd;
		pollfd[0].events = POLLIN;
		pollfd[1].fd = pipefd;
		pollfd[1].events = POLLIN;

		r = poll(pollfd, 2, -1);
		switch (r) {
		case -1:
			pr_info("poll err");
			continue;
		case 0:
			continue;
		case 1:
			break;
		default:
			pr_info("Polling uffd returned %d", r);
			return NULL;
		}

		if (pollfd[0].revents & POLLERR) {
			pr_info("uffd revents has POLLERR");
			return NULL;
		}

		if (pollfd[1].revents & POLLIN) {
			r = read(pollfd[1].fd, &tmp_chr, 1);
			TEST_ASSERT(r == 1,
				    "Error reading pipefd in UFFD thread\n");
			return NULL;
		}

		if (!(pollfd[0].revents & POLLIN))
			continue;

		r = read(uffd, &msg, sizeof(msg));
		if (r == -1) {
			if (errno == EAGAIN)
				continue;
			pr_info("Read of uffd got errno %d", errno);
			return NULL;
		}

		if (r != sizeof(msg)) {
			pr_info("Read on uffd returned unexpected size: %d bytes", r);
			return NULL;
		}

		if (!(msg.event & UFFD_EVENT_PAGEFAULT))
			continue;

		if (delay)
			usleep(delay);
		addr = msg.arg.pagefault.address;
		r = handle_uffd_page_request(uffd, addr);
		if (r < 0)
			return NULL;
		pages++;
	}

	clock_gettime(CLOCK_MONOTONIC, &end);
	ts_diff = timespec_sub(end, start);
	PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. (%f/sec)\n",
		       pages, ts_diff.tv_sec, ts_diff.tv_nsec,
		       pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 1000000000.0));

	return NULL;
}

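/*
 * Register the test region with userfaultfd: negotiate the API with
 * UFFDIO_API, register the HVA range in missing-fault mode with
 * UFFDIO_REGISTER, and start a handler thread to service the faults.
 */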
static int setup_demand_paging(struct kvm_vm *vm,
			       pthread_t *uffd_handler_thread, int pipefd,
			       useconds_t uffd_delay,
			       struct uffd_handler_args *uffd_args,
			       void *hva, uint64_t len)
{
	int uffd;
	struct uffdio_api uffdio_api;
	struct uffdio_register uffdio_register;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd == -1) {
		pr_info("uffd creation failed\n");
		return -1;
	}

	uffdio_api.api = UFFD_API;
	uffdio_api.features = 0;
	if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
		pr_info("ioctl uffdio_api failed\n");
		return -1;
	}

	uffdio_register.range.start = (uint64_t)hva;
	uffdio_register.range.len = len;
	uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) {
		pr_info("ioctl uffdio_register failed\n");
		return -1;
	}

	if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) !=
			UFFD_API_RANGE_IOCTLS) {
		pr_info("unexpected userfaultfd ioctl set\n");
		return -1;
	}

	uffd_args->uffd = uffd;
	uffd_args->pipefd = pipefd;
	uffd_args->delay = uffd_delay;
	pthread_create(uffd_handler_thread, NULL, uffd_handler_thread_fn,
		       uffd_args);

	PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n",
		       hva, hva + len);

	return 0;
}

static void run_test(enum vm_guest_mode mode, bool use_uffd,
		     useconds_t uffd_delay, int vcpus,
		     uint64_t vcpu_memory_bytes)
{
	pthread_t *vcpu_threads;
	pthread_t *uffd_handler_threads = NULL;
	struct uffd_handler_args *uffd_args = NULL;
	struct timespec start, end, ts_diff;
	int *pipefds = NULL;
	struct kvm_vm *vm;
	uint64_t guest_num_pages;
	int vcpu_id;
	int r;

	vm = create_vm(mode, vcpus, vcpu_memory_bytes);

	guest_page_size = vm_get_page_size(vm);

	TEST_ASSERT(vcpu_memory_bytes % guest_page_size == 0,
		    "Guest memory size is not guest page size aligned.");

	guest_num_pages = (vcpus * vcpu_memory_bytes) / guest_page_size;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);

	/*
	 * The test region must not need more pages than the guest address
	 * space provides.
	 */
	TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
		    "Requested more guest memory than address space allows.\n"
		    "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx\n",
		    guest_num_pages, vm_get_max_gfn(vm), vcpus,
		    vcpu_memory_bytes);

	host_page_size = getpagesize();
	TEST_ASSERT(vcpu_memory_bytes % host_page_size == 0,
		    "Guest memory size is not host page size aligned.");

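	/*
	 * Place the test region at the top of guest physical memory, aligned
	 * down to a host page boundary so that the backing HVAs stay host
	 * page aligned for userfaultfd registration.
	 */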
	guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
			      guest_page_size;
	guest_test_phys_mem &= ~(host_page_size - 1);

#ifdef __s390x__
	/* Align to 1M (segment size) */
	guest_test_phys_mem &= ~((1 << 20) - 1);
#endif

	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);

	/* Add an extra memory slot for testing demand paging */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    guest_test_phys_mem,
				    TEST_MEM_SLOT_INDEX,
				    guest_num_pages, 0);

	/* Do mapping for the demand paging memory slot */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);

	ucall_init(vm, NULL);

	guest_data_prototype = malloc(host_page_size);
	TEST_ASSERT(guest_data_prototype,
		    "Failed to allocate buffer for guest data pattern");
	memset(guest_data_prototype, 0xAB, host_page_size);

	vcpu_threads = malloc(vcpus * sizeof(*vcpu_threads));
	TEST_ASSERT(vcpu_threads, "Memory allocation failed");

	if (use_uffd) {
		uffd_handler_threads =
			malloc(vcpus * sizeof(*uffd_handler_threads));
		TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");

		uffd_args = malloc(vcpus * sizeof(*uffd_args));
		TEST_ASSERT(uffd_args, "Memory allocation failed");

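		/*
		 * One pipe pair per vCPU; the main thread writes to the
		 * write end to tell the matching uffd handler thread to
		 * exit.
		 */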
		pipefds = malloc(sizeof(int) * vcpus * 2);
		TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
	}

	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
		vm_paddr_t vcpu_gpa;
		void *vcpu_hva;

		vm_vcpu_add_default(vm, vcpu_id, guest_code);

		vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
		PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
			       vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);

		/* Cache the HVA pointer of the region */
		vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);

		if (use_uffd) {
			/*
			 * Set up user fault fd to handle demand paging
			 * requests.
			 */
			r = pipe2(&pipefds[vcpu_id * 2],
				  O_CLOEXEC | O_NONBLOCK);
			TEST_ASSERT(!r, "Failed to set up pipefd");

			r = setup_demand_paging(vm,
						&uffd_handler_threads[vcpu_id],
						pipefds[vcpu_id * 2],
						uffd_delay, &uffd_args[vcpu_id],
						vcpu_hva, vcpu_memory_bytes);
			if (r < 0)
				exit(-r);
		}

#ifdef __x86_64__
		vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());
#endif

		vcpu_args[vcpu_id].vm = vm;
		vcpu_args[vcpu_id].vcpu_id = vcpu_id;
		vcpu_args[vcpu_id].gva = guest_test_virt_mem +
					 (vcpu_id * vcpu_memory_bytes);
		vcpu_args[vcpu_id].pages = vcpu_memory_bytes / guest_page_size;
	}

	/* Export the shared variables to the guest */
	sync_global_to_guest(vm, host_page_size);
	sync_global_to_guest(vm, guest_page_size);
	sync_global_to_guest(vm, vcpu_args);

	pr_info("Finished creating vCPUs and starting uffd threads\n");

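	/*
	 * The measured interval covers everything from launching the first
	 * vCPU thread until the last vCPU thread is joined, i.e. the time
	 * it takes to demand page every page of the test region.
	 */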
	clock_gettime(CLOCK_MONOTONIC, &start);

	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
		pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
			       &vcpu_args[vcpu_id]);
	}

	pr_info("Started all vCPUs\n");

	/* Wait for the vcpu threads to quit */
	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
		pthread_join(vcpu_threads[vcpu_id], NULL);
		PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id);
	}

	pr_info("All vCPU threads joined\n");

	clock_gettime(CLOCK_MONOTONIC, &end);

	if (use_uffd) {
		char c;

		/* Tell the user fault fd handler threads to quit */
		for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
			r = write(pipefds[vcpu_id * 2 + 1], &c, 1);
			TEST_ASSERT(r == 1, "Unable to write to pipefd");

			pthread_join(uffd_handler_threads[vcpu_id], NULL);
		}
	}

	ts_diff = timespec_sub(end, start);
	pr_info("Total guest execution time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);
	pr_info("Overall demand paging rate: %f pgs/sec\n",
		guest_num_pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 1000000000.0));

	ucall_uninit(vm);
	kvm_vm_free(vm);

	free(guest_data_prototype);
	free(vcpu_threads);
	if (use_uffd) {
		free(uffd_handler_threads);
		free(uffd_args);
		free(pipefds);
	}
}

struct guest_mode {
	bool supported;
	bool enabled;
};
static struct guest_mode guest_modes[NUM_VM_MODES];

#define guest_mode_init(mode, supported, enabled) ({ \
	guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
})

static void help(char *name)
{
	int i;

	puts("");
	printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
	       "          [-b memory] [-v vcpus]\n", name);
	printf(" -m: specify the guest mode ID to test\n"
	       "     (default: test all supported modes)\n"
	       "     This option may be used multiple times.\n"
	       "     Guest mode IDs:\n");
	for (i = 0; i < NUM_VM_MODES; ++i) {
		printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
		       guest_modes[i].supported ? " (supported)" : "");
	}
	printf(" -u: use User Fault FD to handle vCPU page\n"
	       "     faults.\n");
	printf(" -d: add a delay in usec to the User Fault\n"
	       "     FD handler to simulate demand paging\n"
	       "     overheads. Ignored without -u.\n");
	printf(" -b: specify the size of the memory region which should be\n"
	       "     demand paged by each vCPU. e.g. 10M or 3G.\n"
	       "     Default: 1G\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	puts("");
	exit(0);
}

int main(int argc, char *argv[])
{
	bool mode_selected = false;
	uint64_t vcpu_memory_bytes = DEFAULT_GUEST_TEST_MEM_SIZE;
	int vcpus = 1;
	unsigned int mode;
	int opt, i;
	bool use_uffd = false;
	useconds_t uffd_delay = 0;

#ifdef __x86_64__
	guest_mode_init(VM_MODE_PXXV48_4K, true, true);
#endif
#ifdef __aarch64__
	guest_mode_init(VM_MODE_P40V48_4K, true, true);
	guest_mode_init(VM_MODE_P40V48_64K, true, true);
	{
		unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);

		if (limit >= 52)
			guest_mode_init(VM_MODE_P52V48_64K, true, true);
		if (limit >= 48) {
			guest_mode_init(VM_MODE_P48V48_4K, true, true);
			guest_mode_init(VM_MODE_P48V48_64K, true, true);
		}
	}
#endif
#ifdef __s390x__
	guest_mode_init(VM_MODE_P40V48_4K, true, true);
#endif

	while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) {
		switch (opt) {
		case 'm':
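			/*
			 * The first -m clears all modes so that only the
			 * explicitly requested mode IDs are run.
			 */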
			if (!mode_selected) {
				for (i = 0; i < NUM_VM_MODES; ++i)
					guest_modes[i].enabled = false;
				mode_selected = true;
			}
			mode = strtoul(optarg, NULL, 10);
			TEST_ASSERT(mode < NUM_VM_MODES,
				    "Guest mode ID %d too big", mode);
			guest_modes[mode].enabled = true;
			break;
		case 'u':
			use_uffd = true;
			break;
		case 'd':
			uffd_delay = strtoul(optarg, NULL, 0);
			TEST_ASSERT(uffd_delay >= 0,
				    "A negative UFFD delay is not supported.");
			break;
		case 'b':
			vcpu_memory_bytes = parse_size(optarg);
			break;
		case 'v':
			vcpus = atoi(optarg);
			TEST_ASSERT(vcpus > 0,
				    "Must have a positive number of vCPUs");
			TEST_ASSERT(vcpus <= MAX_VCPUS,
				    "This test does not currently support\n"
				    "more than %d vCPUs.", MAX_VCPUS);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	for (i = 0; i < NUM_VM_MODES; ++i) {
		if (!guest_modes[i].enabled)
			continue;
		TEST_ASSERT(guest_modes[i].supported,
			    "Guest mode ID %d (%s) not supported.",
			    i, vm_guest_mode_string(i));
		run_test(i, use_uffd, uffd_delay, vcpus, vcpu_memory_bytes);
	}

	return 0;
}

#else /* __NR_userfaultfd */

#warning "missing __NR_userfaultfd definition"

int main(void)
{
	print_skip("__NR_userfaultfd must be present for userfaultfd test");
	return KSFT_SKIP;
}

#endif /* __NR_userfaultfd */
662