// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright Intel Corporation, 2023
 *
 * Author: Chao Peng <chao.p.peng@linux.intel.com>
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <fcntl.h>

#include <linux/bitmap.h>
#include <linux/falloc.h>
#include <linux/sizes.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "kvm_util.h"
#include "numaif.h"
#include "test_util.h"
#include "ucall_common.h"

static size_t page_size;

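/*
 * guest_memfd does not implement the read/write file operations, so
 * direct I/O on the fd must fail at any offset.
 */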
static void test_file_read_write(int fd, size_t total_size)
{
	char buf[64];

	TEST_ASSERT(read(fd, buf, sizeof(buf)) < 0,
		    "read on a guest_mem fd should fail");
	TEST_ASSERT(write(fd, buf, sizeof(buf)) < 0,
		    "write on a guest_mem fd should fail");
	TEST_ASSERT(pread(fd, buf, sizeof(buf), 0) < 0,
		    "pread on a guest_mem fd should fail");
	TEST_ASSERT(pwrite(fd, buf, sizeof(buf), 0) < 0,
		    "pwrite on a guest_mem fd should fail");
}

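/*
 * MAP_PRIVATE (copy-on-write) mappings of guest_memfd are disallowed;
 * only MAP_SHARED is supported.
 */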
static void test_mmap_cow(int fd, size_t size)
{
	void *mem;

	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	TEST_ASSERT(mem == MAP_FAILED, "Copy-on-write not allowed by guest_memfd.");
}

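/*
 * Verify that a shared mapping is fully functional: fill the file through
 * the mapping, punch a hole in the first page and verify it reads back as
 * zeroes, then rewrite the hole to verify the page can be re-faulted.
 */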
static void test_mmap_supported(int fd, size_t total_size)
{
	const char val = 0xaa;
	char *mem;
	size_t i;
	int ret;

	mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);

	memset(mem, val, total_size);
	for (i = 0; i < total_size; i++)
		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);

	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0,
			page_size);
	TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) on the first page should succeed.");

	for (i = 0; i < page_size; i++)
		TEST_ASSERT_EQ(READ_ONCE(mem[i]), 0x00);
	for (; i < total_size; i++)
		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);

	memset(mem, val, page_size);
	for (i = 0; i < total_size; i++)
		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);

	kvm_munmap(mem, total_size);
}

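/*
 * Verify that NUMA memory policies can be set and queried on guest_memfd
 * mappings, and that an invalid policy is rejected with EINVAL.  Skipped
 * on single-node systems.
 */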
static void test_mbind(int fd, size_t total_size)
{
	const unsigned long nodemask_0 = 1; /* nid: 0 */
	unsigned long nodemask = 0;
	unsigned long maxnode = BITS_PER_TYPE(nodemask);
	int policy;
	char *mem;
	int ret;

	if (!is_multi_numa_node_system())
		return;

	mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);

	/* Test MPOL_INTERLEAVE policy */
	kvm_mbind(mem, page_size * 2, MPOL_INTERLEAVE, &nodemask_0, maxnode, 0);
	kvm_get_mempolicy(&policy, &nodemask, maxnode, mem, MPOL_F_ADDR);
	TEST_ASSERT(policy == MPOL_INTERLEAVE && nodemask == nodemask_0,
		    "Wanted MPOL_INTERLEAVE (%u) and nodemask 0x%lx, got %u and 0x%lx",
		    MPOL_INTERLEAVE, nodemask_0, policy, nodemask);

	/* Test basic MPOL_BIND policy */
	kvm_mbind(mem + page_size * 2, page_size * 2, MPOL_BIND, &nodemask_0, maxnode, 0);
	kvm_get_mempolicy(&policy, &nodemask, maxnode, mem + page_size * 2, MPOL_F_ADDR);
	TEST_ASSERT(policy == MPOL_BIND && nodemask == nodemask_0,
		    "Wanted MPOL_BIND (%u) and nodemask 0x%lx, got %u and 0x%lx",
		    MPOL_BIND, nodemask_0, policy, nodemask);

	/* Test MPOL_DEFAULT policy */
	kvm_mbind(mem, total_size, MPOL_DEFAULT, NULL, 0, 0);
	kvm_get_mempolicy(&policy, &nodemask, maxnode, mem, MPOL_F_ADDR);
	TEST_ASSERT(policy == MPOL_DEFAULT && !nodemask,
		    "Wanted MPOL_DEFAULT (%u) and nodemask 0x0, got %u and 0x%lx",
		    MPOL_DEFAULT, policy, nodemask);

	/* Test with invalid policy */
	ret = mbind(mem, page_size, 999, &nodemask_0, maxnode, 0);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "mbind with invalid policy should fail with EINVAL");

	kvm_munmap(mem, total_size);
}

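/*
 * Verify that pages are physically allocated on the nodes dictated by the
 * memory policy, whether the policy is set before or after the initial
 * allocation (re-faulting after PUNCH_HOLE re-applies the policy).
 */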
static void test_numa_allocation(int fd, size_t total_size)
{
	unsigned long node0_mask = 1;  /* Node 0 */
	unsigned long node1_mask = 2;  /* Node 1 */
	unsigned long maxnode = 8;
	void *pages[4];
	int status[4];
	char *mem;
	int i;

	if (!is_multi_numa_node_system())
		return;

	mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);

	for (i = 0; i < 4; i++)
		pages[i] = (char *)mem + page_size * i;

	/* Set NUMA policy after allocation */
	memset(mem, 0xaa, page_size);
	kvm_mbind(pages[0], page_size, MPOL_BIND, &node0_mask, maxnode, 0);
	kvm_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, page_size);

	/* Set NUMA policy before allocation */
	kvm_mbind(pages[0], page_size * 2, MPOL_BIND, &node1_mask, maxnode, 0);
	kvm_mbind(pages[2], page_size * 2, MPOL_BIND, &node0_mask, maxnode, 0);
	memset(mem, 0xaa, total_size);

	/* Validate that pages are allocated on the specified NUMA nodes */
	kvm_move_pages(0, 4, pages, NULL, status, 0);
	TEST_ASSERT(status[0] == 1, "Expected page 0 on node 1, got it on node %d", status[0]);
	TEST_ASSERT(status[1] == 1, "Expected page 1 on node 1, got it on node %d", status[1]);
	TEST_ASSERT(status[2] == 0, "Expected page 2 on node 0, got it on node %d", status[2]);
	TEST_ASSERT(status[3] == 0, "Expected page 3 on node 0, got it on node %d", status[3]);

	/* Punch hole for all pages */
	kvm_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, total_size);

	/* Change NUMA policy nodes and reallocate */
	kvm_mbind(pages[0], page_size * 2, MPOL_BIND, &node0_mask, maxnode, 0);
	kvm_mbind(pages[2], page_size * 2, MPOL_BIND, &node1_mask, maxnode, 0);
	memset(mem, 0xaa, total_size);

	kvm_move_pages(0, 4, pages, NULL, status, 0);
	TEST_ASSERT(status[0] == 0, "Expected page 0 on node 0, got it on node %d", status[0]);
	TEST_ASSERT(status[1] == 0, "Expected page 1 on node 0, got it on node %d", status[1]);
	TEST_ASSERT(status[2] == 1, "Expected page 2 on node 1, got it on node %d", status[2]);
	TEST_ASSERT(status[3] == 1, "Expected page 3 on node 1, got it on node %d", status[3]);

	kvm_munmap(mem, total_size);
}

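/*
 * Verify that MADV_COLLAPSE on a guest_memfd mapping fails with EINVAL,
 * i.e. that folios are never collapsed into a huge page in the userspace
 * page tables, while MADV_HUGEPAGE and MADV_DONTNEED still work.
 */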
static void test_collapse(int fd, uint64_t flags)
{
	const size_t pmd_size = get_trans_hugepagesz();
	void *reserved_addr;
	void *aligned_addr;
	char *mem;
	off_t i;

	/*
	 * To even reach the point where the guest_memfd folios will
	 * get collapsed, both the userspace address and the offset
	 * within the guest_memfd have to be aligned to pmd_size.
	 *
	 * To achieve that alignment, reserve virtual address space
	 * with regular mmap, then use MAP_FIXED to allocate memory
	 * from a pmd_size-aligned offset (0) at a known, available
	 * virtual address.
	 */
	reserved_addr = kvm_mmap(pmd_size * 2, PROT_NONE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1);
	aligned_addr = align_ptr_up(reserved_addr, pmd_size);

	mem = mmap(aligned_addr, pmd_size, PROT_READ | PROT_WRITE,
		   MAP_FIXED | MAP_SHARED, fd, 0);
	TEST_ASSERT(IS_ALIGNED((u64)mem, pmd_size),
		    "Userspace address must be aligned to PMD size.");

	/*
	 * Use reads to populate the page tables without setting the
	 * dirty flag on the pages.
	 */
	for (i = 0; i < pmd_size; i += getpagesize())
		READ_ONCE(mem[i]);

	/*
	 * Advising the use of huge pages in guest_memfd should be
	 * fine...
	 */
	kvm_madvise(mem, pmd_size, MADV_HUGEPAGE);

	/*
	 * ... but collapsing folios must not be supported to avoid
	 * mapping beyond shared ranges into host userspace page
	 * tables.
	 */
	TEST_ASSERT_EQ(madvise(mem, pmd_size, MADV_COLLAPSE), -1);
	TEST_ASSERT_EQ(errno, EINVAL);

	/*
	 * Removing from host page tables and re-faulting should be
	 * fine; should not end up faulting in a collapsed/huge folio.
	 */
	kvm_madvise(mem, pmd_size, MADV_DONTNEED);
	READ_ONCE(mem[0]);

	kvm_munmap(reserved_addr, pmd_size * 2);
}

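/*
 * Accesses beyond the accessible portion of a mapping must deliver
 * SIGBUS, and data written before the faulting access must be retained.
 */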
static void test_fault_sigbus(int fd, size_t accessible_size, size_t map_size)
{
	const char val = 0xaa;
	char *mem;
	size_t i;

	mem = kvm_mmap(map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);

	TEST_EXPECT_SIGBUS(memset(mem, val, map_size));
	TEST_EXPECT_SIGBUS((void)READ_ONCE(mem[accessible_size]));

	for (i = 0; i < accessible_size; i++)
		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);

	kvm_munmap(mem, map_size);
}

static void test_fault_overflow(int fd, size_t total_size)
{
	test_fault_sigbus(fd, total_size, total_size * 4);
}

static void test_fault_private(int fd, size_t total_size)
{
	test_fault_sigbus(fd, 0, total_size);
}

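/* Without GUEST_MEMFD_FLAG_MMAP, all mmap() attempts must fail. */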
static void test_mmap_not_supported(int fd, size_t total_size)
{
	char *mem;

	mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT_EQ(mem, MAP_FAILED);

	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT_EQ(mem, MAP_FAILED);
}

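/* st_size must report the requested file size, st_blksize the page size. */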
static void test_file_size(int fd, size_t total_size)
{
	struct stat sb;
	int ret;

	ret = fstat(fd, &sb);
	TEST_ASSERT(!ret, "fstat should succeed");
	TEST_ASSERT_EQ(sb.st_size, total_size);
	TEST_ASSERT_EQ(sb.st_blksize, page_size);
}

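/*
 * fallocate() must enforce page alignment of both offset and size, and
 * must reject allocation, but tolerate hole punching, beyond the end of
 * the file.
 */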
static void test_fallocate(int fd, size_t total_size)
{
	int ret;

	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, total_size);
	TEST_ASSERT(!ret, "fallocate with aligned offset and size should succeed");

	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
			page_size - 1, page_size);
	TEST_ASSERT(ret, "fallocate with unaligned offset should fail");

	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, total_size, page_size);
	TEST_ASSERT(ret, "fallocate beginning at total_size should fail");

	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, total_size + page_size, page_size);
	TEST_ASSERT(ret, "fallocate beginning after total_size should fail");

	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
			total_size, page_size);
	TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) at total_size should succeed");

	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
			total_size + page_size, page_size);
	TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) after total_size should succeed");

	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
			page_size, page_size - 1);
	TEST_ASSERT(ret, "fallocate with unaligned size should fail");

	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
			page_size, page_size);
	TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) with aligned offset and size should succeed");

	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, page_size, page_size);
	TEST_ASSERT(!ret, "fallocate to restore punched hole should succeed");
}

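/* PUNCH_HOLE must fail if either the offset or the length is not page-aligned. */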
static void test_invalid_punch_hole(int fd, size_t total_size)
{
	struct {
		off_t offset;
		off_t len;
	} testcases[] = {
		{0, 1},
		{0, page_size - 1},
		{0, page_size + 1},

		{1, 1},
		{1, page_size - 1},
		{1, page_size},
		{1, page_size + 1},

		{page_size, 1},
		{page_size, page_size - 1},
		{page_size, page_size + 1},
	};
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
		ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
				testcases[i].offset, testcases[i].len);
		TEST_ASSERT(ret == -1 && errno == EINVAL,
			    "PUNCH_HOLE with !PAGE_SIZE offset (%lx) and/or length (%lx) should fail",
			    testcases[i].offset, testcases[i].len);
	}
}

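/* Sizes that are not multiples of the page size must be rejected. */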
static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
						  uint64_t guest_memfd_flags)
{
	size_t size;
	int fd;

	for (size = 1; size < page_size; size++) {
		fd = __vm_create_guest_memfd(vm, size, guest_memfd_flags);
		TEST_ASSERT(fd < 0 && errno == EINVAL,
			    "guest_memfd() with non-page-aligned size '0x%lx' should fail with EINVAL",
			    size);
	}
}

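/*
 * Multiple guest_memfds must coexist: each fd gets its own inode and its
 * size is tracked independently.
 */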
static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
{
	int fd1, fd2, ret;
	struct stat st1, st2;

	fd1 = __vm_create_guest_memfd(vm, page_size, 0);
	TEST_ASSERT(fd1 != -1, "memfd creation should succeed");

	ret = fstat(fd1, &st1);
	TEST_ASSERT(ret != -1, "memfd fstat should succeed");
	TEST_ASSERT(st1.st_size == page_size, "memfd st_size should match requested size");

	fd2 = __vm_create_guest_memfd(vm, page_size * 2, 0);
	TEST_ASSERT(fd2 != -1, "memfd creation should succeed");

	ret = fstat(fd2, &st2);
	TEST_ASSERT(ret != -1, "memfd fstat should succeed");
	TEST_ASSERT(st2.st_size == page_size * 2, "second memfd st_size should match requested size");

	ret = fstat(fd1, &st1);
	TEST_ASSERT(ret != -1, "memfd fstat should succeed");
	TEST_ASSERT(st1.st_size == page_size, "first memfd st_size should still match requested size");
	TEST_ASSERT(st1.st_ino != st2.st_ino, "different memfd should have different inode numbers");

	close(fd2);
	close(fd1);
}

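/*
 * Walk every possible single-flag value: flags advertised by
 * KVM_CAP_GUEST_MEMFD_FLAGS must be accepted, all others rejected with
 * EINVAL.
 */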
static void test_guest_memfd_flags(struct kvm_vm *vm)
{
	uint64_t valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
	uint64_t flag;
	int fd;

	for (flag = BIT(0); flag; flag <<= 1) {
		fd = __vm_create_guest_memfd(vm, page_size, flag);
		if (flag & valid_flags) {
			TEST_ASSERT(fd >= 0,
				    "guest_memfd() with flag '0x%lx' should succeed",
				    flag);
			close(fd);
		} else {
			TEST_ASSERT(fd < 0 && errno == EINVAL,
				    "guest_memfd() with flag '0x%lx' should fail with EINVAL",
				    flag);
		}
	}
}

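/*
 * Create a guest_memfd of @__gmem_size bytes with @__flags, invoke
 * test_<name>() on it, and close the fd when done.
 */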
#define __gmem_test(__test, __vm, __flags, __gmem_size)			\
do {									\
	int fd = vm_create_guest_memfd(__vm, __gmem_size, __flags);	\
									\
	test_##__test(fd, __gmem_size);					\
	close(fd);							\
} while (0)

#define gmem_test(__test, __vm, __flags)				\
	__gmem_test(__test, __vm, __flags, page_size * 4)
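
/*
 * For illustration, gmem_test(file_size, vm, flags) expands to roughly:
 *
 *	int fd = vm_create_guest_memfd(vm, page_size * 4, flags);
 *
 *	test_file_size(fd, page_size * 4);
 *	close(fd);
 */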

static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
{
	test_create_guest_memfd_multiple(vm);
	test_create_guest_memfd_invalid_sizes(vm, flags);

	gmem_test(file_read_write, vm, flags);

	if (flags & GUEST_MEMFD_FLAG_MMAP) {
		if (flags & GUEST_MEMFD_FLAG_INIT_SHARED) {
			size_t pmd_size = get_trans_hugepagesz();

			gmem_test(mmap_supported, vm, flags);
			gmem_test(fault_overflow, vm, flags);
			gmem_test(numa_allocation, vm, flags);
			__gmem_test(collapse, vm, flags, pmd_size);
		} else {
			gmem_test(fault_private, vm, flags);
		}

		gmem_test(mmap_cow, vm, flags);
		gmem_test(mbind, vm, flags);
	} else {
		gmem_test(mmap_not_supported, vm, flags);
	}

	gmem_test(file_size, vm, flags);
	gmem_test(fallocate, vm, flags);
	gmem_test(invalid_punch_hole, vm, flags);
}

static void test_guest_memfd(unsigned long vm_type)
{
	struct kvm_vm *vm = vm_create_barebones_type(vm_type);
	uint64_t flags;

	test_guest_memfd_flags(vm);

	__test_guest_memfd(vm, 0);

	flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
	if (flags & GUEST_MEMFD_FLAG_MMAP)
		__test_guest_memfd(vm, GUEST_MEMFD_FLAG_MMAP);

	/* MMAP should always be supported if INIT_SHARED is supported. */
	if (flags & GUEST_MEMFD_FLAG_INIT_SHARED)
		__test_guest_memfd(vm, GUEST_MEMFD_FLAG_MMAP |
				       GUEST_MEMFD_FLAG_INIT_SHARED);

	kvm_vm_free(vm);
}

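/*
 * Guest side of test_guest_memfd_guest(): verify the pattern written by
 * the host, then overwrite it so the host can verify that guest writes
 * are visible through the shared mapping.
 */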
static void guest_code(uint8_t *mem, uint64_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		__GUEST_ASSERT(mem[i] == 0xaa,
			       "Guest expected 0xaa at offset %lu, got 0x%x", i, mem[i]);

	memset(mem, 0xff, size);
	GUEST_DONE();
}

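/*
 * Verify that memory backed by a shared guest_memfd mapping is coherent
 * between host userspace and the guest.
 */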
static void test_guest_memfd_guest(void)
{
	/*
	 * Skip the first 4gb and slot0.  slot0 maps <1gb and is used to back
	 * the guest's code, stack, and page tables, and low memory contains
	 * the PCI hole and other MMIO regions that need to be avoided.
	 */
	const uint64_t gpa = SZ_4G;
	const int slot = 1;

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint8_t *mem;
	size_t size;
	int fd, i;

	if (!kvm_check_cap(KVM_CAP_GUEST_MEMFD_FLAGS))
		return;

	vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1, guest_code);

	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_MMAP,
		    "Default VM type should support MMAP, supported flags = 0x%x",
		    vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));
	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_INIT_SHARED,
		    "Default VM type should support INIT_SHARED, supported flags = 0x%x",
		    vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));

	size = vm->page_size;
	fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP |
					     GUEST_MEMFD_FLAG_INIT_SHARED);
	vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size, NULL, fd, 0);

	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
	memset(mem, 0xaa, size);
	kvm_munmap(mem, size);

	virt_pg_map(vm, gpa, gpa);
	vcpu_args_set(vcpu, 2, gpa, size);
	vcpu_run(vcpu);

	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);

	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
	for (i = 0; i < size; i++)
		TEST_ASSERT_EQ(mem[i], 0xff);

	close(fd);
	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	unsigned long vm_types, vm_type;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));

	page_size = getpagesize();

	/*
	 * Not all architectures support KVM_CAP_VM_TYPES. However, those that
	 * support guest_memfd have that support for the default VM type.
	 */
	vm_types = kvm_check_cap(KVM_CAP_VM_TYPES);
	if (!vm_types)
		vm_types = BIT(VM_TYPE_DEFAULT);

	for_each_set_bit(vm_type, &vm_types, BITS_PER_TYPE(vm_types))
		test_guest_memfd(vm_type);

	test_guest_memfd_guest();
}
557