1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * tools/testing/selftests/kvm/lib/kvm_util.c
4  *
5  * Copyright (C) 2018, Google LLC.
6  */
7 
8 #define _GNU_SOURCE /* for program_invocation_name */
9 #include "test_util.h"
10 #include "kvm_util.h"
11 #include "processor.h"
12 
13 #include <assert.h>
14 #include <sched.h>
15 #include <sys/mman.h>
16 #include <sys/types.h>
17 #include <sys/stat.h>
18 #include <unistd.h>
19 #include <linux/kernel.h>
20 
21 #define KVM_UTIL_MIN_PFN	2
22 
23 static int vcpu_mmap_sz(void);
24 
25 int open_path_or_exit(const char *path, int flags)
26 {
27 	int fd;
28 
29 	fd = open(path, flags);
30 	__TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);
31 
32 	return fd;
33 }
34 
35 /*
36  * Open KVM_DEV_PATH if available, otherwise exit the entire program.
37  *
38  * Input Args:
39  *   flags - The flags to pass when opening KVM_DEV_PATH.
40  *
41  * Return:
42  *   The opened file descriptor of /dev/kvm.
43  */
44 static int _open_kvm_dev_path_or_exit(int flags)
45 {
46 	return open_path_or_exit(KVM_DEV_PATH, flags);
47 }
48 
49 int open_kvm_dev_path_or_exit(void)
50 {
51 	return _open_kvm_dev_path_or_exit(O_RDONLY);
52 }
53 
54 static bool get_module_param_bool(const char *module_name, const char *param)
55 {
56 	const int path_size = 128;
57 	char path[path_size];
58 	char value;
59 	ssize_t r;
60 	int fd;
61 
62 	r = snprintf(path, path_size, "/sys/module/%s/parameters/%s",
63 		     module_name, param);
64 	TEST_ASSERT(r < path_size,
65 		    "Failed to construct sysfs path in %d bytes.", path_size);
66 
67 	fd = open_path_or_exit(path, O_RDONLY);
68 
69 	r = read(fd, &value, 1);
70 	TEST_ASSERT(r == 1, "read(%s) failed", path);
71 
72 	r = close(fd);
73 	TEST_ASSERT(!r, "close(%s) failed", path);
74 
75 	if (value == 'Y')
76 		return true;
77 	else if (value == 'N')
78 		return false;
79 
80 	TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
81 }
82 
83 bool get_kvm_param_bool(const char *param)
84 {
85 	return get_module_param_bool("kvm", param);
86 }
87 
88 bool get_kvm_intel_param_bool(const char *param)
89 {
90 	return get_module_param_bool("kvm_intel", param);
91 }
92 
93 bool get_kvm_amd_param_bool(const char *param)
94 {
95 	return get_module_param_bool("kvm_amd", param);
96 }
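
/*
 * Example (illustrative sketch): a test that depends on a module feature
 * can gate itself on the relevant param before doing any setup.  The
 * param names ("ept", "npt") and the host_cpu_is_intel/host_cpu_is_amd
 * globals are x86-specific assumptions from processor.h.
 *
 *	if (host_cpu_is_intel)
 *		TEST_REQUIRE(get_kvm_intel_param_bool("ept"));
 *	else if (host_cpu_is_amd)
 *		TEST_REQUIRE(get_kvm_amd_param_bool("npt"));
 */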
97 
98 /*
99  * Capability
100  *
101  * Input Args:
102  *   cap - Capability
103  *
104  * Output Args: None
105  *
106  * Return:
107  *   On success, the value corresponding to the capability (KVM_CAP_*)
108  *   specified by cap.  On failure, a TEST_ASSERT failure
109  *   is produced.
110  *
111  * Looks up and returns the value corresponding to the capability
112  * (KVM_CAP_*) given by cap.
113  */
114 unsigned int kvm_check_cap(long cap)
115 {
116 	int ret;
117 	int kvm_fd;
118 
119 	kvm_fd = open_kvm_dev_path_or_exit();
120 	ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
121 	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
122 
123 	close(kvm_fd);
124 
125 	return (unsigned int)ret;
126 }
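
/*
 * Example (sketch): KVM_CHECK_EXTENSION returns 0 if a capability is
 * unsupported and a positive value otherwise; for some capabilities the
 * value itself is meaningful, e.g. the maximum vCPU count:
 *
 *	uint32_t max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
 *
 *	TEST_REQUIRE(max_vcpus >= 3);
 */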
127 
128 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
129 {
130 	if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
131 		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
132 	else
133 		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
134 	vm->dirty_ring_size = ring_size;
135 }
136 
137 static void vm_open(struct kvm_vm *vm)
138 {
139 	vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
140 
141 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));
142 
143 	vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
144 	TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
145 }
146 
147 const char *vm_guest_mode_string(uint32_t i)
148 {
149 	static const char * const strings[] = {
150 		[VM_MODE_P52V48_4K]	= "PA-bits:52,  VA-bits:48,  4K pages",
151 		[VM_MODE_P52V48_16K]	= "PA-bits:52,  VA-bits:48, 16K pages",
152 		[VM_MODE_P52V48_64K]	= "PA-bits:52,  VA-bits:48, 64K pages",
153 		[VM_MODE_P48V48_4K]	= "PA-bits:48,  VA-bits:48,  4K pages",
154 		[VM_MODE_P48V48_16K]	= "PA-bits:48,  VA-bits:48, 16K pages",
155 		[VM_MODE_P48V48_64K]	= "PA-bits:48,  VA-bits:48, 64K pages",
156 		[VM_MODE_P40V48_4K]	= "PA-bits:40,  VA-bits:48,  4K pages",
157 		[VM_MODE_P40V48_16K]	= "PA-bits:40,  VA-bits:48, 16K pages",
158 		[VM_MODE_P40V48_64K]	= "PA-bits:40,  VA-bits:48, 64K pages",
159 		[VM_MODE_PXXV48_4K]	= "PA-bits:ANY, VA-bits:48,  4K pages",
160 		[VM_MODE_P47V64_4K]	= "PA-bits:47,  VA-bits:64,  4K pages",
161 		[VM_MODE_P44V64_4K]	= "PA-bits:44,  VA-bits:64,  4K pages",
162 		[VM_MODE_P36V48_4K]	= "PA-bits:36,  VA-bits:48,  4K pages",
163 		[VM_MODE_P36V48_16K]	= "PA-bits:36,  VA-bits:48, 16K pages",
164 		[VM_MODE_P36V48_64K]	= "PA-bits:36,  VA-bits:48, 64K pages",
165 		[VM_MODE_P36V47_16K]	= "PA-bits:36,  VA-bits:47, 16K pages",
166 	};
167 	_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
168 		       "Missing new mode strings?");
169 
170 	TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);
171 
172 	return strings[i];
173 }
174 
175 const struct vm_guest_mode_params vm_guest_mode_params[] = {
176 	[VM_MODE_P52V48_4K]	= { 52, 48,  0x1000, 12 },
177 	[VM_MODE_P52V48_16K]	= { 52, 48,  0x4000, 14 },
178 	[VM_MODE_P52V48_64K]	= { 52, 48, 0x10000, 16 },
179 	[VM_MODE_P48V48_4K]	= { 48, 48,  0x1000, 12 },
180 	[VM_MODE_P48V48_16K]	= { 48, 48,  0x4000, 14 },
181 	[VM_MODE_P48V48_64K]	= { 48, 48, 0x10000, 16 },
182 	[VM_MODE_P40V48_4K]	= { 40, 48,  0x1000, 12 },
183 	[VM_MODE_P40V48_16K]	= { 40, 48,  0x4000, 14 },
184 	[VM_MODE_P40V48_64K]	= { 40, 48, 0x10000, 16 },
185 	[VM_MODE_PXXV48_4K]	= {  0,  0,  0x1000, 12 },
186 	[VM_MODE_P47V64_4K]	= { 47, 64,  0x1000, 12 },
187 	[VM_MODE_P44V64_4K]	= { 44, 64,  0x1000, 12 },
188 	[VM_MODE_P36V48_4K]	= { 36, 48,  0x1000, 12 },
189 	[VM_MODE_P36V48_16K]	= { 36, 48,  0x4000, 14 },
190 	[VM_MODE_P36V48_64K]	= { 36, 48, 0x10000, 16 },
191 	[VM_MODE_P36V47_16K]	= { 36, 47,  0x4000, 14 },
192 };
193 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
194 	       "Missing new mode params?");
195 
196 /*
197  * Initializes vm->vpages_valid to match the canonical VA space of the
198  * architecture.
199  *
200  * The default implementation is valid for architectures which split the
201  * range addressed by a single page table into a low and high region
202  * based on the MSB of the VA. On architectures with this behavior
203  * the VA region spans [0, 2^(va_bits - 1)) and [-(2^(va_bits - 1)), -1].
204  */
205 __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
206 {
207 	sparsebit_set_num(vm->vpages_valid,
208 		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
209 	sparsebit_set_num(vm->vpages_valid,
210 		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
211 		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
212 }
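
/*
 * Worked example for the default bitmap above: with va_bits = 48 and 4K
 * pages (page_shift = 12), the low half covers virtual page indexes
 * [0, 2^35), i.e. VAs 0 through 0x00007fffffffffff, and the high half
 * covers another 2^35 page indexes starting at 0xffff800000000000 >> 12.
 */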
213 
214 struct kvm_vm *____vm_create(struct vm_shape shape)
215 {
216 	struct kvm_vm *vm;
217 
218 	vm = calloc(1, sizeof(*vm));
219 	TEST_ASSERT(vm != NULL, "Insufficient Memory");
220 
221 	INIT_LIST_HEAD(&vm->vcpus);
222 	vm->regions.gpa_tree = RB_ROOT;
223 	vm->regions.hva_tree = RB_ROOT;
224 	hash_init(vm->regions.slot_hash);
225 
226 	vm->mode = shape.mode;
227 	vm->type = shape.type;
228 
229 	vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits;
230 	vm->va_bits = vm_guest_mode_params[vm->mode].va_bits;
231 	vm->page_size = vm_guest_mode_params[vm->mode].page_size;
232 	vm->page_shift = vm_guest_mode_params[vm->mode].page_shift;
233 
234 	/* Setup mode specific traits. */
235 	switch (vm->mode) {
236 	case VM_MODE_P52V48_4K:
237 		vm->pgtable_levels = 4;
238 		break;
239 	case VM_MODE_P52V48_64K:
240 		vm->pgtable_levels = 3;
241 		break;
242 	case VM_MODE_P48V48_4K:
243 		vm->pgtable_levels = 4;
244 		break;
245 	case VM_MODE_P48V48_64K:
246 		vm->pgtable_levels = 3;
247 		break;
248 	case VM_MODE_P40V48_4K:
249 	case VM_MODE_P36V48_4K:
250 		vm->pgtable_levels = 4;
251 		break;
252 	case VM_MODE_P40V48_64K:
253 	case VM_MODE_P36V48_64K:
254 		vm->pgtable_levels = 3;
255 		break;
256 	case VM_MODE_P52V48_16K:
257 	case VM_MODE_P48V48_16K:
258 	case VM_MODE_P40V48_16K:
259 	case VM_MODE_P36V48_16K:
260 		vm->pgtable_levels = 4;
261 		break;
262 	case VM_MODE_P36V47_16K:
263 		vm->pgtable_levels = 3;
264 		break;
265 	case VM_MODE_PXXV48_4K:
266 #ifdef __x86_64__
267 		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
268 		/*
269 		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
270 		 * as it doesn't take effect unless CR4.LA57 is set, which it
271 		 * isn't for this mode (48-bit virtual address space).
272 		 */
273 		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
274 			    "Linear address width (%d bits) not supported",
275 			    vm->va_bits);
276 		pr_debug("Guest physical address width detected: %d\n",
277 			 vm->pa_bits);
278 		vm->pgtable_levels = 4;
279 		vm->va_bits = 48;
280 #else
281 		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
282 #endif
283 		break;
284 	case VM_MODE_P47V64_4K:
285 		vm->pgtable_levels = 5;
286 		break;
287 	case VM_MODE_P44V64_4K:
288 		vm->pgtable_levels = 5;
289 		break;
290 	default:
291 		TEST_FAIL("Unknown guest mode: 0x%x", vm->mode);
292 	}
293 
294 #ifdef __aarch64__
295 	TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types");
296 	if (vm->pa_bits != 40)
297 		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
298 #endif
299 
300 	vm_open(vm);
301 
302 	/* Limit to VA-bit canonical virtual addresses. */
303 	vm->vpages_valid = sparsebit_alloc();
304 	vm_vaddr_populate_bitmap(vm);
305 
306 	/* Limit physical addresses to PA-bits. */
307 	vm->max_gfn = vm_compute_max_gfn(vm);
308 
309 	/* Allocate and setup memory for guest. */
310 	vm->vpages_mapped = sparsebit_alloc();
311 
312 	return vm;
313 }
314 
315 static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
316 				     uint32_t nr_runnable_vcpus,
317 				     uint64_t extra_mem_pages)
318 {
319 	uint64_t page_size = vm_guest_mode_params[mode].page_size;
320 	uint64_t nr_pages;
321 
322 	TEST_ASSERT(nr_runnable_vcpus,
323 		    "Use vm_create_barebones() for VMs that _never_ have vCPUs\n");
324 
325 	TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
326 		    "nr_vcpus = %d too large for host, max-vcpus = %d",
327 		    nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
328 
329 	/*
330 	 * Arbitrarily allocate 512 pages (2MB with 4KB pages) for the
331 	 * test code and other per-VM assets that will be loaded into memslot0.
332 	 */
333 	nr_pages = 512;
334 
335 	/* Account for the per-vCPU stacks on behalf of the test. */
336 	nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;
337 
338 	/*
339 	 * Account for the number of pages needed for the page tables.  The
340 	 * maximum page table size for a memory region is reached when the
341 	 * smallest page size is used. Considering each page contains x page
342 	 * table descriptors, the number of extra pages needed for page tables
343 	 * (to map N pages) is N/x + N/x^2 + N/x^3 + ..., which is strictly
344 	 * smaller than 2*N/x since x >= 2.
345 	 */
346 	nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;
347 
348 	/* Account for the number of pages needed by ucall. */
349 	nr_pages += ucall_nr_pages_required(page_size);
350 
351 	return vm_adjust_num_guest_pages(mode, nr_pages);
352 }
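
/*
 * Worked example of the page table bound above (illustrative): with 4K
 * pages and 8-byte descriptors, x = 512, so mapping an extra 1GiB
 * (262144 pages) costs fewer than 262144 / 512 * 2 = 1024 pages (4MiB)
 * of page tables.
 */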
353 
354 struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
355 			   uint64_t nr_extra_pages)
356 {
357 	uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
358 						 nr_extra_pages);
359 	struct userspace_mem_region *slot0;
360 	struct kvm_vm *vm;
361 	int i;
362 
363 	pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__,
364 		 vm_guest_mode_string(shape.mode), shape.type, nr_pages);
365 
366 	vm = ____vm_create(shape);
367 
368 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
369 	for (i = 0; i < NR_MEM_REGIONS; i++)
370 		vm->memslots[i] = 0;
371 
372 	kvm_vm_elf_load(vm, program_invocation_name);
373 
374 	/*
375 	 * TODO: Add proper defines to protect the library's memslots, and then
376 	 * carve out memslot1 for the ucall MMIO address.  KVM treats writes to
377 	 * read-only memslots as MMIO, and creating a read-only memslot for the
378 	 * MMIO region would prevent silently clobbering the MMIO region.
379 	 */
380 	slot0 = memslot2region(vm, 0);
381 	ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
382 
383 	kvm_arch_vm_post_create(vm);
384 
385 	return vm;
386 }
387 
388 /*
389  * VM Create with customized parameters
390  *
391  * Input Args:
392  *   shape - VM shape (mode and type, e.g. VM_MODE_P52V48_4K)
393  *   nr_vcpus - VCPU count
394  *   extra_mem_pages - Non-slot0 physical memory total size
395  *   guest_code - Guest entry point
396  *   vcpus - Array that will be populated with the created vCPUs
397  *
398  * Output Args: None
399  *
400  * Return:
401  *   Pointer to opaque structure that describes the created VM.
402  *
403  * Creates a VM with the mode and type specified by shape.
404  * extra_mem_pages is only used to calculate the maximum page table size;
405  * this function does not actually allocate memory for non-slot0 regions.
406  */
407 struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
408 				      uint64_t extra_mem_pages,
409 				      void *guest_code, struct kvm_vcpu *vcpus[])
410 {
411 	struct kvm_vm *vm;
412 	int i;
413 
414 	TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
415 
416 	vm = __vm_create(shape, nr_vcpus, extra_mem_pages);
417 
418 	for (i = 0; i < nr_vcpus; ++i)
419 		vcpus[i] = vm_vcpu_add(vm, i, guest_code);
420 
421 	return vm;
422 }
423 
424 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
425 					       struct kvm_vcpu **vcpu,
426 					       uint64_t extra_mem_pages,
427 					       void *guest_code)
428 {
429 	struct kvm_vcpu *vcpus[1];
430 	struct kvm_vm *vm;
431 
432 	vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus);
433 
434 	*vcpu = vcpus[0];
435 	return vm;
436 }
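
/*
 * Example (sketch): typical test setup goes through the wrappers in
 * kvm_util.h, which pass VM_SHAPE_DEFAULT through to the helpers above.
 * guest_main is a hypothetical guest function.
 *
 *	static void guest_main(void)
 *	{
 *		GUEST_DONE();
 *	}
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 *
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */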
437 
438 /*
439  * VM Restart
440  *
441  * Input Args:
442  *   vm - VM that has been released before
443  *
444  * Output Args: None
445  *
446  * Reopens the file descriptors associated with the VM and reinstates the
447  * global state, such as the irqchip and the memory regions that are mapped
448  * into the guest.
449  */
450 void kvm_vm_restart(struct kvm_vm *vmp)
451 {
452 	int ctr;
453 	struct userspace_mem_region *region;
454 
455 	vm_open(vmp);
456 	if (vmp->has_irqchip)
457 		vm_create_irqchip(vmp);
458 
459 	hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
460 		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, &region->region);
461 
462 		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
463 			    "  rc: %i errno: %i\n"
464 			    "  slot: %u flags: 0x%x\n"
465 			    "  guest_phys_addr: 0x%llx size: 0x%llx",
466 			    ret, errno, region->region.slot,
467 			    region->region.flags,
468 			    region->region.guest_phys_addr,
469 			    region->region.memory_size);
470 	}
471 }
472 
473 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
474 					      uint32_t vcpu_id)
475 {
476 	return __vm_vcpu_add(vm, vcpu_id);
477 }
478 
479 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
480 {
481 	kvm_vm_restart(vm);
482 
483 	return vm_vcpu_recreate(vm, 0);
484 }
485 
486 void kvm_pin_this_task_to_pcpu(uint32_t pcpu)
487 {
488 	cpu_set_t mask;
489 	int r;
490 
491 	CPU_ZERO(&mask);
492 	CPU_SET(pcpu, &mask);
493 	r = sched_setaffinity(0, sizeof(mask), &mask);
494 	TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.\n", pcpu);
495 }
496 
497 static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
498 {
499 	uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);
500 
501 	TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
502 		    "Not allowed to run on pCPU '%d', check cgroups?\n", pcpu);
503 	return pcpu;
504 }
505 
506 void kvm_print_vcpu_pinning_help(void)
507 {
508 	const char *name = program_invocation_name;
509 
510 	printf(" -c: Pin tasks to physical CPUs.  Takes a list of comma separated\n"
511 	       "     values (target pCPU), one for each vCPU, plus an optional\n"
512 	       "     entry for the main application task (specified via entry\n"
513 	       "     <nr_vcpus + 1>).  If used, entries must be provided for all\n"
514 	       "     vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
515 	       "     E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
516 	       "     vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
517 	       "         %s -v 3 -c 22,23,24,50\n\n"
518 	       "     To leave the application task unpinned, drop the final entry:\n\n"
519 	       "         %s -v 3 -c 22,23,24\n\n"
520 	       "     (default: no pinning)\n", name, name);
521 }
522 
523 void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
524 			    int nr_vcpus)
525 {
526 	cpu_set_t allowed_mask;
527 	char *cpu, *cpu_list;
528 	char delim[2] = ",";
529 	int i, r;
530 
531 	cpu_list = strdup(pcpus_string);
532 	TEST_ASSERT(cpu_list, "strdup() allocation failed.\n");
533 
534 	r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask);
535 	TEST_ASSERT(!r, "sched_getaffinity() failed");
536 
537 	cpu = strtok(cpu_list, delim);
538 
539 	/* 1. Get all pcpus for vcpus. */
540 	for (i = 0; i < nr_vcpus; i++) {
541 		TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'\n", i);
542 		vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
543 		cpu = strtok(NULL, delim);
544 	}
545 
546 	/* 2. Check if the main worker needs to be pinned. */
547 	if (cpu) {
548 		kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask));
549 		cpu = strtok(NULL, delim);
550 	}
551 
552 	TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu);
553 	free(cpu_list);
554 }
555 
556 /*
557  * Userspace Memory Region Find
558  *
559  * Input Args:
560  *   vm - Virtual Machine
561  *   start - Starting VM physical address
562  *   end - Ending VM physical address, inclusive.
563  *
564  * Output Args: None
565  *
566  * Return:
567  *   Pointer to overlapping region, NULL if no such region.
568  *
569  * Searches for a region with any physical memory that overlaps with
570  * any portion of the guest physical addresses from start to end
571  * inclusive.  If multiple overlapping regions exist, a pointer to any
572  * of the regions is returned.  NULL is returned only when no overlapping
573  * region exists.
574  */
575 static struct userspace_mem_region *
576 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
577 {
578 	struct rb_node *node;
579 
580 	for (node = vm->regions.gpa_tree.rb_node; node; ) {
581 		struct userspace_mem_region *region =
582 			container_of(node, struct userspace_mem_region, gpa_node);
583 		uint64_t existing_start = region->region.guest_phys_addr;
584 		uint64_t existing_end = region->region.guest_phys_addr
585 			+ region->region.memory_size - 1;
586 		if (start <= existing_end && end >= existing_start)
587 			return region;
588 
589 		if (start < existing_start)
590 			node = node->rb_left;
591 		else
592 			node = node->rb_right;
593 	}
594 
595 	return NULL;
596 }
597 
598 __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
599 {
600 
601 }
602 
603 /*
604  * VM VCPU Remove
605  *
606  * Input Args:
607  *   vcpu - VCPU to remove
608  *
609  * Output Args: None
610  *
611  * Return: None, TEST_ASSERT failures for all error conditions
612  *
613  * Removes a vCPU from a VM and frees its resources.
614  */
615 static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
616 {
617 	int ret;
618 
619 	if (vcpu->dirty_gfns) {
620 		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
621 		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
622 		vcpu->dirty_gfns = NULL;
623 	}
624 
625 	ret = munmap(vcpu->run, vcpu_mmap_sz());
626 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
627 
628 	ret = close(vcpu->fd);
629 	TEST_ASSERT(!ret,  __KVM_SYSCALL_ERROR("close()", ret));
630 
631 	list_del(&vcpu->list);
632 
633 	vcpu_arch_free(vcpu);
634 	free(vcpu);
635 }
636 
637 void kvm_vm_release(struct kvm_vm *vmp)
638 {
639 	struct kvm_vcpu *vcpu, *tmp;
640 	int ret;
641 
642 	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
643 		vm_vcpu_rm(vmp, vcpu);
644 
645 	ret = close(vmp->fd);
646 	TEST_ASSERT(!ret,  __KVM_SYSCALL_ERROR("close()", ret));
647 
648 	ret = close(vmp->kvm_fd);
649 	TEST_ASSERT(!ret,  __KVM_SYSCALL_ERROR("close()", ret));
650 }
651 
652 static void __vm_mem_region_delete(struct kvm_vm *vm,
653 				   struct userspace_mem_region *region,
654 				   bool unlink)
655 {
656 	int ret;
657 
658 	if (unlink) {
659 		rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
660 		rb_erase(&region->hva_node, &vm->regions.hva_tree);
661 		hash_del(&region->slot_node);
662 	}
663 
664 	region->region.memory_size = 0;
665 	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
666 
667 	sparsebit_free(&region->unused_phy_pages);
668 	ret = munmap(region->mmap_start, region->mmap_size);
669 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
670 	if (region->fd >= 0) {
671 		/* There's an extra map when using shared memory. */
672 		ret = munmap(region->mmap_alias, region->mmap_size);
673 		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
674 		close(region->fd);
675 	}
676 	if (region->region.guest_memfd >= 0)
677 		close(region->region.guest_memfd);
678 
679 	free(region);
680 }
681 
682 /*
683  * Destroys and frees the VM pointed to by vmp.
684  */
685 void kvm_vm_free(struct kvm_vm *vmp)
686 {
687 	int ctr;
688 	struct hlist_node *node;
689 	struct userspace_mem_region *region;
690 
691 	if (vmp == NULL)
692 		return;
693 
694 	/* Free cached stats metadata and close FD */
695 	if (vmp->stats_fd) {
696 		free(vmp->stats_desc);
697 		close(vmp->stats_fd);
698 	}
699 
700 	/* Free userspace_mem_regions. */
701 	hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
702 		__vm_mem_region_delete(vmp, region, false);
703 
704 	/* Free sparsebit arrays. */
705 	sparsebit_free(&vmp->vpages_valid);
706 	sparsebit_free(&vmp->vpages_mapped);
707 
708 	kvm_vm_release(vmp);
709 
710 	/* Free the structure describing the VM. */
711 	free(vmp);
712 }
713 
714 int kvm_memfd_alloc(size_t size, bool hugepages)
715 {
716 	int memfd_flags = MFD_CLOEXEC;
717 	int fd, r;
718 
719 	if (hugepages)
720 		memfd_flags |= MFD_HUGETLB;
721 
722 	fd = memfd_create("kvm_selftest", memfd_flags);
723 	TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));
724 
725 	r = ftruncate(fd, size);
726 	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));
727 
728 	r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
729 	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
730 
731 	return fd;
732 }
733 
734 /*
735  * Memory Compare, host virtual to guest virtual
736  *
737  * Input Args:
738  *   hva - Starting host virtual address
739  *   vm - Virtual Machine
740  *   gva - Starting guest virtual address
741  *   len - number of bytes to compare
742  *
743  * Output Args: None
744  *
745  * Input/Output Args: None
746  *
747  * Return:
748  *   Returns 0 if the bytes starting at hva for a length of len
749  *   are equal to the guest virtual bytes starting at gva.  Returns
750  *   a value < 0 if the bytes at hva are less than those at gva.
751  *   Otherwise a value > 0 is returned.
752  *
753  * Compares the bytes starting at the host virtual address hva, for
754  * a length of len, to the guest bytes starting at the guest virtual
755  * address given by gva.
756  */
757 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
758 {
759 	size_t amt;
760 
761 	/*
762 	 * Compare a batch of bytes until either a mismatch is found
763 	 * or all the bytes have been compared.
764 	 */
765 	for (uintptr_t offset = 0; offset < len; offset += amt) {
766 		uintptr_t ptr1 = (uintptr_t)hva + offset;
767 
768 		/*
769 		 * Determine host address for guest virtual address
770 		 * at offset.
771 		 */
772 		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
773 
774 		/*
775 		 * Determine amount to compare on this pass.
776 		 * Don't allow the comparison to cross a page boundary.
777 		 */
778 		amt = len - offset;
779 		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
780 			amt = vm->page_size - (ptr1 % vm->page_size);
781 		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
782 			amt = vm->page_size - (ptr2 % vm->page_size);
783 
784 		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
785 		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
786 
787 		/*
788 		 * Perform the comparison.  If there is a difference
789 		 * return that result to the caller, otherwise continue
790 		 * looking for a mismatch.
791 		 */
792 		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
793 		if (ret != 0)
794 			return ret;
795 	}
796 
797 	/*
798 	 * No mismatch found.  Let the caller know the two memory
799 	 * areas are equal.
800 	 */
801 	return 0;
802 }
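
/*
 * Example (sketch): verify that a pattern written by the host through the
 * HVA landed in guest memory unmodified:
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
 *	uint8_t pattern[16] = { 0xaa };
 *
 *	memcpy(addr_gva2hva(vm, gva), pattern, sizeof(pattern));
 *	TEST_ASSERT(!kvm_memcmp_hva_gva(pattern, vm, gva, sizeof(pattern)),
 *		    "Guest memory doesn't match the host pattern");
 */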
803 
804 static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
805 					       struct userspace_mem_region *region)
806 {
807 	struct rb_node **cur, *parent;
808 
809 	for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
810 		struct userspace_mem_region *cregion;
811 
812 		cregion = container_of(*cur, typeof(*cregion), gpa_node);
813 		parent = *cur;
814 		if (region->region.guest_phys_addr <
815 		    cregion->region.guest_phys_addr)
816 			cur = &(*cur)->rb_left;
817 		else {
818 			TEST_ASSERT(region->region.guest_phys_addr !=
819 				    cregion->region.guest_phys_addr,
820 				    "Duplicate GPA in region tree");
821 
822 			cur = &(*cur)->rb_right;
823 		}
824 	}
825 
826 	rb_link_node(&region->gpa_node, parent, cur);
827 	rb_insert_color(&region->gpa_node, gpa_tree);
828 }
829 
830 static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
831 					       struct userspace_mem_region *region)
832 {
833 	struct rb_node **cur, *parent;
834 
835 	for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
836 		struct userspace_mem_region *cregion;
837 
838 		cregion = container_of(*cur, typeof(*cregion), hva_node);
839 		parent = *cur;
840 		if (region->host_mem < cregion->host_mem)
841 			cur = &(*cur)->rb_left;
842 		else {
843 			TEST_ASSERT(region->host_mem !=
844 				    cregion->host_mem,
845 				    "Duplicate HVA in region tree");
846 
847 			cur = &(*cur)->rb_right;
848 		}
849 	}
850 
851 	rb_link_node(&region->hva_node, parent, cur);
852 	rb_insert_color(&region->hva_node, hva_tree);
853 }
854 
855 
856 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
857 				uint64_t gpa, uint64_t size, void *hva)
858 {
859 	struct kvm_userspace_memory_region region = {
860 		.slot = slot,
861 		.flags = flags,
862 		.guest_phys_addr = gpa,
863 		.memory_size = size,
864 		.userspace_addr = (uintptr_t)hva,
865 	};
866 
867 	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
868 }
869 
870 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
871 			       uint64_t gpa, uint64_t size, void *hva)
872 {
873 	int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
874 
875 	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)",
876 		    errno, strerror(errno));
877 }
878 
879 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
880 				 uint64_t gpa, uint64_t size, void *hva,
881 				 uint32_t guest_memfd, uint64_t guest_memfd_offset)
882 {
883 	struct kvm_userspace_memory_region2 region = {
884 		.slot = slot,
885 		.flags = flags,
886 		.guest_phys_addr = gpa,
887 		.memory_size = size,
888 		.userspace_addr = (uintptr_t)hva,
889 		.guest_memfd = guest_memfd,
890 		.guest_memfd_offset = guest_memfd_offset,
891 	};
892 
893 	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region);
894 }
895 
896 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
897 				uint64_t gpa, uint64_t size, void *hva,
898 				uint32_t guest_memfd, uint64_t guest_memfd_offset)
899 {
900 	int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
901 					       guest_memfd, guest_memfd_offset);
902 
903 	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed, errno = %d (%s)",
904 		    errno, strerror(errno));
905 }
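
/*
 * Example (sketch): memslot tests often drive these wrappers directly,
 * e.g. creating a slot and then deleting it by setting its size to zero.
 * MEM_SLOT, MEM_GPA and MEM_SIZE are hypothetical test constants; hva
 * must be a host mapping of at least MEM_SIZE bytes.
 *
 *	vm_set_user_memory_region(vm, MEM_SLOT, 0, MEM_GPA, MEM_SIZE, hva);
 *	vm_set_user_memory_region(vm, MEM_SLOT, 0, MEM_GPA, 0, NULL);
 */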
906 
907 
908 /* FIXME: This thing needs to be ripped apart and rewritten. */
909 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
910 		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
911 		uint32_t flags, int guest_memfd, uint64_t guest_memfd_offset)
912 {
913 	int ret;
914 	struct userspace_mem_region *region;
915 	size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
916 	size_t mem_size = npages * vm->page_size;
917 	size_t alignment;
918 
919 	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
920 		"Number of guest pages is not compatible with the host. "
921 		"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
922 
923 	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
924 		"address not on a page boundary.\n"
925 		"  guest_paddr: 0x%lx vm->page_size: 0x%x",
926 		guest_paddr, vm->page_size);
927 	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
928 		<= vm->max_gfn, "Physical range beyond maximum "
929 		"supported physical address,\n"
930 		"  guest_paddr: 0x%lx npages: 0x%lx\n"
931 		"  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
932 		guest_paddr, npages, vm->max_gfn, vm->page_size);
933 
934 	/*
935 	 * Confirm a mem region with an overlapping address doesn't
936 	 * already exist.
937 	 */
938 	region = (struct userspace_mem_region *) userspace_mem_region_find(
939 		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
940 	if (region != NULL)
941 		TEST_FAIL("overlapping userspace_mem_region already "
942 			"exists\n"
943 			"  requested guest_paddr: 0x%lx npages: 0x%lx "
944 			"page_size: 0x%x\n"
945 			"  existing guest_paddr: 0x%lx size: 0x%lx",
946 			guest_paddr, npages, vm->page_size,
947 			(uint64_t) region->region.guest_phys_addr,
948 			(uint64_t) region->region.memory_size);
949 
950 	/* Confirm no region with the requested slot already exists. */
951 	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
952 			       slot) {
953 		if (region->region.slot != slot)
954 			continue;
955 
956 		TEST_FAIL("A mem region with the requested slot "
957 			"already exists.\n"
958 			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
959 			"  existing slot: %u paddr: 0x%lx size: 0x%lx",
960 			slot, guest_paddr, npages,
961 			region->region.slot,
962 			(uint64_t) region->region.guest_phys_addr,
963 			(uint64_t) region->region.memory_size);
964 	}
965 
966 	/* Allocate and initialize new mem region structure. */
967 	region = calloc(1, sizeof(*region));
968 	TEST_ASSERT(region != NULL, "Insufficient Memory");
969 	region->mmap_size = mem_size;
970 
971 #ifdef __s390x__
972 	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
973 	alignment = 0x100000;
974 #else
975 	alignment = 1;
976 #endif
977 
978 	/*
979 	 * When using THP, mmap is not guaranteed to return a hugepage-aligned
980 	 * address, so we have to pad the mmap. Padding is not needed for HugeTLB
981 	 * because mmap will always return an address aligned to the HugeTLB
982 	 * page size.
983 	 */
984 	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
985 		alignment = max(backing_src_pagesz, alignment);
986 
987 	TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
988 
989 	/* Add enough memory to align up if necessary */
990 	if (alignment > 1)
991 		region->mmap_size += alignment;
992 
993 	region->fd = -1;
994 	if (backing_src_is_shared(src_type))
995 		region->fd = kvm_memfd_alloc(region->mmap_size,
996 					     src_type == VM_MEM_SRC_SHARED_HUGETLB);
997 
998 	region->mmap_start = mmap(NULL, region->mmap_size,
999 				  PROT_READ | PROT_WRITE,
1000 				  vm_mem_backing_src_alias(src_type)->flag,
1001 				  region->fd, 0);
1002 	TEST_ASSERT(region->mmap_start != MAP_FAILED,
1003 		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
1004 
1005 	TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
1006 		    region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
1007 		    "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
1008 		    region->mmap_start, backing_src_pagesz);
1009 
1010 	/* Align host address */
1011 	region->host_mem = align_ptr_up(region->mmap_start, alignment);
1012 
1013 	/* As needed perform madvise */
1014 	if ((src_type == VM_MEM_SRC_ANONYMOUS ||
1015 	     src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
1016 		ret = madvise(region->host_mem, mem_size,
1017 			      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
1018 		TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
1019 			    region->host_mem, mem_size,
1020 			    vm_mem_backing_src_alias(src_type)->name);
1021 	}
1022 
1023 	region->backing_src_type = src_type;
1024 
1025 	if (flags & KVM_MEM_GUEST_MEMFD) {
1026 		if (guest_memfd < 0) {
1027 			uint32_t guest_memfd_flags = 0;
1028 			TEST_ASSERT(!guest_memfd_offset,
1029 				    "Offset must be zero when creating new guest_memfd");
1030 			guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags);
1031 		} else {
1032 			/*
1033 			 * Install a unique fd for each memslot so that the fd
1034 			 * can be closed when the region is deleted without
1035 			 * needing to track if the fd is owned by the framework
1036 			 * or by the caller.
1037 			 */
1038 			guest_memfd = dup(guest_memfd);
1039 			TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd));
1040 		}
1041 
1042 		region->region.guest_memfd = guest_memfd;
1043 		region->region.guest_memfd_offset = guest_memfd_offset;
1044 	} else {
1045 		region->region.guest_memfd = -1;
1046 	}
1047 
1048 	region->unused_phy_pages = sparsebit_alloc();
1049 	sparsebit_set_num(region->unused_phy_pages,
1050 		guest_paddr >> vm->page_shift, npages);
1051 	region->region.slot = slot;
1052 	region->region.flags = flags;
1053 	region->region.guest_phys_addr = guest_paddr;
1054 	region->region.memory_size = npages * vm->page_size;
1055 	region->region.userspace_addr = (uintptr_t) region->host_mem;
1056 	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
1057 	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
1058 		"  rc: %i errno: %i\n"
1059 		"  slot: %u flags: 0x%x\n"
1060 		"  guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d\n",
1061 		ret, errno, slot, flags,
1062 		guest_paddr, (uint64_t) region->region.memory_size,
1063 		region->region.guest_memfd);
1064 
1065 	/* Add to quick lookup data structures */
1066 	vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
1067 	vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
1068 	hash_add(vm->regions.slot_hash, &region->slot_node, slot);
1069 
1070 	/* If shared memory, create an alias. */
1071 	if (region->fd >= 0) {
1072 		region->mmap_alias = mmap(NULL, region->mmap_size,
1073 					  PROT_READ | PROT_WRITE,
1074 					  vm_mem_backing_src_alias(src_type)->flag,
1075 					  region->fd, 0);
1076 		TEST_ASSERT(region->mmap_alias != MAP_FAILED,
1077 			    __KVM_SYSCALL_ERROR("mmap()",  (int)(unsigned long)MAP_FAILED));
1078 
1079 		/* Align host alias address */
1080 		region->host_alias = align_ptr_up(region->mmap_alias, alignment);
1081 	}
1082 }
1083 
1084 void vm_userspace_mem_region_add(struct kvm_vm *vm,
1085 				 enum vm_mem_backing_src_type src_type,
1086 				 uint64_t guest_paddr, uint32_t slot,
1087 				 uint64_t npages, uint32_t flags)
1088 {
1089 	vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0);
1090 }
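
/*
 * Example (sketch): add a 512-page region in its own slot, backed by
 * anonymous memory, then allocate guest physical pages from it.
 * TEST_SLOT and TEST_GPA are hypothetical constants chosen not to
 * collide with memslot0.
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    TEST_GPA, TEST_SLOT, 512, 0);
 *	vm_paddr_t gpa = vm_phy_pages_alloc(vm, 1, TEST_GPA, TEST_SLOT);
 */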
1091 
1092 /*
1093  * Memslot to region
1094  *
1095  * Input Args:
1096  *   vm - Virtual Machine
1097  *   memslot - KVM memory slot ID
1098  *
1099  * Output Args: None
1100  *
1101  * Return:
1102  *   Pointer to the memory region structure that describes the memory
1103  *   region using the KVM memory slot ID given by memslot.  A TEST_ASSERT
1104  *   failure occurs on error (e.g. currently no memory region using
1105  *   memslot as a KVM memory slot ID).
1106  */
1107 struct userspace_mem_region *
1108 memslot2region(struct kvm_vm *vm, uint32_t memslot)
1109 {
1110 	struct userspace_mem_region *region;
1111 
1112 	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
1113 			       memslot)
1114 		if (region->region.slot == memslot)
1115 			return region;
1116 
1117 	fprintf(stderr, "No mem region with the requested slot found,\n"
1118 		"  requested slot: %u\n", memslot);
1119 	fputs("---- vm dump ----\n", stderr);
1120 	vm_dump(stderr, vm, 2);
1121 	TEST_FAIL("Mem region not found");
1122 	return NULL;
1123 }
1124 
1125 /*
1126  * VM Memory Region Flags Set
1127  *
1128  * Input Args:
1129  *   vm - Virtual Machine
1130  *   flags - Flags to set for the memory region specified by slot
1131  *
1132  * Output Args: None
1133  *
1134  * Return: None
1135  *
1136  * Sets the flags of the memory region specified by the value of slot
1137  * to the values given by flags.
1138  */
1139 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
1140 {
1141 	int ret;
1142 	struct userspace_mem_region *region;
1143 
1144 	region = memslot2region(vm, slot);
1145 
1146 	region->region.flags = flags;
1147 
1148 	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
1149 
1150 	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
1151 		"  rc: %i errno: %i slot: %u flags: 0x%x",
1152 		ret, errno, slot, flags);
1153 }
1154 
1155 /*
1156  * VM Memory Region Move
1157  *
1158  * Input Args:
1159  *   vm - Virtual Machine
1160  *   slot - Slot of the memory region to move
1161  *   new_gpa - Starting guest physical address
1162  *
1163  * Output Args: None
1164  *
1165  * Return: None
1166  *
1167  * Change the gpa of a memory region.
1168  */
1169 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
1170 {
1171 	struct userspace_mem_region *region;
1172 	int ret;
1173 
1174 	region = memslot2region(vm, slot);
1175 
1176 	region->region.guest_phys_addr = new_gpa;
1177 
1178 	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
1179 
1180 	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed\n"
1181 		    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
1182 		    ret, errno, slot, new_gpa);
1183 }
1184 
1185 /*
1186  * VM Memory Region Delete
1187  *
1188  * Input Args:
1189  *   vm - Virtual Machine
1190  *   slot - Slot of the memory region to delete
1191  *
1192  * Output Args: None
1193  *
1194  * Return: None
1195  *
1196  * Delete a memory region.
1197  */
1198 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
1199 {
1200 	__vm_mem_region_delete(vm, memslot2region(vm, slot), true);
1201 }
1202 
1203 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
1204 			    bool punch_hole)
1205 {
1206 	const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0);
1207 	struct userspace_mem_region *region;
1208 	uint64_t end = base + size;
1209 	uint64_t gpa, len;
1210 	off_t fd_offset;
1211 	int ret;
1212 
1213 	for (gpa = base; gpa < end; gpa += len) {
1214 		uint64_t offset;
1215 
1216 		region = userspace_mem_region_find(vm, gpa, gpa);
1217 		TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD,
1218 			    "Private memory region not found for GPA 0x%lx", gpa);
1219 
1220 		offset = gpa - region->region.guest_phys_addr;
1221 		fd_offset = region->region.guest_memfd_offset + offset;
1222 		len = min_t(uint64_t, end - gpa, region->region.memory_size - offset);
1223 
1224 		ret = fallocate(region->region.guest_memfd, mode, fd_offset, len);
1225 		TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx\n",
1226 			    punch_hole ? "punch hole" : "allocate", gpa, len,
1227 			    region->region.guest_memfd, mode, fd_offset);
1228 	}
1229 }
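
/*
 * Example (sketch): for a region created with KVM_MEM_GUEST_MEMFD, a test
 * can pre-allocate the backing pages for a GPA range up front, or punch
 * them out again to simulate reclaim.  gpa and size are assumed to be a
 * page-aligned range within such a region.
 *
 *	vm_guest_mem_fallocate(vm, gpa, size, false);
 *	vm_guest_mem_fallocate(vm, gpa, size, true);
 */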
1230 
1231 /* Returns the size of a vCPU's kvm_run structure. */
1232 static int vcpu_mmap_sz(void)
1233 {
1234 	int dev_fd, ret;
1235 
1236 	dev_fd = open_kvm_dev_path_or_exit();
1237 
1238 	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
1239 	TEST_ASSERT(ret >= sizeof(struct kvm_run),
1240 		    KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));
1241 
1242 	close(dev_fd);
1243 
1244 	return ret;
1245 }
1246 
1247 static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
1248 {
1249 	struct kvm_vcpu *vcpu;
1250 
1251 	list_for_each_entry(vcpu, &vm->vcpus, list) {
1252 		if (vcpu->id == vcpu_id)
1253 			return true;
1254 	}
1255 
1256 	return false;
1257 }
1258 
1259 /*
1260  * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
1261  * No additional vCPU setup is done.  Returns the vCPU.
1262  */
1263 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
1264 {
1265 	struct kvm_vcpu *vcpu;
1266 
1267 	/* Confirm a vcpu with the specified id doesn't already exist. */
1268 	TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);
1269 
1270 	/* Allocate and initialize new vcpu structure. */
1271 	vcpu = calloc(1, sizeof(*vcpu));
1272 	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
1273 
1274 	vcpu->vm = vm;
1275 	vcpu->id = vcpu_id;
1276 	vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
1277 	TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm);
1278 
1279 	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
1280 		"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
1281 		vcpu_mmap_sz(), sizeof(*vcpu->run));
1282 	vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
1283 		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
1284 	TEST_ASSERT(vcpu->run != MAP_FAILED,
1285 		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
1286 
1287 	/* Add to linked-list of VCPUs. */
1288 	list_add(&vcpu->list, &vm->vcpus);
1289 
1290 	return vcpu;
1291 }
1292 
1293 /*
1294  * VM Virtual Address Unused Gap
1295  *
1296  * Input Args:
1297  *   vm - Virtual Machine
1298  *   sz - Size (bytes)
1299  *   vaddr_min - Minimum Virtual Address
1300  *
1301  * Output Args: None
1302  *
1303  * Return:
1304  *   Lowest virtual address at or above vaddr_min, with at least
1305  *   sz unused bytes.  TEST_ASSERT failure if no area of at least
1306  *   size sz is available.
1307  *
1308  * Within the VM specified by vm, locates the lowest starting virtual
1309  * address >= vaddr_min, that has at least sz unallocated bytes.  A
1310  * TEST_ASSERT failure occurs for invalid input or no area of at least
1311  * sz unallocated bytes >= vaddr_min is available.
1312  */
1313 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
1314 			       vm_vaddr_t vaddr_min)
1315 {
1316 	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
1317 
1318 	/* Determine lowest permitted virtual page index. */
1319 	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
1320 	if ((pgidx_start * vm->page_size) < vaddr_min)
1321 		goto no_va_found;
1322 
1323 	/* Loop over section with enough valid virtual page indexes. */
1324 	if (!sparsebit_is_set_num(vm->vpages_valid,
1325 		pgidx_start, pages))
1326 		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
1327 			pgidx_start, pages);
1328 	do {
1329 		/*
1330 		 * Are there enough unused virtual pages available at
1331 		 * the currently proposed starting virtual page index.
1332 		 * If not, adjust proposed starting index to next
1333 		 * possible.
1334 		 */
1335 		if (sparsebit_is_clear_num(vm->vpages_mapped,
1336 			pgidx_start, pages))
1337 			goto va_found;
1338 		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
1339 			pgidx_start, pages);
1340 		if (pgidx_start == 0)
1341 			goto no_va_found;
1342 
1343 		/*
1344 		 * If needed, adjust proposed starting virtual address,
1345 		 * to next range of valid virtual addresses.
1346 		 */
1347 		if (!sparsebit_is_set_num(vm->vpages_valid,
1348 			pgidx_start, pages)) {
1349 			pgidx_start = sparsebit_next_set_num(
1350 				vm->vpages_valid, pgidx_start, pages);
1351 			if (pgidx_start == 0)
1352 				goto no_va_found;
1353 		}
1354 	} while (pgidx_start != 0);
1355 
1356 no_va_found:
1357 	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);
1358 
1359 	/* NOT REACHED */
1360 	return -1;
1361 
1362 va_found:
1363 	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
1364 		pgidx_start, pages),
1365 		"Unexpected, invalid virtual page index range,\n"
1366 		"  pgidx_start: 0x%lx\n"
1367 		"  pages: 0x%lx",
1368 		pgidx_start, pages);
1369 	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
1370 		pgidx_start, pages),
1371 		"Unexpected, pages already mapped,\n"
1372 		"  pgidx_start: 0x%lx\n"
1373 		"  pages: 0x%lx",
1374 		pgidx_start, pages);
1375 
1376 	return pgidx_start * vm->page_size;
1377 }
1378 
1379 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
1380 			    enum kvm_mem_region_type type)
1381 {
1382 	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
1383 
1384 	virt_pgd_alloc(vm);
1385 	vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
1386 					      KVM_UTIL_MIN_PFN * vm->page_size,
1387 					      vm->memslots[type]);
1388 
1389 	/*
1390 	 * Find an unused range of virtual page addresses of at least
1391 	 * pages in length.
1392 	 */
1393 	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
1394 
1395 	/* Map the virtual pages. */
1396 	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
1397 		pages--, vaddr += vm->page_size, paddr += vm->page_size) {
1398 
1399 		virt_pg_map(vm, vaddr, paddr);
1400 
1401 		sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
1402 	}
1403 
1404 	return vaddr_start;
1405 }
1406 
1407 /*
1408  * VM Virtual Address Allocate
1409  *
1410  * Input Args:
1411  *   vm - Virtual Machine
1412  *   sz - Size in bytes
1413  *   vaddr_min - Minimum starting virtual address
1414  *
1415  * Output Args: None
1416  *
1417  * Return:
1418  *   Starting guest virtual address
1419  *
1420  * Allocates at least sz bytes within the virtual address space of the vm
1421  * given by vm.  The allocated bytes are mapped to a virtual address >=
1422  * the address given by vaddr_min.  Note that each allocation uses
1423  * a unique set of pages, with the minimum real allocation being at least
1424  * a page. The allocated physical space comes from the TEST_DATA memory region.
1425  */
1426 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
1427 {
1428 	return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
1429 }
1430 
1431 /*
1432  * VM Virtual Address Allocate Pages
1433  *
1434  * Input Args:
1435  *   vm - Virtual Machine
1436  *   nr_pages - Number of system pages to allocate
1437  * Output Args: None
1438  *
1439  * Return:
1440  *   Starting guest virtual address
1441  *
1442  * Allocates at least N system pages worth of bytes within the virtual address
1443  * space of the vm.
1444  */
1445 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
1446 {
1447 	return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
1448 }
1449 
1450 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
1451 {
1452 	return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
1453 }
1454 
1455 /*
1456  * VM Virtual Address Allocate Page
1457  *
1458  * Input Args:
1459  *   vm - Virtual Machine
1460  *
1461  * Output Args: None
1462  *
1463  * Return:
1464  *   Starting guest virtual address
1465  *
1466  * Allocates at least one system page worth of bytes within the virtual address
1467  * space of the vm.
1468  */
1469 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
1470 {
1471 	return vm_vaddr_alloc_pages(vm, 1);
1472 }
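
/*
 * Example (sketch): share a page of data with the guest.  The host writes
 * through the HVA returned by addr_gva2hva(); the guest reads the same
 * page through gva, e.g. passed in via vcpu_args_set().
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
 *	uint64_t *hva = addr_gva2hva(vm, gva);
 *
 *	*hva = 0xdeadbeef;
 */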
1473 
1474 /*
1475  * Map a range of VM virtual address to the VM's physical address
1476  *
1477  * Input Args:
1478  *   vm - Virtual Machine
1479  *   vaddr - Virtual address to map
1480  *   paddr - VM Physical Address
1481  *   npages - The number of pages to map
1482  *
1483  * Output Args: None
1484  *
1485  * Return: None
1486  *
1487  * Within the VM given by @vm, creates a virtual translation for
1488  * @npages starting at @vaddr to the page range starting at @paddr.
1489  */
1490 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
1491 	      unsigned int npages)
1492 {
1493 	size_t page_size = vm->page_size;
1494 	size_t size = npages * page_size;
1495 
1496 	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
1497 	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
1498 
1499 	while (npages--) {
1500 		virt_pg_map(vm, vaddr, paddr);
1501 		sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
1502 
1503 		vaddr += page_size;
1504 		paddr += page_size;
1505 	}
1506 }
1507 
1508 /*
1509  * Address VM Physical to Host Virtual
1510  *
1511  * Input Args:
1512  *   vm - Virtual Machine
1513  *   gpa - VM physical address
1514  *
1515  * Output Args: None
1516  *
1517  * Return:
1518  *   Equivalent host virtual address
1519  *
1520  * Locates the memory region containing the VM physical address given
1521  * by gpa, within the VM given by vm.  When found, the host virtual
1522  * address providing the memory to the vm physical address is returned.
1523  * A TEST_ASSERT failure occurs if no region containing gpa exists.
1524  */
1525 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
1526 {
1527 	struct userspace_mem_region *region;
1528 
1529 	region = userspace_mem_region_find(vm, gpa, gpa);
1530 	if (!region) {
1531 		TEST_FAIL("No vm physical memory at 0x%lx", gpa);
1532 		return NULL;
1533 	}
1534 
1535 	return (void *)((uintptr_t)region->host_mem
1536 		+ (gpa - region->region.guest_phys_addr));
1537 }
1538 
1539 /*
1540  * Address Host Virtual to VM Physical
1541  *
1542  * Input Args:
1543  *   vm - Virtual Machine
1544  *   hva - Host virtual address
1545  *
1546  * Output Args: None
1547  *
1548  * Return:
1549  *   Equivalent VM physical address
1550  *
1551  * Locates the memory region containing the host virtual address given
1552  * by hva, within the VM given by vm.  When found, the equivalent
1553  * VM physical address is returned. A TEST_ASSERT failure occurs if no
1554  * region containing hva exists.
1555  */
1556 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1557 {
1558 	struct rb_node *node;
1559 
1560 	for (node = vm->regions.hva_tree.rb_node; node; ) {
1561 		struct userspace_mem_region *region =
1562 			container_of(node, struct userspace_mem_region, hva_node);
1563 
1564 		if (hva >= region->host_mem) {
1565 			if (hva <= (region->host_mem
1566 				+ region->region.memory_size - 1))
1567 				return (vm_paddr_t)((uintptr_t)
1568 					region->region.guest_phys_addr
1569 					+ (hva - (uintptr_t)region->host_mem));
1570 
1571 			node = node->rb_right;
1572 		} else
1573 			node = node->rb_left;
1574 	}
1575 
1576 	TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
1577 	return -1;
1578 }
1579 
1580 /*
1581  * Address VM physical to Host Virtual *alias*.
1582  *
1583  * Input Args:
1584  *   vm - Virtual Machine
1585  *   gpa - VM physical address
1586  *
1587  * Output Args: None
1588  *
1589  * Return:
1590  *   Equivalent address within the host virtual *alias* area, or NULL
1591  *   (without failing the test) if the guest memory is not shared (so
1592  *   no alias exists).
1593  *
1594  * Create a writable, shared virtual=>physical alias for the specific GPA.
1595  * The primary use case is to allow the host selftest to manipulate guest
1596  * memory without mapping said memory in the guest's address space. And, for
1597  * userfaultfd-based demand paging, to do so without triggering userfaults.
1598  */
1599 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
1600 {
1601 	struct userspace_mem_region *region;
1602 	uintptr_t offset;
1603 
1604 	region = userspace_mem_region_find(vm, gpa, gpa);
1605 	if (!region)
1606 		return NULL;
1607 
1608 	if (!region->host_alias)
1609 		return NULL;
1610 
1611 	offset = gpa - region->region.guest_phys_addr;
1612 	return (void *) ((uintptr_t) region->host_alias + offset);
1613 }
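
/*
 * Example (sketch): in a userfaultfd-based demand paging test, populate
 * guest memory through the alias so that the write itself doesn't trigger
 * a userfault on the primary mapping.  pattern is a hypothetical test
 * value.
 *
 *	uint64_t *alias = addr_gpa2alias(vm, gpa);
 *
 *	TEST_ASSERT(alias, "GPA is not backed by shared memory");
 *	*alias = pattern;
 */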
1614 
1615 /* Create an interrupt controller chip for the specified VM. */
1616 void vm_create_irqchip(struct kvm_vm *vm)
1617 {
1618 	vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
1619 
1620 	vm->has_irqchip = true;
1621 }
1622 
1623 int _vcpu_run(struct kvm_vcpu *vcpu)
1624 {
1625 	int rc;
1626 
1627 	do {
1628 		rc = __vcpu_run(vcpu);
1629 	} while (rc == -1 && errno == EINTR);
1630 
1631 	assert_on_unhandled_exception(vcpu);
1632 
1633 	return rc;
1634 }
1635 
1636 /*
1637  * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
1638  * Assert if KVM returns an error (other than -EINTR).
1639  */
1640 void vcpu_run(struct kvm_vcpu *vcpu)
1641 {
1642 	int ret = _vcpu_run(vcpu);
1643 
1644 	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
1645 }
1646 
1647 void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
1648 {
1649 	int ret;
1650 
1651 	vcpu->run->immediate_exit = 1;
1652 	ret = __vcpu_run(vcpu);
1653 	vcpu->run->immediate_exit = 0;
1654 
1655 	TEST_ASSERT(ret == -1 && errno == EINTR,
1656 		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
1657 		    ret, errno);
1658 }
1659 
1660 /*
1661  * Get the list of guest registers which are supported for
1662  * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls.  Returns a kvm_reg_list pointer;
1663  * it is the caller's responsibility to free the list.
1664  */
1665 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
1666 {
1667 	struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
1668 	int ret;
1669 
1670 	ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
1671 	TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
1672 
1673 	reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
1674 	reg_list->n = reg_list_n.n;
1675 	vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
1676 	return reg_list;
1677 }
1678 
1679 void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
1680 {
1681 	uint32_t page_size = getpagesize();
1682 	uint32_t size = vcpu->vm->dirty_ring_size;
1683 
1684 	TEST_ASSERT(size > 0, "Should enable dirty ring first");
1685 
1686 	if (!vcpu->dirty_gfns) {
1687 		void *addr;
1688 
1689 		addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
1690 			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1691 		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");
1692 
1693 		addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
1694 			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1695 		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
1696 
1697 		addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
1698 			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1699 		TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
1700 
1701 		vcpu->dirty_gfns = addr;
1702 		vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
1703 	}
1704 
1705 	return vcpu->dirty_gfns;
1706 }
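
/*
 * Example (first-pass sketch): harvest dirty GFNs published by KVM.  The
 * ring must have been enabled via vm_enable_dirty_ring() before vCPUs
 * were created.  A real test keeps a persistent fetch index that wraps
 * modulo dirty_gfns_count and resets harvested entries via
 * KVM_RESET_DIRTY_RINGS.
 *
 *	struct kvm_dirty_gfn *gfns = vcpu_map_dirty_ring(vcpu);
 *	uint32_t i;
 *
 *	for (i = 0; i < vcpu->dirty_gfns_count; i++) {
 *		struct kvm_dirty_gfn *gfn = &gfns[i];
 *
 *		if (!(READ_ONCE(gfn->flags) & KVM_DIRTY_GFN_F_DIRTY))
 *			break;
 *		pr_info("slot %u offset 0x%llx\n", gfn->slot,
 *			(unsigned long long)gfn->offset);
 *		WRITE_ONCE(gfn->flags, KVM_DIRTY_GFN_F_RESET);
 *	}
 */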
1707 
1708 /*
1709  * Device Ioctl
1710  */
1711 
1712 int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
1713 {
1714 	struct kvm_device_attr attribute = {
1715 		.group = group,
1716 		.attr = attr,
1717 		.flags = 0,
1718 	};
1719 
1720 	return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
1721 }
1722 
1723 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
1724 {
1725 	struct kvm_create_device create_dev = {
1726 		.type = type,
1727 		.flags = KVM_CREATE_DEVICE_TEST,
1728 	};
1729 
1730 	return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
1731 }
1732 
1733 int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
1734 {
1735 	struct kvm_create_device create_dev = {
1736 		.type = type,
1737 		.fd = -1,
1738 		.flags = 0,
1739 	};
1740 	int err;
1741 
1742 	err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
1743 	TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value");
1744 	return err ? : create_dev.fd;
1745 }
1746 
1747 int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
1748 {
1749 	struct kvm_device_attr kvmattr = {
1750 		.group = group,
1751 		.attr = attr,
1752 		.flags = 0,
1753 		.addr = (uintptr_t)val,
1754 	};
1755 
1756 	return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
1757 }
1758 
1759 int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
1760 {
1761 	struct kvm_device_attr kvmattr = {
1762 		.group = group,
1763 		.attr = attr,
1764 		.flags = 0,
1765 		.addr = (uintptr_t)val,
1766 	};
1767 
1768 	return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
1769 }
1770 
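/*
 * Illustrative sketch (hypothetical helper): read-modify-write a u64 device
 * attribute, but only if the device advertises it via KVM_HAS_DEVICE_ATTR.
 */
static inline int example_toggle_device_attr(int dev_fd, uint32_t group,
					     uint64_t attr)
{
	uint64_t val;

	if (__kvm_has_device_attr(dev_fd, group, attr))
		return -1;	/* Attribute not supported. */
	if (__kvm_device_attr_get(dev_fd, group, attr, &val))
		return -1;

	val = !val;
	return __kvm_device_attr_set(dev_fd, group, attr, &val);
}
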
1771 /*
1772  * IRQ related functions.
1773  */
1774 
1775 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
1776 {
1777 	struct kvm_irq_level irq_level = {
1778 		.irq    = irq,
1779 		.level  = level,
1780 	};
1781 
1782 	return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
1783 }
1784 
1785 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
1786 {
1787 	int ret = _kvm_irq_line(vm, irq, level);
1788 
1789 	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
1790 }
1791 
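/*
 * Illustrative sketch (hypothetical helper): pulse an interrupt line by
 * asserting and immediately deasserting it, as a level-triggered test
 * would.
 */
static inline void example_pulse_irq_line(struct kvm_vm *vm, uint32_t irq)
{
	kvm_irq_line(vm, irq, 1);
	kvm_irq_line(vm, irq, 0);
}
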
1792 struct kvm_irq_routing *kvm_gsi_routing_create(void)
1793 {
1794 	struct kvm_irq_routing *routing;
1795 	size_t size;
1796 
1797 	size = sizeof(struct kvm_irq_routing);
	/* Allocate space for the max number of entries: this wastes ~192 KiB. */
1799 	size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
1800 	routing = calloc(1, size);
1801 	assert(routing);
1802 
1803 	return routing;
1804 }
1805 
1806 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
1807 		uint32_t gsi, uint32_t pin)
1808 {
1809 	int i;
1810 
1811 	assert(routing);
1812 	assert(routing->nr < KVM_MAX_IRQ_ROUTES);
1813 
1814 	i = routing->nr;
1815 	routing->entries[i].gsi = gsi;
1816 	routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
1817 	routing->entries[i].flags = 0;
1818 	routing->entries[i].u.irqchip.irqchip = 0;
1819 	routing->entries[i].u.irqchip.pin = pin;
1820 	routing->nr++;
1821 }
1822 
1823 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
1824 {
1825 	int ret;
1826 
1827 	assert(routing);
1828 	ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
1829 	free(routing);
1830 
1831 	return ret;
1832 }
1833 
1834 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
1835 {
1836 	int ret;
1837 
1838 	ret = _kvm_gsi_routing_write(vm, routing);
1839 	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret));
1840 }
1841 
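/*
 * Illustrative sketch (hypothetical helper): build a 1:1 GSI-to-pin routing
 * table and commit it.  kvm_gsi_routing_write() frees the table, so the
 * caller must not touch it afterwards.
 */
static inline void example_identity_gsi_routing(struct kvm_vm *vm,
						uint32_t nr_gsis)
{
	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
	uint32_t i;

	for (i = 0; i < nr_gsis; i++)
		kvm_gsi_routing_irqchip_add(routing, i, i);

	kvm_gsi_routing_write(vm, routing);
}
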
1842 /*
1843  * VM Dump
1844  *
1845  * Input Args:
1846  *   vm - Virtual Machine
1847  *   indent - Left margin indent amount
1848  *
1849  * Output Args:
1850  *   stream - Output FILE stream
1851  *
1852  * Return: None
1853  *
1854  * Dumps the current state of the VM given by vm, to the FILE stream
1855  * given by stream.
1856  */
1857 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
1858 {
1859 	int ctr;
1860 	struct userspace_mem_region *region;
1861 	struct kvm_vcpu *vcpu;
1862 
1863 	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
1864 	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
1865 	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
1866 	fprintf(stream, "%*sMem Regions:\n", indent, "");
1867 	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
1868 		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
1869 			"host_virt: %p\n", indent + 2, "",
1870 			(uint64_t) region->region.guest_phys_addr,
1871 			(uint64_t) region->region.memory_size,
1872 			region->host_mem);
1873 		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
1874 		sparsebit_dump(stream, region->unused_phy_pages, 0);
1875 	}
1876 	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
1877 	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
1878 	fprintf(stream, "%*spgd_created: %u\n", indent, "",
1879 		vm->pgd_created);
1880 	if (vm->pgd_created) {
1881 		fprintf(stream, "%*sVirtual Translation Tables:\n",
1882 			indent + 2, "");
1883 		virt_dump(stream, vm, indent + 4);
1884 	}
1885 	fprintf(stream, "%*sVCPUs:\n", indent, "");
1886 
1887 	list_for_each_entry(vcpu, &vm->vcpus, list)
1888 		vcpu_dump(stream, vcpu, indent + 2);
1889 }
1890 
1891 #define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x}
1892 
1893 /* Known KVM exit reasons */
1894 static struct exit_reason {
1895 	unsigned int reason;
1896 	const char *name;
1897 } exit_reasons_known[] = {
1898 	KVM_EXIT_STRING(UNKNOWN),
1899 	KVM_EXIT_STRING(EXCEPTION),
1900 	KVM_EXIT_STRING(IO),
1901 	KVM_EXIT_STRING(HYPERCALL),
1902 	KVM_EXIT_STRING(DEBUG),
1903 	KVM_EXIT_STRING(HLT),
1904 	KVM_EXIT_STRING(MMIO),
1905 	KVM_EXIT_STRING(IRQ_WINDOW_OPEN),
1906 	KVM_EXIT_STRING(SHUTDOWN),
1907 	KVM_EXIT_STRING(FAIL_ENTRY),
1908 	KVM_EXIT_STRING(INTR),
1909 	KVM_EXIT_STRING(SET_TPR),
1910 	KVM_EXIT_STRING(TPR_ACCESS),
1911 	KVM_EXIT_STRING(S390_SIEIC),
1912 	KVM_EXIT_STRING(S390_RESET),
1913 	KVM_EXIT_STRING(DCR),
1914 	KVM_EXIT_STRING(NMI),
1915 	KVM_EXIT_STRING(INTERNAL_ERROR),
1916 	KVM_EXIT_STRING(OSI),
1917 	KVM_EXIT_STRING(PAPR_HCALL),
1918 	KVM_EXIT_STRING(S390_UCONTROL),
1919 	KVM_EXIT_STRING(WATCHDOG),
1920 	KVM_EXIT_STRING(S390_TSCH),
1921 	KVM_EXIT_STRING(EPR),
1922 	KVM_EXIT_STRING(SYSTEM_EVENT),
1923 	KVM_EXIT_STRING(S390_STSI),
1924 	KVM_EXIT_STRING(IOAPIC_EOI),
1925 	KVM_EXIT_STRING(HYPERV),
1926 	KVM_EXIT_STRING(ARM_NISV),
1927 	KVM_EXIT_STRING(X86_RDMSR),
1928 	KVM_EXIT_STRING(X86_WRMSR),
1929 	KVM_EXIT_STRING(DIRTY_RING_FULL),
1930 	KVM_EXIT_STRING(AP_RESET_HOLD),
1931 	KVM_EXIT_STRING(X86_BUS_LOCK),
1932 	KVM_EXIT_STRING(XEN),
1933 	KVM_EXIT_STRING(RISCV_SBI),
1934 	KVM_EXIT_STRING(RISCV_CSR),
1935 	KVM_EXIT_STRING(NOTIFY),
1936 #ifdef KVM_EXIT_MEMORY_NOT_PRESENT
1937 	KVM_EXIT_STRING(MEMORY_NOT_PRESENT),
1938 #endif
1939 };
1940 
1941 /*
1942  * Exit Reason String
1943  *
1944  * Input Args:
1945  *   exit_reason - Exit reason
1946  *
1947  * Output Args: None
1948  *
1949  * Return:
1950  *   Constant string pointer describing the exit reason.
1951  *
1952  * Locates and returns a constant string that describes the KVM exit
1953  * reason given by exit_reason.  If no such string is found, a constant
1954  * string of "Unknown" is returned.
1955  */
1956 const char *exit_reason_str(unsigned int exit_reason)
1957 {
1958 	unsigned int n1;
1959 
1960 	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
1961 		if (exit_reason == exit_reasons_known[n1].reason)
1962 			return exit_reasons_known[n1].name;
1963 	}
1964 
1965 	return "Unknown";
1966 }
1967 
1968 /*
1969  * Physical Contiguous Page Allocator
1970  *
1971  * Input Args:
1972  *   vm - Virtual Machine
1973  *   num - number of pages
1974  *   paddr_min - Physical address minimum
1975  *   memslot - Memory region to allocate page from
1976  *
1977  * Output Args: None
1978  *
1979  * Return:
1980  *   Starting physical address
1981  *
1982  * Within the VM specified by vm, locates a range of available physical
1983  * pages at or above paddr_min. If found, the pages are marked as in use
1984  * and their base address is returned. A TEST_ASSERT failure occurs if
1985  * not enough pages are available at or above paddr_min.
1986  */
1987 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
1988 			      vm_paddr_t paddr_min, uint32_t memslot)
1989 {
1990 	struct userspace_mem_region *region;
1991 	sparsebit_idx_t pg, base;
1992 
1993 	TEST_ASSERT(num > 0, "Must allocate at least one page");
1994 
1995 	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
1996 		"not divisible by page size.\n"
1997 		"  paddr_min: 0x%lx page_size: 0x%x",
1998 		paddr_min, vm->page_size);
1999 
2000 	region = memslot2region(vm, memslot);
2001 	base = pg = paddr_min >> vm->page_shift;
2002 
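	/*
	 * Scan for "num" contiguous free pages, restarting the window at the
	 * next free page whenever a hole is hit.  sparsebit_next_set()
	 * returns 0 when no set bit remains, so pg == 0 below means the
	 * region was exhausted.
	 */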
2003 	do {
2004 		for (; pg < base + num; ++pg) {
2005 			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
2006 				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
2007 				break;
2008 			}
2009 		}
2010 	} while (pg && pg != base + num);
2011 
2012 	if (pg == 0) {
2013 		fprintf(stderr, "No guest physical page available, "
2014 			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
2015 			paddr_min, vm->page_size, memslot);
2016 		fputs("---- vm dump ----\n", stderr);
2017 		vm_dump(stderr, vm, 2);
2018 		abort();
2019 	}
2020 
2021 	for (pg = base; pg < base + num; ++pg)
2022 		sparsebit_clear(region->unused_phy_pages, pg);
2023 
2024 	return base * vm->page_size;
2025 }
2026 
2027 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
2028 			     uint32_t memslot)
2029 {
2030 	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
2031 }
2032 
2033 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
2034 {
2035 	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
2036 				 vm->memslots[MEM_REGION_PT]);
2037 }
2038 
2039 /*
2040  * Address Guest Virtual to Host Virtual
2041  *
2042  * Input Args:
2043  *   vm - Virtual Machine
2044  *   gva - VM virtual address
2045  *
2046  * Output Args: None
2047  *
2048  * Return:
2049  *   Equivalent host virtual address
2050  */
2051 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
2052 {
2053 	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
2054 }
2055 
2056 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm)
2057 {
2058 	return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
2059 }
2060 
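/*
 * Convert a count of pages of size (1 << page_shift) into the equivalent
 * count of pages of size (1 << new_page_shift), rounding up when "ceil" is
 * set.  E.g. 3 pages of 64 KiB (page_shift = 16) correspond to 48 pages of
 * 4 KiB (new_page_shift = 12).
 */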
2061 static unsigned int vm_calc_num_pages(unsigned int num_pages,
2062 				      unsigned int page_shift,
2063 				      unsigned int new_page_shift,
2064 				      bool ceil)
2065 {
	unsigned int n;

	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	/* Only compute the ratio here, where the shift count is positive. */
	n = 1 << (new_page_shift - page_shift);

	return num_pages / n + !!(ceil && num_pages % n);
2072 }
2073 
2074 static inline int getpageshift(void)
2075 {
2076 	return __builtin_ffs(getpagesize()) - 1;
2077 }
2078 
2079 unsigned int
2080 vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
2081 {
2082 	return vm_calc_num_pages(num_guest_pages,
2083 				 vm_guest_mode_params[mode].page_shift,
2084 				 getpageshift(), true);
2085 }
2086 
2087 unsigned int
2088 vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
2089 {
2090 	return vm_calc_num_pages(num_host_pages, getpageshift(),
2091 				 vm_guest_mode_params[mode].page_shift, false);
2092 }
2093 
2094 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
2095 {
2096 	unsigned int n;
2097 	n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
2098 	return vm_adjust_num_guest_pages(mode, n);
2099 }
2100 
2101 /*
2102  * Read binary stats descriptors
2103  *
2104  * Input Args:
2105  *   stats_fd - the file descriptor for the binary stats file from which to read
2106  *   header - the binary stats metadata header corresponding to the given FD
2107  *
2108  * Output Args: None
2109  *
2110  * Return:
2111  *   A pointer to a newly allocated series of stat descriptors.
2112  *   Caller is responsible for freeing the returned kvm_stats_desc.
2113  *
2114  * Read the stats descriptors from the binary stats interface.
2115  */
2116 struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
2117 					      struct kvm_stats_header *header)
2118 {
2119 	struct kvm_stats_desc *stats_desc;
2120 	ssize_t desc_size, total_size, ret;
2121 
2122 	desc_size = get_stats_descriptor_size(header);
2123 	total_size = header->num_desc * desc_size;
2124 
2125 	stats_desc = calloc(header->num_desc, desc_size);
	TEST_ASSERT(stats_desc, "Failed to allocate memory for stats descriptors");
2127 
2128 	ret = pread(stats_fd, stats_desc, total_size, header->desc_offset);
	TEST_ASSERT(ret == total_size, "Failed to read KVM stats descriptors");
2130 
2131 	return stats_desc;
2132 }
2133 
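/*
 * Illustrative sketch (hypothetical helper): typical pairing of
 * read_stats_header() with the descriptor read above.  The caller must
 * free() the returned descriptors.
 */
static inline struct kvm_stats_desc *example_read_vm_stats_descs(struct kvm_vm *vm,
								 int *stats_fd)
{
	struct kvm_stats_header header;

	*stats_fd = vm_get_stats_fd(vm);
	read_stats_header(*stats_fd, &header);

	return read_stats_descriptors(*stats_fd, &header);
}
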
2134 /*
2135  * Read stat data for a particular stat
2136  *
2137  * Input Args:
2138  *   stats_fd - the file descriptor for the binary stats file from which to read
2139  *   header - the binary stats metadata header corresponding to the given FD
2140  *   desc - the binary stat metadata for the particular stat to be read
2141  *   max_elements - the maximum number of 8-byte values to read into data
2142  *
2143  * Output Args:
2144  *   data - the buffer into which stat data should be read
2145  *
2146  * Read the data values of a specified stat from the binary stats interface.
2147  */
2148 void read_stat_data(int stats_fd, struct kvm_stats_header *header,
2149 		    struct kvm_stats_desc *desc, uint64_t *data,
2150 		    size_t max_elements)
2151 {
2152 	size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
2153 	size_t size = nr_elements * sizeof(*data);
2154 	ssize_t ret;
2155 
2156 	TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name);
2157 	TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name);
2158 
2159 	ret = pread(stats_fd, data, size,
2160 		    header->data_offset + desc->offset);
2161 
2162 	TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)",
2163 		    desc->name, errno, strerror(errno));
	TEST_ASSERT(ret == size,
		    "pread() on stat '%s' read %ld bytes, wanted %lu bytes",
		    desc->name, ret, size);
2167 }
2168 
2169 /*
2170  * Read the data of the named stat
2171  *
2172  * Input Args:
2173  *   vm - the VM for which the stat should be read
2174  *   stat_name - the name of the stat to read
2175  *   max_elements - the maximum number of 8-byte values to read into data
2176  *
2177  * Output Args:
2178  *   data - the buffer into which stat data should be read
2179  *
2180  * Read the data values of a specified stat from the binary stats interface.
2181  */
2182 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
2183 		   size_t max_elements)
2184 {
2185 	struct kvm_stats_desc *desc;
2186 	size_t size_desc;
2187 	int i;
2188 
2189 	if (!vm->stats_fd) {
2190 		vm->stats_fd = vm_get_stats_fd(vm);
2191 		read_stats_header(vm->stats_fd, &vm->stats_header);
2192 		vm->stats_desc = read_stats_descriptors(vm->stats_fd,
2193 							&vm->stats_header);
2194 	}
2195 
2196 	size_desc = get_stats_descriptor_size(&vm->stats_header);
2197 
2198 	for (i = 0; i < vm->stats_header.num_desc; ++i) {
2199 		desc = (void *)vm->stats_desc + (i * size_desc);
2200 
2201 		if (strcmp(desc->name, stat_name))
2202 			continue;
2203 
2204 		read_stat_data(vm->stats_fd, &vm->stats_header, desc,
2205 			       data, max_elements);
2206 
2207 		break;
2208 	}
2209 }
2210 
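/*
 * Illustrative sketch: read a single u64 stat by name.  This mirrors the
 * one-element vm_get_stat() wrapper that the selftest headers provide.
 */
static inline uint64_t example_vm_get_stat_u64(struct kvm_vm *vm,
					       const char *name)
{
	uint64_t data = 0;

	__vm_get_stat(vm, name, &data, 1);

	return data;
}
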
2211 __weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
2212 {
2213 }
2214 
2215 __weak void kvm_selftest_arch_init(void)
2216 {
2217 }
2218 
void __attribute__((constructor)) kvm_selftest_init(void)
2220 {
2221 	/* Tell stdout not to buffer its content. */
2222 	setbuf(stdout, NULL);
2223 
2224 	kvm_selftest_arch_init();
2225 }
2226