// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"

#include <assert.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>

#define KVM_UTIL_MIN_PFN	2

uint32_t guest_random_seed;
struct guest_random_state guest_rng;
static uint32_t last_guest_seed;

static int vcpu_mmap_sz(void);

int open_path_or_exit(const char *path, int flags)
{
	int fd;

	fd = open(path, flags);
	__TEST_REQUIRE(fd >= 0 || errno != ENOENT, "Cannot open %s: %s", path, strerror(errno));
	TEST_ASSERT(fd >= 0, "Failed to open '%s'", path);

	return fd;
}

/*
 * Open KVM_DEV_PATH if available, otherwise exit the entire program.
 *
 * Input Args:
 *   flags - The flags to pass when opening KVM_DEV_PATH.
 *
 * Return:
 *   The opened file descriptor of /dev/kvm.
 */
static int _open_kvm_dev_path_or_exit(int flags)
{
	return open_path_or_exit(KVM_DEV_PATH, flags);
}

int open_kvm_dev_path_or_exit(void)
{
	return _open_kvm_dev_path_or_exit(O_RDONLY);
}

static ssize_t get_module_param(const char *module_name, const char *param,
				void *buffer, size_t buffer_size)
{
	const int path_size = 128;
	char path[path_size];
	ssize_t bytes_read;
	int fd, r;

	r = snprintf(path, path_size, "/sys/module/%s/parameters/%s",
		     module_name, param);
	TEST_ASSERT(r < path_size,
		    "Failed to construct sysfs path in %d bytes.", path_size);

	fd = open_path_or_exit(path, O_RDONLY);

	bytes_read = read(fd, buffer, buffer_size);
	TEST_ASSERT(bytes_read > 0, "read(%s) returned %ld, wanted %ld bytes",
		    path, bytes_read, buffer_size);

	r = close(fd);
	TEST_ASSERT(!r, "close(%s) failed", path);
	return bytes_read;
}

static int get_module_param_integer(const char *module_name, const char *param)
{
	/*
	 * 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the
	 * NUL char, and 1 byte because the kernel sucks and inserts a newline
	 * at the end.
	 */
	char value[16 + 1 + 1];
	ssize_t r;

	memset(value, '\0', sizeof(value));

	r = get_module_param(module_name, param, value, sizeof(value));
	TEST_ASSERT(value[r - 1] == '\n',
		    "Expected trailing newline, got char '%c'", value[r - 1]);

	/*
	 * Squash the newline, otherwise atoi_paranoid() will complain about
	 * trailing non-NUL characters in the string.
	 */
	value[r - 1] = '\0';
	return atoi_paranoid(value);
}

static bool get_module_param_bool(const char *module_name, const char *param)
{
	char value;
	ssize_t r;

	r = get_module_param(module_name, param, &value, sizeof(value));
	TEST_ASSERT_EQ(r, 1);

	if (value == 'Y')
		return true;
	else if (value == 'N')
		return false;

	TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
}

bool get_kvm_param_bool(const char *param)
{
	return get_module_param_bool("kvm", param);
}

bool get_kvm_intel_param_bool(const char *param)
{
	return get_module_param_bool("kvm_intel", param);
}

bool get_kvm_amd_param_bool(const char *param)
{
	return get_module_param_bool("kvm_amd", param);
}

int get_kvm_param_integer(const char *param)
{
	return get_module_param_integer("kvm", param);
}

int get_kvm_intel_param_integer(const char *param)
{
	return get_module_param_integer("kvm_intel", param);
}

int get_kvm_amd_param_integer(const char *param)
{
	return get_module_param_integer("kvm_amd", param);
}
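
/*
 * Example (illustrative sketch, not used in this file; "ept" is assumed to
 * be an available kvm_intel module parameter): tests typically gate on a
 * module param via TEST_REQUIRE():
 *
 *	TEST_REQUIRE(get_kvm_intel_param_bool("ept"));
 */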

/*
 * Capability
 *
 * Input Args:
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return:
 *   On success, the value corresponding to the capability (KVM_CAP_*)
 *   specified by the value of cap. On failure a TEST_ASSERT failure
 *   is produced.
 *
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
unsigned int kvm_check_cap(long cap)
{
	int ret;
	int kvm_fd;

	kvm_fd = open_kvm_dev_path_or_exit();
	ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));

	close(kvm_fd);

	return (unsigned int)ret;
}
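
/*
 * Example usage (sketch): gate a test on a capability's value before
 * creating any VMs, e.g. require that the host supports enough vCPUs:
 *
 *	TEST_REQUIRE(kvm_check_cap(KVM_CAP_MAX_VCPUS) >= nr_vcpus);
 */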

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
	if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
	else
		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
	vm->dirty_ring_size = ring_size;
}

static void vm_open(struct kvm_vm *vm)
{
	vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));

	vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
	TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
}

const char *vm_guest_mode_string(uint32_t i)
{
	static const char * const strings[] = {
		[VM_MODE_P52V48_4K]	= "PA-bits:52, VA-bits:48, 4K pages",
		[VM_MODE_P52V48_16K]	= "PA-bits:52, VA-bits:48, 16K pages",
		[VM_MODE_P52V48_64K]	= "PA-bits:52, VA-bits:48, 64K pages",
		[VM_MODE_P48V48_4K]	= "PA-bits:48, VA-bits:48, 4K pages",
		[VM_MODE_P48V48_16K]	= "PA-bits:48, VA-bits:48, 16K pages",
		[VM_MODE_P48V48_64K]	= "PA-bits:48, VA-bits:48, 64K pages",
		[VM_MODE_P40V48_4K]	= "PA-bits:40, VA-bits:48, 4K pages",
		[VM_MODE_P40V48_16K]	= "PA-bits:40, VA-bits:48, 16K pages",
		[VM_MODE_P40V48_64K]	= "PA-bits:40, VA-bits:48, 64K pages",
		[VM_MODE_PXXV48_4K]	= "PA-bits:ANY, VA-bits:48, 4K pages",
		[VM_MODE_P47V64_4K]	= "PA-bits:47, VA-bits:64, 4K pages",
		[VM_MODE_P44V64_4K]	= "PA-bits:44, VA-bits:64, 4K pages",
		[VM_MODE_P36V48_4K]	= "PA-bits:36, VA-bits:48, 4K pages",
		[VM_MODE_P36V48_16K]	= "PA-bits:36, VA-bits:48, 16K pages",
		[VM_MODE_P36V48_64K]	= "PA-bits:36, VA-bits:48, 64K pages",
		[VM_MODE_P36V47_16K]	= "PA-bits:36, VA-bits:47, 16K pages",
	};
	_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
		       "Missing new mode strings?");

	TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);

	return strings[i];
}

const struct vm_guest_mode_params vm_guest_mode_params[] = {
	[VM_MODE_P52V48_4K]	= { 52, 48, 0x1000, 12 },
	[VM_MODE_P52V48_16K]	= { 52, 48, 0x4000, 14 },
	[VM_MODE_P52V48_64K]	= { 52, 48, 0x10000, 16 },
	[VM_MODE_P48V48_4K]	= { 48, 48, 0x1000, 12 },
	[VM_MODE_P48V48_16K]	= { 48, 48, 0x4000, 14 },
	[VM_MODE_P48V48_64K]	= { 48, 48, 0x10000, 16 },
	[VM_MODE_P40V48_4K]	= { 40, 48, 0x1000, 12 },
	[VM_MODE_P40V48_16K]	= { 40, 48, 0x4000, 14 },
	[VM_MODE_P40V48_64K]	= { 40, 48, 0x10000, 16 },
	[VM_MODE_PXXV48_4K]	= { 0, 0, 0x1000, 12 },
	[VM_MODE_P47V64_4K]	= { 47, 64, 0x1000, 12 },
	[VM_MODE_P44V64_4K]	= { 44, 64, 0x1000, 12 },
	[VM_MODE_P36V48_4K]	= { 36, 48, 0x1000, 12 },
	[VM_MODE_P36V48_16K]	= { 36, 48, 0x4000, 14 },
	[VM_MODE_P36V48_64K]	= { 36, 48, 0x10000, 16 },
	[VM_MODE_P36V47_16K]	= { 36, 47, 0x4000, 14 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");

/*
 * Initializes vm->vpages_valid to match the canonical VA space of the
 * architecture.
 *
 * The default implementation is valid for architectures which split the
 * range addressed by a single page table into a low and high region
 * based on the MSB of the VA. On architectures with this behavior
 * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
 */
__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
}
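
/*
 * Worked example (sketch): with va_bits = 48 and 4K pages (page_shift = 12),
 * the two ranges set above are the canonical halves
 * [0, 0x00007fffffffffff] and [0xffff800000000000, 0xffffffffffffffff],
 * i.e. 2^35 valid page indexes each, starting at page index 0 and at page
 * index 0xffff800000000 respectively.
 */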

struct kvm_vm *____vm_create(struct vm_shape shape)
{
	struct kvm_vm *vm;

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	INIT_LIST_HEAD(&vm->vcpus);
	vm->regions.gpa_tree = RB_ROOT;
	vm->regions.hva_tree = RB_ROOT;
	hash_init(vm->regions.slot_hash);

	vm->mode = shape.mode;
	vm->type = shape.type;

	vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[vm->mode].va_bits;
	vm->page_size = vm_guest_mode_params[vm->mode].page_size;
	vm->page_shift = vm_guest_mode_params[vm->mode].page_shift;

	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P52V48_16K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P36V47_16K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		kvm_init_vm_address_properties(vm);
		/*
		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
		 * it doesn't take effect unless CR4.LA57 is set, which it
		 * isn't for this mode (48-bit virtual address space).
		 */
		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
			    "Linear address width (%d bits) not supported",
			    vm->va_bits);
		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		vm->pgtable_levels = 4;
		vm->va_bits = 48;
#else
		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
		break;
	case VM_MODE_P47V64_4K:
		vm->pgtable_levels = 5;
		break;
	case VM_MODE_P44V64_4K:
		vm->pgtable_levels = 5;
		break;
	default:
		TEST_FAIL("Unknown guest mode: 0x%x", vm->mode);
	}

#ifdef __aarch64__
	TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types");
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm);

	/* Limit to VA-bit canonical virtual addresses. */
	vm->vpages_valid = sparsebit_alloc();
	vm_vaddr_populate_bitmap(vm);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = vm_compute_max_gfn(vm);

	/* Allocate and setup memory for guest. */
	vm->vpages_mapped = sparsebit_alloc();

	return vm;
}

static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
				     uint32_t nr_runnable_vcpus,
				     uint64_t extra_mem_pages)
{
	uint64_t page_size = vm_guest_mode_params[mode].page_size;
	uint64_t nr_pages;

	TEST_ASSERT(nr_runnable_vcpus,
		    "Use vm_create_barebones() for VMs that _never_ have vCPUs");

	TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
		    "nr_vcpus = %d too large for host, max-vcpus = %d",
		    nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));

	/*
	 * Arbitrarily allocate 512 pages (2MB when the page size is 4KB) for
	 * the test code and other per-VM assets that will be loaded into
	 * memslot0.
	 */
	nr_pages = 512;

	/* Account for the per-vCPU stacks on behalf of the test. */
	nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;

	/*
	 * Account for the number of pages needed for the page tables. The
	 * maximum page table size for a memory region will be when the
	 * smallest page size is used. Considering each page contains x page
	 * table descriptors, the total extra size for page tables (for extra
	 * N pages) will be: N/x + N/x^2 + N/x^3 + ..., which is strictly
	 * smaller than 2*N/x.
	 */
	nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;

	/* Account for the number of pages needed by ucall. */
	nr_pages += ucall_nr_pages_required(page_size);

	return vm_adjust_num_guest_pages(mode, nr_pages);
}
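
/*
 * Worked example of the page table bound above (sketch): with 4K pages and
 * 8-byte descriptors, x = PTES_PER_MIN_PAGE = 512, so mapping N pages costs
 * N/512 + N/512^2 + ... pages of page tables. That geometric series sums to
 * N/(512 - 1), which is comfortably below the 2*N/512 pages reserved by
 * vm_nr_pages_required().
 */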

struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages)
{
	uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
						 nr_extra_pages);
	struct userspace_mem_region *slot0;
	struct kvm_vm *vm;
	int i;

	pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__,
		 vm_guest_mode_string(shape.mode), shape.type, nr_pages);

	vm = ____vm_create(shape);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
	for (i = 0; i < NR_MEM_REGIONS; i++)
		vm->memslots[i] = 0;

	kvm_vm_elf_load(vm, program_invocation_name);

	/*
	 * TODO: Add proper defines to protect the library's memslots, and then
	 * carve out memslot1 for the ucall MMIO address. KVM treats writes to
	 * read-only memslots as MMIO, and creating a read-only memslot for the
	 * MMIO region would prevent silently clobbering the MMIO region.
	 */
	slot0 = memslot2region(vm, 0);
	ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);

	if (guest_random_seed != last_guest_seed) {
		pr_info("Random seed: 0x%x\n", guest_random_seed);
		last_guest_seed = guest_random_seed;
	}
	guest_rng = new_guest_random_state(guest_random_seed);
	sync_global_to_guest(vm, guest_rng);

	kvm_arch_vm_post_create(vm);

	return vm;
}

/*
 * VM Create with customized parameters
 *
 * Input Args:
 *   shape - VM shape (mode, e.g. VM_MODE_P52V48_4K, plus VM type)
 *   nr_vcpus - VCPU count
 *   extra_mem_pages - Non-slot0 physical memory total size
 *   guest_code - Guest entry point
 *   vcpus - Array that is filled with the created vCPUs
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by shape.mode (e.g. VM_MODE_P52V48_4K).
 * extra_mem_pages is only used to calculate the maximum page table size,
 * no real memory allocation for non-slot0 memory in this function.
 */
struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[])
{
	struct kvm_vm *vm;
	int i;

	TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");

	vm = __vm_create(shape, nr_vcpus, extra_mem_pages);

	for (i = 0; i < nr_vcpus; ++i)
		vcpus[i] = vm_vcpu_add(vm, i, guest_code);

	return vm;
}

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       uint64_t extra_mem_pages,
					       void *guest_code)
{
	struct kvm_vcpu *vcpus[1];
	struct kvm_vm *vm;

	vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus);

	*vcpu = vcpus[0];
	return vm;
}
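
/*
 * Typical usage (illustrative sketch): most tests go through the
 * vm_create_with_one_vcpu() wrapper declared in kvm_util.h rather than
 * passing a vm_shape explicitly:
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 */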

/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated with the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp)
{
	int ctr;
	struct userspace_mem_region *region;

	vm_open(vmp);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, &region->region);

		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
			    "  rc: %i errno: %i\n"
			    "  slot: %u flags: 0x%x\n"
			    "  guest_phys_addr: 0x%llx size: 0x%llx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}

__weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
					      uint32_t vcpu_id)
{
	return __vm_vcpu_add(vm, vcpu_id);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
{
	kvm_vm_restart(vm);

	return vm_vcpu_recreate(vm, 0);
}

void kvm_pin_this_task_to_pcpu(uint32_t pcpu)
{
	cpu_set_t mask;
	int r;

	CPU_ZERO(&mask);
	CPU_SET(pcpu, &mask);
	r = sched_setaffinity(0, sizeof(mask), &mask);
	TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.", pcpu);
}

static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
{
	uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);

	TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
		    "Not allowed to run on pCPU '%d', check cgroups?", pcpu);
	return pcpu;
}

void kvm_print_vcpu_pinning_help(void)
{
	const char *name = program_invocation_name;

	printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
	       "     values (target pCPU), one for each vCPU, plus an optional\n"
	       "     entry for the main application task (specified via entry\n"
	       "     <nr_vcpus + 1>). If used, entries must be provided for all\n"
	       "     vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
	       "     E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
	       "     vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
	       "         %s -v 3 -c 22,23,24,50\n\n"
	       "     To leave the application task unpinned, drop the final entry:\n\n"
	       "         %s -v 3 -c 22,23,24\n\n"
	       "     (default: no pinning)\n", name, name);
}

void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus)
{
	cpu_set_t allowed_mask;
	char *cpu, *cpu_list;
	char delim[2] = ",";
	int i, r;

	cpu_list = strdup(pcpus_string);
	TEST_ASSERT(cpu_list, "strdup() allocation failed.");

	r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask);
	TEST_ASSERT(!r, "sched_getaffinity() failed");

	cpu = strtok(cpu_list, delim);

	/* 1. Get all pcpus for vcpus. */
	for (i = 0; i < nr_vcpus; i++) {
		TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'", i);
		vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
		cpu = strtok(NULL, delim);
	}

	/* 2. Check if the main worker needs to be pinned. */
	if (cpu) {
		kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask));
		cpu = strtok(NULL, delim);
	}

	TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu);
	free(cpu_list);
}
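
/*
 * Example (sketch): with nr_vcpus = 3 and pcpus_string = "22,23,24,50",
 * vcpu_to_pcpu[] is filled with {22, 23, 24} and the calling (main) task is
 * pinned to pCPU 50; passing "22,23,24" fills the array but leaves the
 * calling task unpinned.
 */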

/*
 * Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Searches for a region with any physical memory that overlaps with
 * any portion of the guest physical addresses from start to end
 * inclusive. If multiple overlapping regions exist, a pointer to any
 * of the regions is returned. Null is returned only when no overlapping
 * region exists.
 */
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
	struct rb_node *node;

	for (node = vm->regions.gpa_tree.rb_node; node; ) {
		struct userspace_mem_region *region =
			container_of(node, struct userspace_mem_region, gpa_node);
		uint64_t existing_start = region->region.guest_phys_addr;
		uint64_t existing_end = region->region.guest_phys_addr
			+ region->region.memory_size - 1;
		if (start <= existing_end && end >= existing_start)
			return region;

		if (start < existing_start)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}

__weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
{

}

/*
 * VM VCPU Remove
 *
 * Input Args:
 *   vcpu - VCPU to remove
 *
 * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
 * Removes a vCPU from a VM and frees its resources.
 */
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	int ret;

	if (vcpu->dirty_gfns) {
		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
		vcpu->dirty_gfns = NULL;
	}

	ret = munmap(vcpu->run, vcpu_mmap_sz());
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));

	ret = close(vcpu->fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));

	list_del(&vcpu->list);

	vcpu_arch_free(vcpu);
	free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
	struct kvm_vcpu *vcpu, *tmp;
	int ret;

	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
		vm_vcpu_rm(vmp, vcpu);

	ret = close(vmp->fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));

	ret = close(vmp->kvm_fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
}

static void __vm_mem_region_delete(struct kvm_vm *vm,
				   struct userspace_mem_region *region,
				   bool unlink)
{
	int ret;

	if (unlink) {
		rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
		rb_erase(&region->hva_node, &vm->regions.hva_tree);
		hash_del(&region->slot_node);
	}

	region->region.memory_size = 0;
	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

	sparsebit_free(&region->unused_phy_pages);
	sparsebit_free(&region->protected_phy_pages);
	ret = munmap(region->mmap_start, region->mmap_size);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
	if (region->fd >= 0) {
		/* There's an extra map when using shared memory. */
		ret = munmap(region->mmap_alias, region->mmap_size);
		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
		close(region->fd);
	}
	if (region->region.guest_memfd >= 0)
		close(region->region.guest_memfd);

	free(region);
}

/*
 * Destroys and frees the VM pointed to by vmp.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
	int ctr;
	struct hlist_node *node;
	struct userspace_mem_region *region;

	if (vmp == NULL)
		return;

	/* Free cached stats metadata and close FD */
	if (vmp->stats_fd) {
		free(vmp->stats_desc);
		close(vmp->stats_fd);
	}

	/* Free userspace_mem_regions. */
	hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
		__vm_mem_region_delete(vmp, region, false);

	/* Free sparsebit arrays. */
	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	/* Free the structure describing the VM. */
	free(vmp);
}

int kvm_memfd_alloc(size_t size, bool hugepages)
{
	int memfd_flags = MFD_CLOEXEC;
	int fd, r;

	if (hugepages)
		memfd_flags |= MFD_HUGETLB;

	fd = memfd_create("kvm_selftest", memfd_flags);
	TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));

	r = ftruncate(fd, size);
	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));

	r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));

	return fd;
}

/*
 * Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Input/Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal to the guest virtual bytes starting at gva. Returns
 *   a value < 0, if bytes at hva are less than those at gva.
 *   Otherwise a value > 0 is returned.
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a batch of bytes until either a mismatch is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Determine host address for guest virtual address
		 * at offset.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Determine amount to compare on this pass.
		 * Don't allow the comparison to cross a page boundary.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/*
		 * Perform the comparison. If there is a difference
		 * return that result to the caller, otherwise need
		 * to continue on looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/*
	 * No mismatch found. Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}

static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
					       struct userspace_mem_region *region)
{
	struct rb_node **cur, *parent;

	for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
		struct userspace_mem_region *cregion;

		cregion = container_of(*cur, typeof(*cregion), gpa_node);
		parent = *cur;
		if (region->region.guest_phys_addr <
		    cregion->region.guest_phys_addr)
			cur = &(*cur)->rb_left;
		else {
			TEST_ASSERT(region->region.guest_phys_addr !=
				    cregion->region.guest_phys_addr,
				    "Duplicate GPA in region tree");

			cur = &(*cur)->rb_right;
		}
	}

	rb_link_node(&region->gpa_node, parent, cur);
	rb_insert_color(&region->gpa_node, gpa_tree);
}

static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
					       struct userspace_mem_region *region)
{
	struct rb_node **cur, *parent;

	for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
		struct userspace_mem_region *cregion;

		cregion = container_of(*cur, typeof(*cregion), hva_node);
		parent = *cur;
		if (region->host_mem < cregion->host_mem)
			cur = &(*cur)->rb_left;
		else {
			TEST_ASSERT(region->host_mem !=
				    cregion->host_mem,
				    "Duplicate HVA in region tree");

			cur = &(*cur)->rb_right;
		}
	}

	rb_link_node(&region->hva_node, parent, cur);
	rb_insert_color(&region->hva_node, hva_tree);
}


int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva)
{
	struct kvm_userspace_memory_region region = {
		.slot = slot,
		.flags = flags,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uintptr_t)hva,
	};

	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
}

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva)
{
	int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)",
		    errno, strerror(errno));
}

#define TEST_REQUIRE_SET_USER_MEMORY_REGION2()			\
	__TEST_REQUIRE(kvm_has_cap(KVM_CAP_USER_MEMORY2),	\
		       "KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)")

int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 uint64_t gpa, uint64_t size, void *hva,
				 uint32_t guest_memfd, uint64_t guest_memfd_offset)
{
	struct kvm_userspace_memory_region2 region = {
		.slot = slot,
		.flags = flags,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uintptr_t)hva,
		.guest_memfd = guest_memfd,
		.guest_memfd_offset = guest_memfd_offset,
	};

	TEST_REQUIRE_SET_USER_MEMORY_REGION2();

	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region);
}

void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva,
				uint32_t guest_memfd, uint64_t guest_memfd_offset)
{
	int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
					       guest_memfd, guest_memfd_offset);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed, errno = %d (%s)",
		    errno, strerror(errno));
}


/* FIXME: This thing needs to be ripped apart and rewritten. */
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
		uint32_t flags, int guest_memfd, uint64_t guest_memfd_offset)
{
	int ret;
	struct userspace_mem_region *region;
	size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
	size_t mem_size = npages * vm->page_size;
	size_t alignment;

	TEST_REQUIRE_SET_USER_MEMORY_REGION2();

	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
		    "Number of guest pages is not compatible with the host. "
		    "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		    "address not on a page boundary.\n"
		    "  guest_paddr: 0x%lx vm->page_size: 0x%x",
		    guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		    <= vm->max_gfn, "Physical range beyond maximum "
		    "supported physical address,\n"
		    "  guest_paddr: 0x%lx npages: 0x%lx\n"
		    "  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    guest_paddr, npages, vm->max_gfn, vm->page_size);

	/*
	 * Confirm a mem region with an overlapping address doesn't
	 * already exist.
	 */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_FAIL("overlapping userspace_mem_region already "
			  "exists\n"
			  "  requested guest_paddr: 0x%lx npages: 0x%lx "
			  "page_size: 0x%x\n"
			  "  existing guest_paddr: 0x%lx size: 0x%lx",
			  guest_paddr, npages, vm->page_size,
			  (uint64_t) region->region.guest_phys_addr,
			  (uint64_t) region->region.memory_size);

	/* Confirm no region with the requested slot already exists. */
	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
			       slot) {
		if (region->region.slot != slot)
			continue;

		TEST_FAIL("A mem region with the requested slot "
			  "already exists.\n"
			  "  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			  "  existing slot: %u paddr: 0x%lx size: 0x%lx",
			  slot, guest_paddr, npages,
			  region->region.slot,
			  (uint64_t) region->region.guest_phys_addr,
			  (uint64_t) region->region.memory_size);
	}

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = mem_size;

#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	/*
	 * When using THP, mmap is not guaranteed to return a hugepage-aligned
	 * address, so we have to pad the mmap. Padding is not needed for
	 * HugeTLB because mmap will always return an address aligned to the
	 * HugeTLB page size.
	 */
	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(backing_src_pagesz, alignment);

	TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));

	/* Add enough memory to align up if necessary */
	if (alignment > 1)
		region->mmap_size += alignment;

	region->fd = -1;
	if (backing_src_is_shared(src_type))
		region->fd = kvm_memfd_alloc(region->mmap_size,
					     src_type == VM_MEM_SRC_SHARED_HUGETLB);

	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  vm_mem_backing_src_alias(src_type)->flag,
				  region->fd, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

	TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
		    region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
		    "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
		    region->mmap_start, backing_src_pagesz);

	/* Align host address */
	region->host_mem = align_ptr_up(region->mmap_start, alignment);

	/* As needed perform madvise */
	if ((src_type == VM_MEM_SRC_ANONYMOUS ||
	     src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
		ret = madvise(region->host_mem, mem_size,
			      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
		TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
			    region->host_mem, mem_size,
			    vm_mem_backing_src_alias(src_type)->name);
	}

	region->backing_src_type = src_type;

	if (flags & KVM_MEM_GUEST_MEMFD) {
		if (guest_memfd < 0) {
			uint32_t guest_memfd_flags = 0;
			TEST_ASSERT(!guest_memfd_offset,
				    "Offset must be zero when creating new guest_memfd");
			guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags);
		} else {
			/*
			 * Install a unique fd for each memslot so that the fd
			 * can be closed when the region is deleted without
			 * needing to track if the fd is owned by the framework
			 * or by the caller.
			 */
			guest_memfd = dup(guest_memfd);
			TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd));
		}

		region->region.guest_memfd = guest_memfd;
		region->region.guest_memfd_offset = guest_memfd_offset;
	} else {
		region->region.guest_memfd = -1;
	}

	region->unused_phy_pages = sparsebit_alloc();
	if (vm_arch_has_protected_memory(vm))
		region->protected_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
			  guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
		    "  rc: %i errno: %i\n"
		    "  slot: %u flags: 0x%x\n"
		    "  guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d",
		    ret, errno, slot, flags,
		    guest_paddr, (uint64_t) region->region.memory_size,
		    region->region.guest_memfd);

	/* Add to quick lookup data structures */
	vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
	vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
	hash_add(vm->regions.slot_hash, &region->slot_node, slot);

	/* If shared memory, create an alias. */
	if (region->fd >= 0) {
		region->mmap_alias = mmap(NULL, region->mmap_size,
					  PROT_READ | PROT_WRITE,
					  vm_mem_backing_src_alias(src_type)->flag,
					  region->fd, 0);
		TEST_ASSERT(region->mmap_alias != MAP_FAILED,
			    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

		/* Align host alias address */
		region->host_alias = align_ptr_up(region->mmap_alias, alignment);
	}
}

void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 uint64_t guest_paddr, uint32_t slot,
				 uint64_t npages, uint32_t flags)
{
	vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0);
}
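
/*
 * Example (illustrative sketch): back 512 guest pages at GPA 0x10000000 with
 * anonymous memory in memslot 1, mirroring the slot0 setup in __vm_create():
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x10000000, 1, 512, 0);
 */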

/*
 * Memslot to region
 *
 * Input Args:
 *   vm - Virtual Machine
 *   memslot - KVM memory slot ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to memory region structure that describes the memory region
 *   using the KVM memory slot ID given by memslot. TEST_ASSERT failure
 *   on error (e.g. currently no memory region using memslot as a KVM
 *   memory slot ID).
 */
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
	struct userspace_mem_region *region;

	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
			       memslot)
		if (region->region.slot == memslot)
			return region;

	fprintf(stderr, "No mem region with the requested slot found,\n"
		"  requested slot: %u\n", memslot);
	fputs("---- vm dump ----\n", stderr);
	vm_dump(stderr, vm, 2);
	TEST_FAIL("Mem region not found");
	return NULL;
}

/*
 * VM Memory Region Flags Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to update
 *   flags - Flags to set for the memory region
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the flags of the memory region specified by the value of slot,
 * to the values given by flags.
 */
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;

	region = memslot2region(vm, slot);

	region->region.flags = flags;

	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
		    "  rc: %i errno: %i slot: %u flags: 0x%x",
		    ret, errno, slot, flags);
}

/*
 * VM Memory Region Move
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to move
 *   new_gpa - Starting guest physical address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Change the gpa of a memory region.
 */
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
	struct userspace_mem_region *region;
	int ret;

	region = memslot2region(vm, slot);

	region->region.guest_phys_addr = new_gpa;

	ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed\n"
		    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
		    ret, errno, slot, new_gpa);
}

/*
 * VM Memory Region Delete
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to delete
 *
 * Output Args: None
 *
 * Return: None
 *
 * Delete a memory region.
 */
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
	__vm_mem_region_delete(vm, memslot2region(vm, slot), true);
}

void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
			    bool punch_hole)
{
	const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0);
	struct userspace_mem_region *region;
	uint64_t end = base + size;
	uint64_t gpa, len;
	off_t fd_offset;
	int ret;

	for (gpa = base; gpa < end; gpa += len) {
		uint64_t offset;

		region = userspace_mem_region_find(vm, gpa, gpa);
		TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD,
			    "Private memory region not found for GPA 0x%lx", gpa);

		offset = gpa - region->region.guest_phys_addr;
		fd_offset = region->region.guest_memfd_offset + offset;
		len = min_t(uint64_t, end - gpa, region->region.memory_size - offset);

		ret = fallocate(region->region.guest_memfd, mode, fd_offset, len);
		TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx",
			    punch_hole ? "punch hole" : "allocate", gpa, len,
			    region->region.guest_memfd, mode, fd_offset);
	}
}
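
/*
 * Example (illustrative sketch): punch a hole in, then re-allocate, one page
 * of guest_memfd backing at a given GPA:
 *
 *	vm_guest_mem_fallocate(vm, gpa, vm->page_size, true);
 *	vm_guest_mem_fallocate(vm, gpa, vm->page_size, false);
 */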

/* Returns the size of a vCPU's kvm_run structure. */
static int vcpu_mmap_sz(void)
{
	int dev_fd, ret;

	dev_fd = open_kvm_dev_path_or_exit();

	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	TEST_ASSERT(ret >= sizeof(struct kvm_run),
		    KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));

	close(dev_fd);

	return ret;
}

static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu;

	list_for_each_entry(vcpu, &vm->vcpus, list) {
		if (vcpu->id == vcpu_id)
			return true;
	}

	return false;
}

/*
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
 * No additional vCPU setup is done. Returns the vCPU.
 */
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");

	vcpu->vm = vm;
	vcpu->id = vcpu_id;
	vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
	TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm);

	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
		    "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		    vcpu_mmap_sz(), sizeof(*vcpu->run));
	vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
					    PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->run != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

	/* Add to linked-list of VCPUs. */
	list_add(&vcpu->list, &vm->vcpus);

	return vcpu;
}

/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min, with at least
 *   sz unused bytes. TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes. A
 * TEST_ASSERT failure occurs for invalid input or no area of at least
 * sz unallocated bytes >= vaddr_min is available.
 */
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
			       vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
				  pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
						     pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index?
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
					   pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
						       pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
					  pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
					 pgidx_start, pages),
		    "Unexpected, invalid virtual page index range,\n"
		    "  pgidx_start: 0x%lx\n"
		    "  pages: 0x%lx",
		    pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
					   pgidx_start, pages),
		    "Unexpected, pages already mapped,\n"
		    "  pgidx_start: 0x%lx\n"
		    "  pages: 0x%lx",
		    pgidx_start, pages);

	return pgidx_start * vm->page_size;
}

static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
				     vm_vaddr_t vaddr_min,
				     enum kvm_mem_region_type type,
				     bool protected)
{
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm);
	vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages,
						KVM_UTIL_MIN_PFN * vm->page_size,
						vm->memslots[type], protected);

	/*
	 * Find an unused range of virtual page addresses of at least
	 * pages in length.
	 */
	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	/* Map the virtual pages. */
	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
	     pages--, vaddr += vm->page_size, paddr += vm->page_size) {

		virt_pg_map(vm, vaddr, paddr);

		sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
	}

	return vaddr_start;
}

vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type)
{
	return ____vm_vaddr_alloc(vm, sz, vaddr_min, type,
				  vm_arch_has_protected_memory(vm));
}

vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
				 vm_vaddr_t vaddr_min,
				 enum kvm_mem_region_type type)
{
	return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false);
}

/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the vm
 * given by vm. The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min. Note that each allocation uses a
 * unique set of pages, with the minimum real allocation being at least
 * a page. The allocated physical space comes from the TEST_DATA memory region.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
{
	return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
}

/*
 * VM Virtual Address Allocate Pages
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least N system pages worth of bytes within the virtual address
 * space of the vm.
 */
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
{
	return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
}

vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
{
	return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
}

/*
 * VM Virtual Address Allocate Page
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least one system page worth of bytes within the virtual address
 * space of the vm.
 */
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
{
	return vm_vaddr_alloc_pages(vm, 1);
}

/*
 * Map a range of VM virtual address to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - Virtual address to map
 *   paddr - VM Physical Address
 *   npages - The number of pages to map
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by @vm, creates a virtual translation for
 * @npages starting at @vaddr to the page range starting at @paddr.
 */
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages)
{
	size_t page_size = vm->page_size;
	size_t size = npages * page_size;

	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

	while (npages--) {
		virt_pg_map(vm, vaddr, paddr);
		sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);

		vaddr += page_size;
		paddr += page_size;
	}
}
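
/*
 * Example (illustrative sketch): identity-map four pages of guest memory at
 * GPA/GVA 0x10000:
 *
 *	virt_map(vm, 0x10000, 0x10000, 4);
 */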
1596
1597 /*
1598 * Address VM Physical to Host Virtual
1599 *
1600 * Input Args:
1601 * vm - Virtual Machine
1602 * gpa - VM physical address
1603 *
1604 * Output Args: None
1605 *
1606 * Return:
1607 * Equivalent host virtual address
1608 *
1609 * Locates the memory region containing the VM physical address given
1610 * by gpa, within the VM given by vm. When found, the host virtual
1611 * address providing the memory to the vm physical address is returned.
1612 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1613 */
addr_gpa2hva(struct kvm_vm * vm,vm_paddr_t gpa)1614 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
1615 {
1616 struct userspace_mem_region *region;
1617
1618 gpa = vm_untag_gpa(vm, gpa);
1619
1620 region = userspace_mem_region_find(vm, gpa, gpa);
1621 if (!region) {
1622 TEST_FAIL("No vm physical memory at 0x%lx", gpa);
1623 return NULL;
1624 }
1625
1626 return (void *)((uintptr_t)region->host_mem
1627 + (gpa - region->region.guest_phys_addr));
1628 }
1629
1630 /*
1631 * Address Host Virtual to VM Physical
1632 *
1633 * Input Args:
1634 * vm - Virtual Machine
1635 * hva - Host virtual address
1636 *
1637 * Output Args: None
1638 *
1639 * Return:
1640 * Equivalent VM physical address
1641 *
1642 * Locates the memory region containing the host virtual address given
1643 * by hva, within the VM given by vm. When found, the equivalent
1644 * VM physical address is returned. A TEST_ASSERT failure occurs if no
1645 * region containing hva exists.
1646 */
addr_hva2gpa(struct kvm_vm * vm,void * hva)1647 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1648 {
1649 struct rb_node *node;
1650
1651 for (node = vm->regions.hva_tree.rb_node; node; ) {
1652 struct userspace_mem_region *region =
1653 container_of(node, struct userspace_mem_region, hva_node);
1654
1655 if (hva >= region->host_mem) {
1656 if (hva <= (region->host_mem
1657 + region->region.memory_size - 1))
1658 return (vm_paddr_t)((uintptr_t)
1659 region->region.guest_phys_addr
1660 + (hva - (uintptr_t)region->host_mem));
1661
1662 node = node->rb_right;
1663 } else
1664 node = node->rb_left;
1665 }
1666
1667 TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
1668 return -1;
1669 }

/*
 * Address VM physical to Host Virtual *alias*.
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent address within the host virtual *alias* area, or NULL
 *   (without failing the test) if the guest memory is not shared (so
 *   no alias exists).
 *
 * Create a writable, shared virtual=>physical alias for the specific GPA.
 * The primary use case is to allow the host selftest to manipulate guest
 * memory without mapping said memory in the guest's address space. And, for
 * userfaultfd-based demand paging, to do so without triggering userfaults.
 */
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;
	uintptr_t offset;

	region = userspace_mem_region_find(vm, gpa, gpa);
	if (!region)
		return NULL;

	if (!region->host_alias)
		return NULL;

	offset = gpa - region->region.guest_phys_addr;
	return (void *) ((uintptr_t) region->host_alias + offset);
}
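
/*
 * Illustrative sketch: write a page through the alias so that a
 * userfaultfd registered on the primary mapping never sees the access.
 * Assumes the memslot uses a shared backing type, else the alias is NULL:
 *
 *	void *alias = addr_gpa2alias(vm, gpa);
 *
 *	TEST_ASSERT(alias, "Expected a shared (aliased) backing");
 *	memset(alias, 0xaa, vm->page_size);
 */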

/* Create an interrupt controller chip for the specified VM. */
void vm_create_irqchip(struct kvm_vm *vm)
{
	vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);

	vm->has_irqchip = true;
}

int _vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	do {
		rc = __vcpu_run(vcpu);
	} while (rc == -1 && errno == EINTR);

	assert_on_unhandled_exception(vcpu);

	return rc;
}

/*
 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
 * Assert if KVM returns an error other than -EINTR.
 */
void vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret = _vcpu_run(vcpu);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->run->immediate_exit = 1;
	ret = __vcpu_run(vcpu);
	vcpu->run->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}

/*
 * Get the list of guest registers which are supported for
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls.  The list is probed in two
 * steps: a query with n=0 is expected to fail with E2BIG but fills in
 * the required number of entries, which is then used to size the real
 * query.  Returns a kvm_reg_list pointer; it is the caller's
 * responsibility to free the list.
 */
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
{
	struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
	int ret;

	ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
	TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");

	reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
	TEST_ASSERT(reg_list, "Allocate memory for the register list");
	reg_list->n = reg_list_n.n;
	vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
	return reg_list;
}
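
/*
 * Illustrative sketch: dump every supported register ID, then release
 * the list:
 *
 *	struct kvm_reg_list *list = vcpu_get_reg_list(vcpu);
 *	__u64 i;
 *
 *	for (i = 0; i < list->n; i++)
 *		pr_info("reg[%llu] = 0x%llx\n", i, list->reg[i]);
 *	free(list);
 */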

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
	uint32_t page_size = getpagesize();
	uint32_t size = vcpu->vm->dirty_ring_size;

	TEST_ASSERT(size > 0, "Should enable dirty ring first");

	if (!vcpu->dirty_gfns) {
		void *addr;

		/* KVM must refuse private and executable mappings... */
		addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");

		addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");

		/* ...only a shared, read-write mapping is expected to succeed. */
		addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
		TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");

		vcpu->dirty_gfns = addr;
		vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
	}

	return vcpu->dirty_gfns;
}
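
/*
 * Illustrative sketch: walk the mapped ring.  Field names follow struct
 * kvm_dirty_gfn from the KVM uapi; a real harvester must also honor the
 * dirty/harvested flag protocol and the ring reset ioctl:
 *
 *	struct kvm_dirty_gfn *gfns = vcpu_map_dirty_ring(vcpu);
 *	uint32_t i;
 *
 *	for (i = 0; i < vcpu->dirty_gfns_count; i++)
 *		pr_info("slot %u, offset 0x%llx\n",
 *			gfns[i].slot, (unsigned long long)gfns[i].offset);
 */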

/*
 * Device Ioctl
 */

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	struct kvm_device_attr attribute = {
		.group = group,
		.attr = attr,
		.flags = 0,
	};

	return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
{
	struct kvm_create_device create_dev = {
		.type = type,
		.flags = KVM_CREATE_DEVICE_TEST,
	};

	return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
}

int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	struct kvm_create_device create_dev = {
		.type = type,
		.fd = -1,
		.flags = 0,
	};
	int err;

	err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
	TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value");
	return err ? : create_dev.fd;
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
	struct kvm_device_attr kvmattr = {
		.group = group,
		.attr = attr,
		.flags = 0,
		.addr = (uintptr_t)val,
	};

	return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
	struct kvm_device_attr kvmattr = {
		.group = group,
		.attr = attr,
		.flags = 0,
		.addr = (uintptr_t)val,
	};

	return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
}
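
/*
 * Illustrative sketch of the create/probe/get flow.  EXAMPLE_DEV_TYPE,
 * EXAMPLE_GROUP and EXAMPLE_ATTR are placeholders; real values are arch
 * and device specific (e.g. the vGIC attributes on arm64):
 *
 *	int dev_fd = __kvm_create_device(vm, EXAMPLE_DEV_TYPE);
 *	uint64_t val;
 *
 *	if (!__kvm_has_device_attr(dev_fd, EXAMPLE_GROUP, EXAMPLE_ATTR))
 *		TEST_ASSERT(!__kvm_device_attr_get(dev_fd, EXAMPLE_GROUP,
 *						   EXAMPLE_ATTR, &val),
 *			    "Failed to get example attribute");
 */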

/*
 * IRQ related functions.
 */

int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
	struct kvm_irq_level irq_level = {
		.irq = irq,
		.level = level,
	};

	return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
	int ret = _kvm_irq_line(vm, irq, level);

	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}

struct kvm_irq_routing *kvm_gsi_routing_create(void)
{
	struct kvm_irq_routing *routing;
	size_t size;

	size = sizeof(struct kvm_irq_routing);
	/* Allocate space for the max number of entries: this wastes 196 KBs. */
	size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
	routing = calloc(1, size);
	assert(routing);

	return routing;
}

void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin)
{
	int i;

	assert(routing);
	assert(routing->nr < KVM_MAX_IRQ_ROUTES);

	i = routing->nr;
	routing->entries[i].gsi = gsi;
	routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
	routing->entries[i].flags = 0;
	routing->entries[i].u.irqchip.irqchip = 0;
	routing->entries[i].u.irqchip.pin = pin;
	routing->nr++;
}

/* Note: the routing table is consumed (freed) regardless of the outcome. */
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
	int ret;

	assert(routing);
	ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
	free(routing);

	return ret;
}

void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
	int ret;

	ret = _kvm_gsi_routing_write(vm, routing);
	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret));
}
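
/*
 * Illustrative sketch of the routing flow: build a table, add entries,
 * then hand it off.  The write consumes the table, so "routing" must not
 * be reused or freed afterwards:
 *
 *	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
 *
 *	kvm_gsi_routing_irqchip_add(routing, 10, 10);
 *	kvm_gsi_routing_irqchip_add(routing, 11, 11);
 *	kvm_gsi_routing_write(vm, routing);
 */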

/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int ctr;
	struct userspace_mem_region *region;
	struct kvm_vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
		if (region->protected_phy_pages) {
			fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
			sparsebit_dump(stream, region->protected_phy_pages, 0);
		}
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");

	list_for_each_entry(vcpu, &vm->vcpus, list)
		vcpu_dump(stream, vcpu, indent + 2);
}

#define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x}

/* Known KVM exit reasons */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	KVM_EXIT_STRING(UNKNOWN),
	KVM_EXIT_STRING(EXCEPTION),
	KVM_EXIT_STRING(IO),
	KVM_EXIT_STRING(HYPERCALL),
	KVM_EXIT_STRING(DEBUG),
	KVM_EXIT_STRING(HLT),
	KVM_EXIT_STRING(MMIO),
	KVM_EXIT_STRING(IRQ_WINDOW_OPEN),
	KVM_EXIT_STRING(SHUTDOWN),
	KVM_EXIT_STRING(FAIL_ENTRY),
	KVM_EXIT_STRING(INTR),
	KVM_EXIT_STRING(SET_TPR),
	KVM_EXIT_STRING(TPR_ACCESS),
	KVM_EXIT_STRING(S390_SIEIC),
	KVM_EXIT_STRING(S390_RESET),
	KVM_EXIT_STRING(DCR),
	KVM_EXIT_STRING(NMI),
	KVM_EXIT_STRING(INTERNAL_ERROR),
	KVM_EXIT_STRING(OSI),
	KVM_EXIT_STRING(PAPR_HCALL),
	KVM_EXIT_STRING(S390_UCONTROL),
	KVM_EXIT_STRING(WATCHDOG),
	KVM_EXIT_STRING(S390_TSCH),
	KVM_EXIT_STRING(EPR),
	KVM_EXIT_STRING(SYSTEM_EVENT),
	KVM_EXIT_STRING(S390_STSI),
	KVM_EXIT_STRING(IOAPIC_EOI),
	KVM_EXIT_STRING(HYPERV),
	KVM_EXIT_STRING(ARM_NISV),
	KVM_EXIT_STRING(X86_RDMSR),
	KVM_EXIT_STRING(X86_WRMSR),
	KVM_EXIT_STRING(DIRTY_RING_FULL),
	KVM_EXIT_STRING(AP_RESET_HOLD),
	KVM_EXIT_STRING(X86_BUS_LOCK),
	KVM_EXIT_STRING(XEN),
	KVM_EXIT_STRING(RISCV_SBI),
	KVM_EXIT_STRING(RISCV_CSR),
	KVM_EXIT_STRING(NOTIFY),
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	KVM_EXIT_STRING(MEMORY_NOT_PRESENT),
#endif
};

/*
 * Exit Reason String
 *
 * Input Args:
 *   exit_reason - Exit reason
 *
 * Output Args: None
 *
 * Return:
 *   Constant string pointer describing the exit reason.
 *
 * Locates and returns a constant string that describes the KVM exit
 * reason given by exit_reason.  If no such string is found, a constant
 * string of "Unknown" is returned.
 */
const char *exit_reason_str(unsigned int exit_reason)
{
	unsigned int n1;

	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
		if (exit_reason == exit_reasons_known[n1].reason)
			return exit_reasons_known[n1].name;
	}

	return "Unknown";
}
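
/*
 * Illustrative sketch: a typical exit-reason check in a test's run loop:
 *
 *	vcpu_run(vcpu);
 *	TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_IO,
 *		    "Unexpected exit reason: %u (%s)",
 *		    vcpu->run->exit_reason,
 *		    exit_reason_str(vcpu->run->exit_reason));
 */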

/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate page from
 *   protected - True if the pages will be used as protected/private memory
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above paddr_min.  If found, the pages are marked as in use
 * and their base address is returned.  A TEST_ASSERT failure occurs if
 * not enough pages are available at or above paddr_min.
 */
vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				vm_paddr_t paddr_min, uint32_t memslot,
				bool protected)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		"  paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	TEST_ASSERT(!protected || region->protected_phy_pages,
		    "Region doesn't support protected memory");

	/*
	 * Scan for "num" contiguous free pages: whenever an in-use page is
	 * hit, restart the window at the next free (set) bit.  A pg of 0
	 * means sparsebit_next_set() found no more free pages.
	 */
	base = pg = paddr_min >> vm->page_shift;
	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	for (pg = base; pg < base + num; ++pg) {
		sparsebit_clear(region->unused_phy_pages, pg);
		if (protected)
			sparsebit_set(region->protected_phy_pages, pg);
	}

	return base * vm->page_size;
}

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot)
{
	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}

vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
{
	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				 vm->memslots[MEM_REGION_PT]);
}

/*
 * Address Guest Virtual to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 */
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}

unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm)
{
	return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
}

static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n;

	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	/*
	 * Compute the divisor only on this path; the unsigned subtraction
	 * would wrap (and the shift overflow) if page_shift were larger.
	 */
	n = 1 << (new_page_shift - page_shift);
	return num_pages / n + !!(ceil && num_pages % n);
}

static inline int getpageshift(void)
{
	return __builtin_ffs(getpagesize()) - 1;
}

unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	return vm_calc_num_pages(num_guest_pages,
				 vm_guest_mode_params[mode].page_shift,
				 getpageshift(), true);
}

unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
	return vm_calc_num_pages(num_host_pages, getpageshift(),
				 vm_guest_mode_params[mode].page_shift, false);
}

unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
	unsigned int n;

	n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
	return vm_adjust_num_guest_pages(mode, n);
}
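
/*
 * Worked example (illustrative): with 4 KiB host pages and a guest mode
 * using 64 KiB pages, one guest page spans 16 host pages, so
 * vm_num_host_pages() rounds up while vm_num_guest_pages() rounds down:
 *
 *	vm_num_host_pages(mode, 1);	// -> 16
 *	vm_num_guest_pages(mode, 17);	// -> 1
 */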

/*
 * Read binary stats descriptors
 *
 * Input Args:
 *   stats_fd - the file descriptor for the binary stats file from which to read
 *   header - the binary stats metadata header corresponding to the given FD
 *
 * Output Args: None
 *
 * Return:
 *   A pointer to a newly allocated series of stat descriptors.
 *   Caller is responsible for freeing the returned kvm_stats_desc.
 *
 * Read the stats descriptors from the binary stats interface.
 */
struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header)
{
	struct kvm_stats_desc *stats_desc;
	ssize_t desc_size, total_size, ret;

	desc_size = get_stats_descriptor_size(header);
	total_size = header->num_desc * desc_size;

	stats_desc = calloc(header->num_desc, desc_size);
	TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors");

	ret = pread(stats_fd, stats_desc, total_size, header->desc_offset);
	TEST_ASSERT(ret == total_size, "Read KVM stats descriptors");

	return stats_desc;
}

/*
 * Read stat data for a particular stat
 *
 * Input Args:
 *   stats_fd - the file descriptor for the binary stats file from which to read
 *   header - the binary stats metadata header corresponding to the given FD
 *   desc - the binary stat metadata for the particular stat to be read
 *   max_elements - the maximum number of 8-byte values to read into data
 *
 * Output Args:
 *   data - the buffer into which stat data should be read
 *
 * Read the data values of a specified stat from the binary stats interface.
 */
void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements)
{
	size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
	size_t size = nr_elements * sizeof(*data);
	ssize_t ret;

	TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name);
	TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name);

	ret = pread(stats_fd, data, size,
		    header->data_offset + desc->offset);

	TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)",
		    desc->name, errno, strerror(errno));
	TEST_ASSERT(ret == size,
		    "pread() on stat '%s' read %ld bytes, wanted %lu bytes",
		    desc->name, ret, size);
}

/*
 * Read the data of the named stat
 *
 * Input Args:
 *   vm - the VM for which the stat should be read
 *   stat_name - the name of the stat to read
 *   max_elements - the maximum number of 8-byte values to read into data
 *
 * Output Args:
 *   data - the buffer into which stat data should be read
 *
 * Read the data values of a specified stat from the binary stats interface.
 */
void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
		   size_t max_elements)
{
	struct kvm_stats_desc *desc;
	size_t size_desc;
	int i;

	/* Lazily open and cache the stats FD, header, and descriptors. */
	if (!vm->stats_fd) {
		vm->stats_fd = vm_get_stats_fd(vm);
		read_stats_header(vm->stats_fd, &vm->stats_header);
		vm->stats_desc = read_stats_descriptors(vm->stats_fd,
							&vm->stats_header);
	}

	size_desc = get_stats_descriptor_size(&vm->stats_header);

	for (i = 0; i < vm->stats_header.num_desc; ++i) {
		desc = (void *)vm->stats_desc + (i * size_desc);

		if (strcmp(desc->name, stat_name))
			continue;

		read_stat_data(vm->stats_fd, &vm->stats_header, desc,
			       data, max_elements);

		break;
	}
}
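
/*
 * Illustrative sketch: fetch a single-element VM stat by name.  The stat
 * name is an example; available names vary by architecture and kernel
 * version (see the descriptors returned by read_stats_descriptors()):
 *
 *	uint64_t pages = 0;
 *
 *	__vm_get_stat(vm, "pages_4k", &pages, 1);
 */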

__weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
{
}

__weak void kvm_selftest_arch_init(void)
{
}

void __attribute((constructor)) kvm_selftest_init(void)
{
	/* Tell stdout not to buffer its content. */
	setbuf(stdout, NULL);

	guest_random_seed = last_guest_seed = random();
	pr_info("Random seed: 0x%x\n", guest_random_seed);

	kvm_selftest_arch_init();
}

bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
{
	sparsebit_idx_t pg = 0;
	struct userspace_mem_region *region;

	if (!vm_arch_has_protected_memory(vm))
		return false;

	region = userspace_mem_region_find(vm, paddr, paddr);
	TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);

	pg = paddr >> vm->page_shift;
	return sparsebit_is_set(region->protected_phy_pages, pg);
}