1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * tools/testing/selftests/kvm/lib/kvm_util.c 4 * 5 * Copyright (C) 2018, Google LLC. 6 */ 7 8 #define _GNU_SOURCE /* for program_invocation_name */ 9 #include "test_util.h" 10 #include "kvm_util.h" 11 #include "processor.h" 12 13 #include <assert.h> 14 #include <sched.h> 15 #include <sys/mman.h> 16 #include <sys/types.h> 17 #include <sys/stat.h> 18 #include <unistd.h> 19 #include <linux/kernel.h> 20 21 #define KVM_UTIL_MIN_PFN 2 22 23 static int vcpu_mmap_sz(void); 24 25 int open_path_or_exit(const char *path, int flags) 26 { 27 int fd; 28 29 fd = open(path, flags); 30 __TEST_REQUIRE(fd >= 0 || errno != ENOENT, "Cannot open %s: %s", path, strerror(errno)); 31 TEST_ASSERT(fd >= 0, "Failed to open '%s'", path); 32 33 return fd; 34 } 35 36 /* 37 * Open KVM_DEV_PATH if available, otherwise exit the entire program. 38 * 39 * Input Args: 40 * flags - The flags to pass when opening KVM_DEV_PATH. 41 * 42 * Return: 43 * The opened file descriptor of /dev/kvm. 44 */ 45 static int _open_kvm_dev_path_or_exit(int flags) 46 { 47 return open_path_or_exit(KVM_DEV_PATH, flags); 48 } 49 50 int open_kvm_dev_path_or_exit(void) 51 { 52 return _open_kvm_dev_path_or_exit(O_RDONLY); 53 } 54 55 static ssize_t get_module_param(const char *module_name, const char *param, 56 void *buffer, size_t buffer_size) 57 { 58 const int path_size = 128; 59 char path[path_size]; 60 ssize_t bytes_read; 61 int fd, r; 62 63 r = snprintf(path, path_size, "/sys/module/%s/parameters/%s", 64 module_name, param); 65 TEST_ASSERT(r < path_size, 66 "Failed to construct sysfs path in %d bytes.", path_size); 67 68 fd = open_path_or_exit(path, O_RDONLY); 69 70 bytes_read = read(fd, buffer, buffer_size); 71 TEST_ASSERT(bytes_read > 0, "read(%s) returned %ld, wanted %ld bytes", 72 path, bytes_read, buffer_size); 73 74 r = close(fd); 75 TEST_ASSERT(!r, "close(%s) failed", path); 76 return bytes_read; 77 } 78 79 static int get_module_param_integer(const char *module_name, const char *param) 80 { 81 /* 82 * 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the 83 * NUL char, and 1 byte because the kernel sucks and inserts a newline 84 * at the end. 85 */ 86 char value[16 + 1 + 1]; 87 ssize_t r; 88 89 memset(value, '\0', sizeof(value)); 90 91 r = get_module_param(module_name, param, value, sizeof(value)); 92 TEST_ASSERT(value[r - 1] == '\n', 93 "Expected trailing newline, got char '%c'", value[r - 1]); 94 95 /* 96 * Squash the newline, otherwise atoi_paranoid() will complain about 97 * trailing non-NUL characters in the string. 98 */ 99 value[r - 1] = '\0'; 100 return atoi_paranoid(value); 101 } 102 103 static bool get_module_param_bool(const char *module_name, const char *param) 104 { 105 char value; 106 ssize_t r; 107 108 r = get_module_param(module_name, param, &value, sizeof(value)); 109 TEST_ASSERT_EQ(r, 1); 110 111 if (value == 'Y') 112 return true; 113 else if (value == 'N') 114 return false; 115 116 TEST_FAIL("Unrecognized value '%c' for boolean module param", value); 117 } 118 119 bool get_kvm_param_bool(const char *param) 120 { 121 return get_module_param_bool("kvm", param); 122 } 123 124 bool get_kvm_intel_param_bool(const char *param) 125 { 126 return get_module_param_bool("kvm_intel", param); 127 } 128 129 bool get_kvm_amd_param_bool(const char *param) 130 { 131 return get_module_param_bool("kvm_amd", param); 132 } 133 134 int get_kvm_param_integer(const char *param) 135 { 136 return get_module_param_integer("kvm", param); 137 } 138 139 int get_kvm_intel_param_integer(const char *param) 140 { 141 return get_module_param_integer("kvm_intel", param); 142 } 143 144 int get_kvm_amd_param_integer(const char *param) 145 { 146 return get_module_param_integer("kvm_amd", param); 147 } 148 149 /* 150 * Capability 151 * 152 * Input Args: 153 * cap - Capability 154 * 155 * Output Args: None 156 * 157 * Return: 158 * On success, the Value corresponding to the capability (KVM_CAP_*) 159 * specified by the value of cap. On failure a TEST_ASSERT failure 160 * is produced. 161 * 162 * Looks up and returns the value corresponding to the capability 163 * (KVM_CAP_*) given by cap. 164 */ 165 unsigned int kvm_check_cap(long cap) 166 { 167 int ret; 168 int kvm_fd; 169 170 kvm_fd = open_kvm_dev_path_or_exit(); 171 ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap); 172 TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret)); 173 174 close(kvm_fd); 175 176 return (unsigned int)ret; 177 } 178 179 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) 180 { 181 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) 182 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); 183 else 184 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); 185 vm->dirty_ring_size = ring_size; 186 } 187 188 static void vm_open(struct kvm_vm *vm) 189 { 190 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); 191 192 TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT)); 193 194 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); 195 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); 196 } 197 198 const char *vm_guest_mode_string(uint32_t i) 199 { 200 static const char * const strings[] = { 201 [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", 202 [VM_MODE_P52V48_16K] = "PA-bits:52, VA-bits:48, 16K pages", 203 [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages", 204 [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages", 205 [VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages", 206 [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages", 207 [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages", 208 [VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages", 209 [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages", 210 [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", 211 [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages", 212 [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages", 213 [VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages", 214 [VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages", 215 [VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages", 216 [VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages", 217 }; 218 _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES, 219 "Missing new mode strings?"); 220 221 TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i); 222 223 return strings[i]; 224 } 225 226 const struct vm_guest_mode_params vm_guest_mode_params[] = { 227 [VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 }, 228 [VM_MODE_P52V48_16K] = { 52, 48, 0x4000, 14 }, 229 [VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 }, 230 [VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 }, 231 [VM_MODE_P48V48_16K] = { 48, 48, 0x4000, 14 }, 232 [VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 }, 233 [VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 }, 234 [VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 }, 235 [VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 }, 236 [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 }, 237 [VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 }, 238 [VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 }, 239 [VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 }, 240 [VM_MODE_P36V48_16K] = { 36, 48, 0x4000, 14 }, 241 [VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 }, 242 [VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 }, 243 }; 244 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES, 245 "Missing new mode params?"); 246 247 /* 248 * Initializes vm->vpages_valid to match the canonical VA space of the 249 * architecture. 250 * 251 * The default implementation is valid for architectures which split the 252 * range addressed by a single page table into a low and high region 253 * based on the MSB of the VA. On architectures with this behavior 254 * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1), -1]. 255 */ 256 __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm) 257 { 258 sparsebit_set_num(vm->vpages_valid, 259 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); 260 sparsebit_set_num(vm->vpages_valid, 261 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, 262 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); 263 } 264 265 struct kvm_vm *____vm_create(struct vm_shape shape) 266 { 267 struct kvm_vm *vm; 268 269 vm = calloc(1, sizeof(*vm)); 270 TEST_ASSERT(vm != NULL, "Insufficient Memory"); 271 272 INIT_LIST_HEAD(&vm->vcpus); 273 vm->regions.gpa_tree = RB_ROOT; 274 vm->regions.hva_tree = RB_ROOT; 275 hash_init(vm->regions.slot_hash); 276 277 vm->mode = shape.mode; 278 vm->type = shape.type; 279 280 vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits; 281 vm->va_bits = vm_guest_mode_params[vm->mode].va_bits; 282 vm->page_size = vm_guest_mode_params[vm->mode].page_size; 283 vm->page_shift = vm_guest_mode_params[vm->mode].page_shift; 284 285 /* Setup mode specific traits. */ 286 switch (vm->mode) { 287 case VM_MODE_P52V48_4K: 288 vm->pgtable_levels = 4; 289 break; 290 case VM_MODE_P52V48_64K: 291 vm->pgtable_levels = 3; 292 break; 293 case VM_MODE_P48V48_4K: 294 vm->pgtable_levels = 4; 295 break; 296 case VM_MODE_P48V48_64K: 297 vm->pgtable_levels = 3; 298 break; 299 case VM_MODE_P40V48_4K: 300 case VM_MODE_P36V48_4K: 301 vm->pgtable_levels = 4; 302 break; 303 case VM_MODE_P40V48_64K: 304 case VM_MODE_P36V48_64K: 305 vm->pgtable_levels = 3; 306 break; 307 case VM_MODE_P52V48_16K: 308 case VM_MODE_P48V48_16K: 309 case VM_MODE_P40V48_16K: 310 case VM_MODE_P36V48_16K: 311 vm->pgtable_levels = 4; 312 break; 313 case VM_MODE_P36V47_16K: 314 vm->pgtable_levels = 3; 315 break; 316 case VM_MODE_PXXV48_4K: 317 #ifdef __x86_64__ 318 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); 319 kvm_init_vm_address_properties(vm); 320 /* 321 * Ignore KVM support for 5-level paging (vm->va_bits == 57), 322 * it doesn't take effect unless a CR4.LA57 is set, which it 323 * isn't for this mode (48-bit virtual address space). 324 */ 325 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, 326 "Linear address width (%d bits) not supported", 327 vm->va_bits); 328 pr_debug("Guest physical address width detected: %d\n", 329 vm->pa_bits); 330 vm->pgtable_levels = 4; 331 vm->va_bits = 48; 332 #else 333 TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms"); 334 #endif 335 break; 336 case VM_MODE_P47V64_4K: 337 vm->pgtable_levels = 5; 338 break; 339 case VM_MODE_P44V64_4K: 340 vm->pgtable_levels = 5; 341 break; 342 default: 343 TEST_FAIL("Unknown guest mode: 0x%x", vm->mode); 344 } 345 346 #ifdef __aarch64__ 347 TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types"); 348 if (vm->pa_bits != 40) 349 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); 350 #endif 351 352 vm_open(vm); 353 354 /* Limit to VA-bit canonical virtual addresses. */ 355 vm->vpages_valid = sparsebit_alloc(); 356 vm_vaddr_populate_bitmap(vm); 357 358 /* Limit physical addresses to PA-bits. */ 359 vm->max_gfn = vm_compute_max_gfn(vm); 360 361 /* Allocate and setup memory for guest. */ 362 vm->vpages_mapped = sparsebit_alloc(); 363 364 return vm; 365 } 366 367 static uint64_t vm_nr_pages_required(enum vm_guest_mode mode, 368 uint32_t nr_runnable_vcpus, 369 uint64_t extra_mem_pages) 370 { 371 uint64_t page_size = vm_guest_mode_params[mode].page_size; 372 uint64_t nr_pages; 373 374 TEST_ASSERT(nr_runnable_vcpus, 375 "Use vm_create_barebones() for VMs that _never_ have vCPUs"); 376 377 TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS), 378 "nr_vcpus = %d too large for host, max-vcpus = %d", 379 nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS)); 380 381 /* 382 * Arbitrarily allocate 512 pages (2mb when page size is 4kb) for the 383 * test code and other per-VM assets that will be loaded into memslot0. 384 */ 385 nr_pages = 512; 386 387 /* Account for the per-vCPU stacks on behalf of the test. */ 388 nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS; 389 390 /* 391 * Account for the number of pages needed for the page tables. The 392 * maximum page table size for a memory region will be when the 393 * smallest page size is used. Considering each page contains x page 394 * table descriptors, the total extra size for page tables (for extra 395 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller 396 * than N/x*2. 397 */ 398 nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2; 399 400 /* Account for the number of pages needed by ucall. */ 401 nr_pages += ucall_nr_pages_required(page_size); 402 403 return vm_adjust_num_guest_pages(mode, nr_pages); 404 } 405 406 struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, 407 uint64_t nr_extra_pages) 408 { 409 uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus, 410 nr_extra_pages); 411 struct userspace_mem_region *slot0; 412 struct kvm_vm *vm; 413 int i; 414 415 pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__, 416 vm_guest_mode_string(shape.mode), shape.type, nr_pages); 417 418 vm = ____vm_create(shape); 419 420 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0); 421 for (i = 0; i < NR_MEM_REGIONS; i++) 422 vm->memslots[i] = 0; 423 424 kvm_vm_elf_load(vm, program_invocation_name); 425 426 /* 427 * TODO: Add proper defines to protect the library's memslots, and then 428 * carve out memslot1 for the ucall MMIO address. KVM treats writes to 429 * read-only memslots as MMIO, and creating a read-only memslot for the 430 * MMIO region would prevent silently clobbering the MMIO region. 431 */ 432 slot0 = memslot2region(vm, 0); 433 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); 434 435 kvm_arch_vm_post_create(vm); 436 437 return vm; 438 } 439 440 /* 441 * VM Create with customized parameters 442 * 443 * Input Args: 444 * mode - VM Mode (e.g. VM_MODE_P52V48_4K) 445 * nr_vcpus - VCPU count 446 * extra_mem_pages - Non-slot0 physical memory total size 447 * guest_code - Guest entry point 448 * vcpuids - VCPU IDs 449 * 450 * Output Args: None 451 * 452 * Return: 453 * Pointer to opaque structure that describes the created VM. 454 * 455 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K). 456 * extra_mem_pages is only used to calculate the maximum page table size, 457 * no real memory allocation for non-slot0 memory in this function. 458 */ 459 struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, 460 uint64_t extra_mem_pages, 461 void *guest_code, struct kvm_vcpu *vcpus[]) 462 { 463 struct kvm_vm *vm; 464 int i; 465 466 TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array"); 467 468 vm = __vm_create(shape, nr_vcpus, extra_mem_pages); 469 470 for (i = 0; i < nr_vcpus; ++i) 471 vcpus[i] = vm_vcpu_add(vm, i, guest_code); 472 473 return vm; 474 } 475 476 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, 477 struct kvm_vcpu **vcpu, 478 uint64_t extra_mem_pages, 479 void *guest_code) 480 { 481 struct kvm_vcpu *vcpus[1]; 482 struct kvm_vm *vm; 483 484 vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus); 485 486 *vcpu = vcpus[0]; 487 return vm; 488 } 489 490 /* 491 * VM Restart 492 * 493 * Input Args: 494 * vm - VM that has been released before 495 * 496 * Output Args: None 497 * 498 * Reopens the file descriptors associated to the VM and reinstates the 499 * global state, such as the irqchip and the memory regions that are mapped 500 * into the guest. 501 */ 502 void kvm_vm_restart(struct kvm_vm *vmp) 503 { 504 int ctr; 505 struct userspace_mem_region *region; 506 507 vm_open(vmp); 508 if (vmp->has_irqchip) 509 vm_create_irqchip(vmp); 510 511 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { 512 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, ®ion->region); 513 514 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n" 515 " rc: %i errno: %i\n" 516 " slot: %u flags: 0x%x\n" 517 " guest_phys_addr: 0x%llx size: 0x%llx", 518 ret, errno, region->region.slot, 519 region->region.flags, 520 region->region.guest_phys_addr, 521 region->region.memory_size); 522 } 523 } 524 525 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, 526 uint32_t vcpu_id) 527 { 528 return __vm_vcpu_add(vm, vcpu_id); 529 } 530 531 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm) 532 { 533 kvm_vm_restart(vm); 534 535 return vm_vcpu_recreate(vm, 0); 536 } 537 538 void kvm_pin_this_task_to_pcpu(uint32_t pcpu) 539 { 540 cpu_set_t mask; 541 int r; 542 543 CPU_ZERO(&mask); 544 CPU_SET(pcpu, &mask); 545 r = sched_setaffinity(0, sizeof(mask), &mask); 546 TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.", pcpu); 547 } 548 549 static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) 550 { 551 uint32_t pcpu = atoi_non_negative("CPU number", cpu_str); 552 553 TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask), 554 "Not allowed to run on pCPU '%d', check cgroups?", pcpu); 555 return pcpu; 556 } 557 558 void kvm_print_vcpu_pinning_help(void) 559 { 560 const char *name = program_invocation_name; 561 562 printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n" 563 " values (target pCPU), one for each vCPU, plus an optional\n" 564 " entry for the main application task (specified via entry\n" 565 " <nr_vcpus + 1>). If used, entries must be provided for all\n" 566 " vCPUs, i.e. pinning vCPUs is all or nothing.\n\n" 567 " E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n" 568 " vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n" 569 " %s -v 3 -c 22,23,24,50\n\n" 570 " To leave the application task unpinned, drop the final entry:\n\n" 571 " %s -v 3 -c 22,23,24\n\n" 572 " (default: no pinning)\n", name, name); 573 } 574 575 void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], 576 int nr_vcpus) 577 { 578 cpu_set_t allowed_mask; 579 char *cpu, *cpu_list; 580 char delim[2] = ","; 581 int i, r; 582 583 cpu_list = strdup(pcpus_string); 584 TEST_ASSERT(cpu_list, "strdup() allocation failed."); 585 586 r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask); 587 TEST_ASSERT(!r, "sched_getaffinity() failed"); 588 589 cpu = strtok(cpu_list, delim); 590 591 /* 1. Get all pcpus for vcpus. */ 592 for (i = 0; i < nr_vcpus; i++) { 593 TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'", i); 594 vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask); 595 cpu = strtok(NULL, delim); 596 } 597 598 /* 2. Check if the main worker needs to be pinned. */ 599 if (cpu) { 600 kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask)); 601 cpu = strtok(NULL, delim); 602 } 603 604 TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu); 605 free(cpu_list); 606 } 607 608 /* 609 * Userspace Memory Region Find 610 * 611 * Input Args: 612 * vm - Virtual Machine 613 * start - Starting VM physical address 614 * end - Ending VM physical address, inclusive. 615 * 616 * Output Args: None 617 * 618 * Return: 619 * Pointer to overlapping region, NULL if no such region. 620 * 621 * Searches for a region with any physical memory that overlaps with 622 * any portion of the guest physical addresses from start to end 623 * inclusive. If multiple overlapping regions exist, a pointer to any 624 * of the regions is returned. Null is returned only when no overlapping 625 * region exists. 626 */ 627 static struct userspace_mem_region * 628 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) 629 { 630 struct rb_node *node; 631 632 for (node = vm->regions.gpa_tree.rb_node; node; ) { 633 struct userspace_mem_region *region = 634 container_of(node, struct userspace_mem_region, gpa_node); 635 uint64_t existing_start = region->region.guest_phys_addr; 636 uint64_t existing_end = region->region.guest_phys_addr 637 + region->region.memory_size - 1; 638 if (start <= existing_end && end >= existing_start) 639 return region; 640 641 if (start < existing_start) 642 node = node->rb_left; 643 else 644 node = node->rb_right; 645 } 646 647 return NULL; 648 } 649 650 __weak void vcpu_arch_free(struct kvm_vcpu *vcpu) 651 { 652 653 } 654 655 /* 656 * VM VCPU Remove 657 * 658 * Input Args: 659 * vcpu - VCPU to remove 660 * 661 * Output Args: None 662 * 663 * Return: None, TEST_ASSERT failures for all error conditions 664 * 665 * Removes a vCPU from a VM and frees its resources. 666 */ 667 static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu) 668 { 669 int ret; 670 671 if (vcpu->dirty_gfns) { 672 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); 673 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); 674 vcpu->dirty_gfns = NULL; 675 } 676 677 ret = munmap(vcpu->run, vcpu_mmap_sz()); 678 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); 679 680 ret = close(vcpu->fd); 681 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret)); 682 683 list_del(&vcpu->list); 684 685 vcpu_arch_free(vcpu); 686 free(vcpu); 687 } 688 689 void kvm_vm_release(struct kvm_vm *vmp) 690 { 691 struct kvm_vcpu *vcpu, *tmp; 692 int ret; 693 694 list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list) 695 vm_vcpu_rm(vmp, vcpu); 696 697 ret = close(vmp->fd); 698 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret)); 699 700 ret = close(vmp->kvm_fd); 701 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret)); 702 } 703 704 static void __vm_mem_region_delete(struct kvm_vm *vm, 705 struct userspace_mem_region *region, 706 bool unlink) 707 { 708 int ret; 709 710 if (unlink) { 711 rb_erase(®ion->gpa_node, &vm->regions.gpa_tree); 712 rb_erase(®ion->hva_node, &vm->regions.hva_tree); 713 hash_del(®ion->slot_node); 714 } 715 716 region->region.memory_size = 0; 717 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); 718 719 sparsebit_free(®ion->unused_phy_pages); 720 sparsebit_free(®ion->protected_phy_pages); 721 ret = munmap(region->mmap_start, region->mmap_size); 722 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); 723 if (region->fd >= 0) { 724 /* There's an extra map when using shared memory. */ 725 ret = munmap(region->mmap_alias, region->mmap_size); 726 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); 727 close(region->fd); 728 } 729 if (region->region.guest_memfd >= 0) 730 close(region->region.guest_memfd); 731 732 free(region); 733 } 734 735 /* 736 * Destroys and frees the VM pointed to by vmp. 737 */ 738 void kvm_vm_free(struct kvm_vm *vmp) 739 { 740 int ctr; 741 struct hlist_node *node; 742 struct userspace_mem_region *region; 743 744 if (vmp == NULL) 745 return; 746 747 /* Free cached stats metadata and close FD */ 748 if (vmp->stats_fd) { 749 free(vmp->stats_desc); 750 close(vmp->stats_fd); 751 } 752 753 /* Free userspace_mem_regions. */ 754 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) 755 __vm_mem_region_delete(vmp, region, false); 756 757 /* Free sparsebit arrays. */ 758 sparsebit_free(&vmp->vpages_valid); 759 sparsebit_free(&vmp->vpages_mapped); 760 761 kvm_vm_release(vmp); 762 763 /* Free the structure describing the VM. */ 764 free(vmp); 765 } 766 767 int kvm_memfd_alloc(size_t size, bool hugepages) 768 { 769 int memfd_flags = MFD_CLOEXEC; 770 int fd, r; 771 772 if (hugepages) 773 memfd_flags |= MFD_HUGETLB; 774 775 fd = memfd_create("kvm_selftest", memfd_flags); 776 TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd)); 777 778 r = ftruncate(fd, size); 779 TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r)); 780 781 r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size); 782 TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r)); 783 784 return fd; 785 } 786 787 /* 788 * Memory Compare, host virtual to guest virtual 789 * 790 * Input Args: 791 * hva - Starting host virtual address 792 * vm - Virtual Machine 793 * gva - Starting guest virtual address 794 * len - number of bytes to compare 795 * 796 * Output Args: None 797 * 798 * Input/Output Args: None 799 * 800 * Return: 801 * Returns 0 if the bytes starting at hva for a length of len 802 * are equal the guest virtual bytes starting at gva. Returns 803 * a value < 0, if bytes at hva are less than those at gva. 804 * Otherwise a value > 0 is returned. 805 * 806 * Compares the bytes starting at the host virtual address hva, for 807 * a length of len, to the guest bytes starting at the guest virtual 808 * address given by gva. 809 */ 810 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) 811 { 812 size_t amt; 813 814 /* 815 * Compare a batch of bytes until either a match is found 816 * or all the bytes have been compared. 817 */ 818 for (uintptr_t offset = 0; offset < len; offset += amt) { 819 uintptr_t ptr1 = (uintptr_t)hva + offset; 820 821 /* 822 * Determine host address for guest virtual address 823 * at offset. 824 */ 825 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); 826 827 /* 828 * Determine amount to compare on this pass. 829 * Don't allow the comparsion to cross a page boundary. 830 */ 831 amt = len - offset; 832 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) 833 amt = vm->page_size - (ptr1 % vm->page_size); 834 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) 835 amt = vm->page_size - (ptr2 % vm->page_size); 836 837 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); 838 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); 839 840 /* 841 * Perform the comparison. If there is a difference 842 * return that result to the caller, otherwise need 843 * to continue on looking for a mismatch. 844 */ 845 int ret = memcmp((void *)ptr1, (void *)ptr2, amt); 846 if (ret != 0) 847 return ret; 848 } 849 850 /* 851 * No mismatch found. Let the caller know the two memory 852 * areas are equal. 853 */ 854 return 0; 855 } 856 857 static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree, 858 struct userspace_mem_region *region) 859 { 860 struct rb_node **cur, *parent; 861 862 for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) { 863 struct userspace_mem_region *cregion; 864 865 cregion = container_of(*cur, typeof(*cregion), gpa_node); 866 parent = *cur; 867 if (region->region.guest_phys_addr < 868 cregion->region.guest_phys_addr) 869 cur = &(*cur)->rb_left; 870 else { 871 TEST_ASSERT(region->region.guest_phys_addr != 872 cregion->region.guest_phys_addr, 873 "Duplicate GPA in region tree"); 874 875 cur = &(*cur)->rb_right; 876 } 877 } 878 879 rb_link_node(®ion->gpa_node, parent, cur); 880 rb_insert_color(®ion->gpa_node, gpa_tree); 881 } 882 883 static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree, 884 struct userspace_mem_region *region) 885 { 886 struct rb_node **cur, *parent; 887 888 for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) { 889 struct userspace_mem_region *cregion; 890 891 cregion = container_of(*cur, typeof(*cregion), hva_node); 892 parent = *cur; 893 if (region->host_mem < cregion->host_mem) 894 cur = &(*cur)->rb_left; 895 else { 896 TEST_ASSERT(region->host_mem != 897 cregion->host_mem, 898 "Duplicate HVA in region tree"); 899 900 cur = &(*cur)->rb_right; 901 } 902 } 903 904 rb_link_node(®ion->hva_node, parent, cur); 905 rb_insert_color(®ion->hva_node, hva_tree); 906 } 907 908 909 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 910 uint64_t gpa, uint64_t size, void *hva) 911 { 912 struct kvm_userspace_memory_region region = { 913 .slot = slot, 914 .flags = flags, 915 .guest_phys_addr = gpa, 916 .memory_size = size, 917 .userspace_addr = (uintptr_t)hva, 918 }; 919 920 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion); 921 } 922 923 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 924 uint64_t gpa, uint64_t size, void *hva) 925 { 926 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); 927 928 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)", 929 errno, strerror(errno)); 930 } 931 932 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 933 uint64_t gpa, uint64_t size, void *hva, 934 uint32_t guest_memfd, uint64_t guest_memfd_offset) 935 { 936 struct kvm_userspace_memory_region2 region = { 937 .slot = slot, 938 .flags = flags, 939 .guest_phys_addr = gpa, 940 .memory_size = size, 941 .userspace_addr = (uintptr_t)hva, 942 .guest_memfd = guest_memfd, 943 .guest_memfd_offset = guest_memfd_offset, 944 }; 945 946 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, ®ion); 947 } 948 949 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 950 uint64_t gpa, uint64_t size, void *hva, 951 uint32_t guest_memfd, uint64_t guest_memfd_offset) 952 { 953 int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva, 954 guest_memfd, guest_memfd_offset); 955 956 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed, errno = %d (%s)", 957 errno, strerror(errno)); 958 } 959 960 961 /* FIXME: This thing needs to be ripped apart and rewritten. */ 962 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, 963 uint64_t guest_paddr, uint32_t slot, uint64_t npages, 964 uint32_t flags, int guest_memfd, uint64_t guest_memfd_offset) 965 { 966 int ret; 967 struct userspace_mem_region *region; 968 size_t backing_src_pagesz = get_backing_src_pagesz(src_type); 969 size_t mem_size = npages * vm->page_size; 970 size_t alignment; 971 972 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, 973 "Number of guest pages is not compatible with the host. " 974 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); 975 976 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " 977 "address not on a page boundary.\n" 978 " guest_paddr: 0x%lx vm->page_size: 0x%x", 979 guest_paddr, vm->page_size); 980 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) 981 <= vm->max_gfn, "Physical range beyond maximum " 982 "supported physical address,\n" 983 " guest_paddr: 0x%lx npages: 0x%lx\n" 984 " vm->max_gfn: 0x%lx vm->page_size: 0x%x", 985 guest_paddr, npages, vm->max_gfn, vm->page_size); 986 987 /* 988 * Confirm a mem region with an overlapping address doesn't 989 * already exist. 990 */ 991 region = (struct userspace_mem_region *) userspace_mem_region_find( 992 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); 993 if (region != NULL) 994 TEST_FAIL("overlapping userspace_mem_region already " 995 "exists\n" 996 " requested guest_paddr: 0x%lx npages: 0x%lx " 997 "page_size: 0x%x\n" 998 " existing guest_paddr: 0x%lx size: 0x%lx", 999 guest_paddr, npages, vm->page_size, 1000 (uint64_t) region->region.guest_phys_addr, 1001 (uint64_t) region->region.memory_size); 1002 1003 /* Confirm no region with the requested slot already exists. */ 1004 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, 1005 slot) { 1006 if (region->region.slot != slot) 1007 continue; 1008 1009 TEST_FAIL("A mem region with the requested slot " 1010 "already exists.\n" 1011 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" 1012 " existing slot: %u paddr: 0x%lx size: 0x%lx", 1013 slot, guest_paddr, npages, 1014 region->region.slot, 1015 (uint64_t) region->region.guest_phys_addr, 1016 (uint64_t) region->region.memory_size); 1017 } 1018 1019 /* Allocate and initialize new mem region structure. */ 1020 region = calloc(1, sizeof(*region)); 1021 TEST_ASSERT(region != NULL, "Insufficient Memory"); 1022 region->mmap_size = mem_size; 1023 1024 #ifdef __s390x__ 1025 /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */ 1026 alignment = 0x100000; 1027 #else 1028 alignment = 1; 1029 #endif 1030 1031 /* 1032 * When using THP mmap is not guaranteed to returned a hugepage aligned 1033 * address so we have to pad the mmap. Padding is not needed for HugeTLB 1034 * because mmap will always return an address aligned to the HugeTLB 1035 * page size. 1036 */ 1037 if (src_type == VM_MEM_SRC_ANONYMOUS_THP) 1038 alignment = max(backing_src_pagesz, alignment); 1039 1040 TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz)); 1041 1042 /* Add enough memory to align up if necessary */ 1043 if (alignment > 1) 1044 region->mmap_size += alignment; 1045 1046 region->fd = -1; 1047 if (backing_src_is_shared(src_type)) 1048 region->fd = kvm_memfd_alloc(region->mmap_size, 1049 src_type == VM_MEM_SRC_SHARED_HUGETLB); 1050 1051 region->mmap_start = mmap(NULL, region->mmap_size, 1052 PROT_READ | PROT_WRITE, 1053 vm_mem_backing_src_alias(src_type)->flag, 1054 region->fd, 0); 1055 TEST_ASSERT(region->mmap_start != MAP_FAILED, 1056 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED)); 1057 1058 TEST_ASSERT(!is_backing_src_hugetlb(src_type) || 1059 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), 1060 "mmap_start %p is not aligned to HugeTLB page size 0x%lx", 1061 region->mmap_start, backing_src_pagesz); 1062 1063 /* Align host address */ 1064 region->host_mem = align_ptr_up(region->mmap_start, alignment); 1065 1066 /* As needed perform madvise */ 1067 if ((src_type == VM_MEM_SRC_ANONYMOUS || 1068 src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) { 1069 ret = madvise(region->host_mem, mem_size, 1070 src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE); 1071 TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s", 1072 region->host_mem, mem_size, 1073 vm_mem_backing_src_alias(src_type)->name); 1074 } 1075 1076 region->backing_src_type = src_type; 1077 1078 if (flags & KVM_MEM_GUEST_MEMFD) { 1079 if (guest_memfd < 0) { 1080 uint32_t guest_memfd_flags = 0; 1081 TEST_ASSERT(!guest_memfd_offset, 1082 "Offset must be zero when creating new guest_memfd"); 1083 guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags); 1084 } else { 1085 /* 1086 * Install a unique fd for each memslot so that the fd 1087 * can be closed when the region is deleted without 1088 * needing to track if the fd is owned by the framework 1089 * or by the caller. 1090 */ 1091 guest_memfd = dup(guest_memfd); 1092 TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd)); 1093 } 1094 1095 region->region.guest_memfd = guest_memfd; 1096 region->region.guest_memfd_offset = guest_memfd_offset; 1097 } else { 1098 region->region.guest_memfd = -1; 1099 } 1100 1101 region->unused_phy_pages = sparsebit_alloc(); 1102 if (vm_arch_has_protected_memory(vm)) 1103 region->protected_phy_pages = sparsebit_alloc(); 1104 sparsebit_set_num(region->unused_phy_pages, 1105 guest_paddr >> vm->page_shift, npages); 1106 region->region.slot = slot; 1107 region->region.flags = flags; 1108 region->region.guest_phys_addr = guest_paddr; 1109 region->region.memory_size = npages * vm->page_size; 1110 region->region.userspace_addr = (uintptr_t) region->host_mem; 1111 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); 1112 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n" 1113 " rc: %i errno: %i\n" 1114 " slot: %u flags: 0x%x\n" 1115 " guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d", 1116 ret, errno, slot, flags, 1117 guest_paddr, (uint64_t) region->region.memory_size, 1118 region->region.guest_memfd); 1119 1120 /* Add to quick lookup data structures */ 1121 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); 1122 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); 1123 hash_add(vm->regions.slot_hash, ®ion->slot_node, slot); 1124 1125 /* If shared memory, create an alias. */ 1126 if (region->fd >= 0) { 1127 region->mmap_alias = mmap(NULL, region->mmap_size, 1128 PROT_READ | PROT_WRITE, 1129 vm_mem_backing_src_alias(src_type)->flag, 1130 region->fd, 0); 1131 TEST_ASSERT(region->mmap_alias != MAP_FAILED, 1132 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED)); 1133 1134 /* Align host alias address */ 1135 region->host_alias = align_ptr_up(region->mmap_alias, alignment); 1136 } 1137 } 1138 1139 void vm_userspace_mem_region_add(struct kvm_vm *vm, 1140 enum vm_mem_backing_src_type src_type, 1141 uint64_t guest_paddr, uint32_t slot, 1142 uint64_t npages, uint32_t flags) 1143 { 1144 vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0); 1145 } 1146 1147 /* 1148 * Memslot to region 1149 * 1150 * Input Args: 1151 * vm - Virtual Machine 1152 * memslot - KVM memory slot ID 1153 * 1154 * Output Args: None 1155 * 1156 * Return: 1157 * Pointer to memory region structure that describe memory region 1158 * using kvm memory slot ID given by memslot. TEST_ASSERT failure 1159 * on error (e.g. currently no memory region using memslot as a KVM 1160 * memory slot ID). 1161 */ 1162 struct userspace_mem_region * 1163 memslot2region(struct kvm_vm *vm, uint32_t memslot) 1164 { 1165 struct userspace_mem_region *region; 1166 1167 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, 1168 memslot) 1169 if (region->region.slot == memslot) 1170 return region; 1171 1172 fprintf(stderr, "No mem region with the requested slot found,\n" 1173 " requested slot: %u\n", memslot); 1174 fputs("---- vm dump ----\n", stderr); 1175 vm_dump(stderr, vm, 2); 1176 TEST_FAIL("Mem region not found"); 1177 return NULL; 1178 } 1179 1180 /* 1181 * VM Memory Region Flags Set 1182 * 1183 * Input Args: 1184 * vm - Virtual Machine 1185 * flags - Starting guest physical address 1186 * 1187 * Output Args: None 1188 * 1189 * Return: None 1190 * 1191 * Sets the flags of the memory region specified by the value of slot, 1192 * to the values given by flags. 1193 */ 1194 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) 1195 { 1196 int ret; 1197 struct userspace_mem_region *region; 1198 1199 region = memslot2region(vm, slot); 1200 1201 region->region.flags = flags; 1202 1203 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); 1204 1205 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n" 1206 " rc: %i errno: %i slot: %u flags: 0x%x", 1207 ret, errno, slot, flags); 1208 } 1209 1210 /* 1211 * VM Memory Region Move 1212 * 1213 * Input Args: 1214 * vm - Virtual Machine 1215 * slot - Slot of the memory region to move 1216 * new_gpa - Starting guest physical address 1217 * 1218 * Output Args: None 1219 * 1220 * Return: None 1221 * 1222 * Change the gpa of a memory region. 1223 */ 1224 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) 1225 { 1226 struct userspace_mem_region *region; 1227 int ret; 1228 1229 region = memslot2region(vm, slot); 1230 1231 region->region.guest_phys_addr = new_gpa; 1232 1233 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); 1234 1235 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed\n" 1236 "ret: %i errno: %i slot: %u new_gpa: 0x%lx", 1237 ret, errno, slot, new_gpa); 1238 } 1239 1240 /* 1241 * VM Memory Region Delete 1242 * 1243 * Input Args: 1244 * vm - Virtual Machine 1245 * slot - Slot of the memory region to delete 1246 * 1247 * Output Args: None 1248 * 1249 * Return: None 1250 * 1251 * Delete a memory region. 1252 */ 1253 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) 1254 { 1255 __vm_mem_region_delete(vm, memslot2region(vm, slot), true); 1256 } 1257 1258 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, 1259 bool punch_hole) 1260 { 1261 const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0); 1262 struct userspace_mem_region *region; 1263 uint64_t end = base + size; 1264 uint64_t gpa, len; 1265 off_t fd_offset; 1266 int ret; 1267 1268 for (gpa = base; gpa < end; gpa += len) { 1269 uint64_t offset; 1270 1271 region = userspace_mem_region_find(vm, gpa, gpa); 1272 TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD, 1273 "Private memory region not found for GPA 0x%lx", gpa); 1274 1275 offset = gpa - region->region.guest_phys_addr; 1276 fd_offset = region->region.guest_memfd_offset + offset; 1277 len = min_t(uint64_t, end - gpa, region->region.memory_size - offset); 1278 1279 ret = fallocate(region->region.guest_memfd, mode, fd_offset, len); 1280 TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx", 1281 punch_hole ? "punch hole" : "allocate", gpa, len, 1282 region->region.guest_memfd, mode, fd_offset); 1283 } 1284 } 1285 1286 /* Returns the size of a vCPU's kvm_run structure. */ 1287 static int vcpu_mmap_sz(void) 1288 { 1289 int dev_fd, ret; 1290 1291 dev_fd = open_kvm_dev_path_or_exit(); 1292 1293 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); 1294 TEST_ASSERT(ret >= sizeof(struct kvm_run), 1295 KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret)); 1296 1297 close(dev_fd); 1298 1299 return ret; 1300 } 1301 1302 static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) 1303 { 1304 struct kvm_vcpu *vcpu; 1305 1306 list_for_each_entry(vcpu, &vm->vcpus, list) { 1307 if (vcpu->id == vcpu_id) 1308 return true; 1309 } 1310 1311 return false; 1312 } 1313 1314 /* 1315 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id. 1316 * No additional vCPU setup is done. Returns the vCPU. 1317 */ 1318 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 1319 { 1320 struct kvm_vcpu *vcpu; 1321 1322 /* Confirm a vcpu with the specified id doesn't already exist. */ 1323 TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id); 1324 1325 /* Allocate and initialize new vcpu structure. */ 1326 vcpu = calloc(1, sizeof(*vcpu)); 1327 TEST_ASSERT(vcpu != NULL, "Insufficient Memory"); 1328 1329 vcpu->vm = vm; 1330 vcpu->id = vcpu_id; 1331 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id); 1332 TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm); 1333 1334 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size " 1335 "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi", 1336 vcpu_mmap_sz(), sizeof(*vcpu->run)); 1337 vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(), 1338 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0); 1339 TEST_ASSERT(vcpu->run != MAP_FAILED, 1340 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED)); 1341 1342 /* Add to linked-list of VCPUs. */ 1343 list_add(&vcpu->list, &vm->vcpus); 1344 1345 return vcpu; 1346 } 1347 1348 /* 1349 * VM Virtual Address Unused Gap 1350 * 1351 * Input Args: 1352 * vm - Virtual Machine 1353 * sz - Size (bytes) 1354 * vaddr_min - Minimum Virtual Address 1355 * 1356 * Output Args: None 1357 * 1358 * Return: 1359 * Lowest virtual address at or below vaddr_min, with at least 1360 * sz unused bytes. TEST_ASSERT failure if no area of at least 1361 * size sz is available. 1362 * 1363 * Within the VM specified by vm, locates the lowest starting virtual 1364 * address >= vaddr_min, that has at least sz unallocated bytes. A 1365 * TEST_ASSERT failure occurs for invalid input or no area of at least 1366 * sz unallocated bytes >= vaddr_min is available. 1367 */ 1368 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, 1369 vm_vaddr_t vaddr_min) 1370 { 1371 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; 1372 1373 /* Determine lowest permitted virtual page index. */ 1374 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; 1375 if ((pgidx_start * vm->page_size) < vaddr_min) 1376 goto no_va_found; 1377 1378 /* Loop over section with enough valid virtual page indexes. */ 1379 if (!sparsebit_is_set_num(vm->vpages_valid, 1380 pgidx_start, pages)) 1381 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, 1382 pgidx_start, pages); 1383 do { 1384 /* 1385 * Are there enough unused virtual pages available at 1386 * the currently proposed starting virtual page index. 1387 * If not, adjust proposed starting index to next 1388 * possible. 1389 */ 1390 if (sparsebit_is_clear_num(vm->vpages_mapped, 1391 pgidx_start, pages)) 1392 goto va_found; 1393 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, 1394 pgidx_start, pages); 1395 if (pgidx_start == 0) 1396 goto no_va_found; 1397 1398 /* 1399 * If needed, adjust proposed starting virtual address, 1400 * to next range of valid virtual addresses. 1401 */ 1402 if (!sparsebit_is_set_num(vm->vpages_valid, 1403 pgidx_start, pages)) { 1404 pgidx_start = sparsebit_next_set_num( 1405 vm->vpages_valid, pgidx_start, pages); 1406 if (pgidx_start == 0) 1407 goto no_va_found; 1408 } 1409 } while (pgidx_start != 0); 1410 1411 no_va_found: 1412 TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages); 1413 1414 /* NOT REACHED */ 1415 return -1; 1416 1417 va_found: 1418 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, 1419 pgidx_start, pages), 1420 "Unexpected, invalid virtual page index range,\n" 1421 " pgidx_start: 0x%lx\n" 1422 " pages: 0x%lx", 1423 pgidx_start, pages); 1424 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, 1425 pgidx_start, pages), 1426 "Unexpected, pages already mapped,\n" 1427 " pgidx_start: 0x%lx\n" 1428 " pages: 0x%lx", 1429 pgidx_start, pages); 1430 1431 return pgidx_start * vm->page_size; 1432 } 1433 1434 static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, 1435 vm_vaddr_t vaddr_min, 1436 enum kvm_mem_region_type type, 1437 bool protected) 1438 { 1439 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); 1440 1441 virt_pgd_alloc(vm); 1442 vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages, 1443 KVM_UTIL_MIN_PFN * vm->page_size, 1444 vm->memslots[type], protected); 1445 1446 /* 1447 * Find an unused range of virtual page addresses of at least 1448 * pages in length. 1449 */ 1450 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); 1451 1452 /* Map the virtual pages. */ 1453 for (vm_vaddr_t vaddr = vaddr_start; pages > 0; 1454 pages--, vaddr += vm->page_size, paddr += vm->page_size) { 1455 1456 virt_pg_map(vm, vaddr, paddr); 1457 1458 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); 1459 } 1460 1461 return vaddr_start; 1462 } 1463 1464 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, 1465 enum kvm_mem_region_type type) 1466 { 1467 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, 1468 vm_arch_has_protected_memory(vm)); 1469 } 1470 1471 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, 1472 vm_vaddr_t vaddr_min, 1473 enum kvm_mem_region_type type) 1474 { 1475 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false); 1476 } 1477 1478 /* 1479 * VM Virtual Address Allocate 1480 * 1481 * Input Args: 1482 * vm - Virtual Machine 1483 * sz - Size in bytes 1484 * vaddr_min - Minimum starting virtual address 1485 * 1486 * Output Args: None 1487 * 1488 * Return: 1489 * Starting guest virtual address 1490 * 1491 * Allocates at least sz bytes within the virtual address space of the vm 1492 * given by vm. The allocated bytes are mapped to a virtual address >= 1493 * the address given by vaddr_min. Note that each allocation uses a 1494 * a unique set of pages, with the minimum real allocation being at least 1495 * a page. The allocated physical space comes from the TEST_DATA memory region. 1496 */ 1497 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) 1498 { 1499 return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA); 1500 } 1501 1502 /* 1503 * VM Virtual Address Allocate Pages 1504 * 1505 * Input Args: 1506 * vm - Virtual Machine 1507 * 1508 * Output Args: None 1509 * 1510 * Return: 1511 * Starting guest virtual address 1512 * 1513 * Allocates at least N system pages worth of bytes within the virtual address 1514 * space of the vm. 1515 */ 1516 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) 1517 { 1518 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); 1519 } 1520 1521 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) 1522 { 1523 return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); 1524 } 1525 1526 /* 1527 * VM Virtual Address Allocate Page 1528 * 1529 * Input Args: 1530 * vm - Virtual Machine 1531 * 1532 * Output Args: None 1533 * 1534 * Return: 1535 * Starting guest virtual address 1536 * 1537 * Allocates at least one system page worth of bytes within the virtual address 1538 * space of the vm. 1539 */ 1540 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) 1541 { 1542 return vm_vaddr_alloc_pages(vm, 1); 1543 } 1544 1545 /* 1546 * Map a range of VM virtual address to the VM's physical address 1547 * 1548 * Input Args: 1549 * vm - Virtual Machine 1550 * vaddr - Virtuall address to map 1551 * paddr - VM Physical Address 1552 * npages - The number of pages to map 1553 * 1554 * Output Args: None 1555 * 1556 * Return: None 1557 * 1558 * Within the VM given by @vm, creates a virtual translation for 1559 * @npages starting at @vaddr to the page range starting at @paddr. 1560 */ 1561 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 1562 unsigned int npages) 1563 { 1564 size_t page_size = vm->page_size; 1565 size_t size = npages * page_size; 1566 1567 TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow"); 1568 TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); 1569 1570 while (npages--) { 1571 virt_pg_map(vm, vaddr, paddr); 1572 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); 1573 1574 vaddr += page_size; 1575 paddr += page_size; 1576 } 1577 } 1578 1579 /* 1580 * Address VM Physical to Host Virtual 1581 * 1582 * Input Args: 1583 * vm - Virtual Machine 1584 * gpa - VM physical address 1585 * 1586 * Output Args: None 1587 * 1588 * Return: 1589 * Equivalent host virtual address 1590 * 1591 * Locates the memory region containing the VM physical address given 1592 * by gpa, within the VM given by vm. When found, the host virtual 1593 * address providing the memory to the vm physical address is returned. 1594 * A TEST_ASSERT failure occurs if no region containing gpa exists. 1595 */ 1596 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) 1597 { 1598 struct userspace_mem_region *region; 1599 1600 gpa = vm_untag_gpa(vm, gpa); 1601 1602 region = userspace_mem_region_find(vm, gpa, gpa); 1603 if (!region) { 1604 TEST_FAIL("No vm physical memory at 0x%lx", gpa); 1605 return NULL; 1606 } 1607 1608 return (void *)((uintptr_t)region->host_mem 1609 + (gpa - region->region.guest_phys_addr)); 1610 } 1611 1612 /* 1613 * Address Host Virtual to VM Physical 1614 * 1615 * Input Args: 1616 * vm - Virtual Machine 1617 * hva - Host virtual address 1618 * 1619 * Output Args: None 1620 * 1621 * Return: 1622 * Equivalent VM physical address 1623 * 1624 * Locates the memory region containing the host virtual address given 1625 * by hva, within the VM given by vm. When found, the equivalent 1626 * VM physical address is returned. A TEST_ASSERT failure occurs if no 1627 * region containing hva exists. 1628 */ 1629 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) 1630 { 1631 struct rb_node *node; 1632 1633 for (node = vm->regions.hva_tree.rb_node; node; ) { 1634 struct userspace_mem_region *region = 1635 container_of(node, struct userspace_mem_region, hva_node); 1636 1637 if (hva >= region->host_mem) { 1638 if (hva <= (region->host_mem 1639 + region->region.memory_size - 1)) 1640 return (vm_paddr_t)((uintptr_t) 1641 region->region.guest_phys_addr 1642 + (hva - (uintptr_t)region->host_mem)); 1643 1644 node = node->rb_right; 1645 } else 1646 node = node->rb_left; 1647 } 1648 1649 TEST_FAIL("No mapping to a guest physical address, hva: %p", hva); 1650 return -1; 1651 } 1652 1653 /* 1654 * Address VM physical to Host Virtual *alias*. 1655 * 1656 * Input Args: 1657 * vm - Virtual Machine 1658 * gpa - VM physical address 1659 * 1660 * Output Args: None 1661 * 1662 * Return: 1663 * Equivalent address within the host virtual *alias* area, or NULL 1664 * (without failing the test) if the guest memory is not shared (so 1665 * no alias exists). 1666 * 1667 * Create a writable, shared virtual=>physical alias for the specific GPA. 1668 * The primary use case is to allow the host selftest to manipulate guest 1669 * memory without mapping said memory in the guest's address space. And, for 1670 * userfaultfd-based demand paging, to do so without triggering userfaults. 1671 */ 1672 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) 1673 { 1674 struct userspace_mem_region *region; 1675 uintptr_t offset; 1676 1677 region = userspace_mem_region_find(vm, gpa, gpa); 1678 if (!region) 1679 return NULL; 1680 1681 if (!region->host_alias) 1682 return NULL; 1683 1684 offset = gpa - region->region.guest_phys_addr; 1685 return (void *) ((uintptr_t) region->host_alias + offset); 1686 } 1687 1688 /* Create an interrupt controller chip for the specified VM. */ 1689 void vm_create_irqchip(struct kvm_vm *vm) 1690 { 1691 vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL); 1692 1693 vm->has_irqchip = true; 1694 } 1695 1696 int _vcpu_run(struct kvm_vcpu *vcpu) 1697 { 1698 int rc; 1699 1700 do { 1701 rc = __vcpu_run(vcpu); 1702 } while (rc == -1 && errno == EINTR); 1703 1704 assert_on_unhandled_exception(vcpu); 1705 1706 return rc; 1707 } 1708 1709 /* 1710 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR. 1711 * Assert if the KVM returns an error (other than -EINTR). 1712 */ 1713 void vcpu_run(struct kvm_vcpu *vcpu) 1714 { 1715 int ret = _vcpu_run(vcpu); 1716 1717 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret)); 1718 } 1719 1720 void vcpu_run_complete_io(struct kvm_vcpu *vcpu) 1721 { 1722 int ret; 1723 1724 vcpu->run->immediate_exit = 1; 1725 ret = __vcpu_run(vcpu); 1726 vcpu->run->immediate_exit = 0; 1727 1728 TEST_ASSERT(ret == -1 && errno == EINTR, 1729 "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i", 1730 ret, errno); 1731 } 1732 1733 /* 1734 * Get the list of guest registers which are supported for 1735 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer, 1736 * it is the caller's responsibility to free the list. 1737 */ 1738 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu) 1739 { 1740 struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list; 1741 int ret; 1742 1743 ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, ®_list_n); 1744 TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0"); 1745 1746 reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64)); 1747 reg_list->n = reg_list_n.n; 1748 vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list); 1749 return reg_list; 1750 } 1751 1752 void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu) 1753 { 1754 uint32_t page_size = getpagesize(); 1755 uint32_t size = vcpu->vm->dirty_ring_size; 1756 1757 TEST_ASSERT(size > 0, "Should enable dirty ring first"); 1758 1759 if (!vcpu->dirty_gfns) { 1760 void *addr; 1761 1762 addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd, 1763 page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1764 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private"); 1765 1766 addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd, 1767 page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1768 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec"); 1769 1770 addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 1771 page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1772 TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed"); 1773 1774 vcpu->dirty_gfns = addr; 1775 vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn); 1776 } 1777 1778 return vcpu->dirty_gfns; 1779 } 1780 1781 /* 1782 * Device Ioctl 1783 */ 1784 1785 int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) 1786 { 1787 struct kvm_device_attr attribute = { 1788 .group = group, 1789 .attr = attr, 1790 .flags = 0, 1791 }; 1792 1793 return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute); 1794 } 1795 1796 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) 1797 { 1798 struct kvm_create_device create_dev = { 1799 .type = type, 1800 .flags = KVM_CREATE_DEVICE_TEST, 1801 }; 1802 1803 return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); 1804 } 1805 1806 int __kvm_create_device(struct kvm_vm *vm, uint64_t type) 1807 { 1808 struct kvm_create_device create_dev = { 1809 .type = type, 1810 .fd = -1, 1811 .flags = 0, 1812 }; 1813 int err; 1814 1815 err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); 1816 TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value"); 1817 return err ? : create_dev.fd; 1818 } 1819 1820 int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val) 1821 { 1822 struct kvm_device_attr kvmattr = { 1823 .group = group, 1824 .attr = attr, 1825 .flags = 0, 1826 .addr = (uintptr_t)val, 1827 }; 1828 1829 return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr); 1830 } 1831 1832 int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val) 1833 { 1834 struct kvm_device_attr kvmattr = { 1835 .group = group, 1836 .attr = attr, 1837 .flags = 0, 1838 .addr = (uintptr_t)val, 1839 }; 1840 1841 return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr); 1842 } 1843 1844 /* 1845 * IRQ related functions. 1846 */ 1847 1848 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) 1849 { 1850 struct kvm_irq_level irq_level = { 1851 .irq = irq, 1852 .level = level, 1853 }; 1854 1855 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); 1856 } 1857 1858 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) 1859 { 1860 int ret = _kvm_irq_line(vm, irq, level); 1861 1862 TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret)); 1863 } 1864 1865 struct kvm_irq_routing *kvm_gsi_routing_create(void) 1866 { 1867 struct kvm_irq_routing *routing; 1868 size_t size; 1869 1870 size = sizeof(struct kvm_irq_routing); 1871 /* Allocate space for the max number of entries: this wastes 196 KBs. */ 1872 size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry); 1873 routing = calloc(1, size); 1874 assert(routing); 1875 1876 return routing; 1877 } 1878 1879 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, 1880 uint32_t gsi, uint32_t pin) 1881 { 1882 int i; 1883 1884 assert(routing); 1885 assert(routing->nr < KVM_MAX_IRQ_ROUTES); 1886 1887 i = routing->nr; 1888 routing->entries[i].gsi = gsi; 1889 routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; 1890 routing->entries[i].flags = 0; 1891 routing->entries[i].u.irqchip.irqchip = 0; 1892 routing->entries[i].u.irqchip.pin = pin; 1893 routing->nr++; 1894 } 1895 1896 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) 1897 { 1898 int ret; 1899 1900 assert(routing); 1901 ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing); 1902 free(routing); 1903 1904 return ret; 1905 } 1906 1907 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) 1908 { 1909 int ret; 1910 1911 ret = _kvm_gsi_routing_write(vm, routing); 1912 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret)); 1913 } 1914 1915 /* 1916 * VM Dump 1917 * 1918 * Input Args: 1919 * vm - Virtual Machine 1920 * indent - Left margin indent amount 1921 * 1922 * Output Args: 1923 * stream - Output FILE stream 1924 * 1925 * Return: None 1926 * 1927 * Dumps the current state of the VM given by vm, to the FILE stream 1928 * given by stream. 1929 */ 1930 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) 1931 { 1932 int ctr; 1933 struct userspace_mem_region *region; 1934 struct kvm_vcpu *vcpu; 1935 1936 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); 1937 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); 1938 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); 1939 fprintf(stream, "%*sMem Regions:\n", indent, ""); 1940 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { 1941 fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx " 1942 "host_virt: %p\n", indent + 2, "", 1943 (uint64_t) region->region.guest_phys_addr, 1944 (uint64_t) region->region.memory_size, 1945 region->host_mem); 1946 fprintf(stream, "%*sunused_phy_pages: ", indent + 2, ""); 1947 sparsebit_dump(stream, region->unused_phy_pages, 0); 1948 if (region->protected_phy_pages) { 1949 fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, ""); 1950 sparsebit_dump(stream, region->protected_phy_pages, 0); 1951 } 1952 } 1953 fprintf(stream, "%*sMapped Virtual Pages:\n", indent, ""); 1954 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); 1955 fprintf(stream, "%*spgd_created: %u\n", indent, "", 1956 vm->pgd_created); 1957 if (vm->pgd_created) { 1958 fprintf(stream, "%*sVirtual Translation Tables:\n", 1959 indent + 2, ""); 1960 virt_dump(stream, vm, indent + 4); 1961 } 1962 fprintf(stream, "%*sVCPUs:\n", indent, ""); 1963 1964 list_for_each_entry(vcpu, &vm->vcpus, list) 1965 vcpu_dump(stream, vcpu, indent + 2); 1966 } 1967 1968 #define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x} 1969 1970 /* Known KVM exit reasons */ 1971 static struct exit_reason { 1972 unsigned int reason; 1973 const char *name; 1974 } exit_reasons_known[] = { 1975 KVM_EXIT_STRING(UNKNOWN), 1976 KVM_EXIT_STRING(EXCEPTION), 1977 KVM_EXIT_STRING(IO), 1978 KVM_EXIT_STRING(HYPERCALL), 1979 KVM_EXIT_STRING(DEBUG), 1980 KVM_EXIT_STRING(HLT), 1981 KVM_EXIT_STRING(MMIO), 1982 KVM_EXIT_STRING(IRQ_WINDOW_OPEN), 1983 KVM_EXIT_STRING(SHUTDOWN), 1984 KVM_EXIT_STRING(FAIL_ENTRY), 1985 KVM_EXIT_STRING(INTR), 1986 KVM_EXIT_STRING(SET_TPR), 1987 KVM_EXIT_STRING(TPR_ACCESS), 1988 KVM_EXIT_STRING(S390_SIEIC), 1989 KVM_EXIT_STRING(S390_RESET), 1990 KVM_EXIT_STRING(DCR), 1991 KVM_EXIT_STRING(NMI), 1992 KVM_EXIT_STRING(INTERNAL_ERROR), 1993 KVM_EXIT_STRING(OSI), 1994 KVM_EXIT_STRING(PAPR_HCALL), 1995 KVM_EXIT_STRING(S390_UCONTROL), 1996 KVM_EXIT_STRING(WATCHDOG), 1997 KVM_EXIT_STRING(S390_TSCH), 1998 KVM_EXIT_STRING(EPR), 1999 KVM_EXIT_STRING(SYSTEM_EVENT), 2000 KVM_EXIT_STRING(S390_STSI), 2001 KVM_EXIT_STRING(IOAPIC_EOI), 2002 KVM_EXIT_STRING(HYPERV), 2003 KVM_EXIT_STRING(ARM_NISV), 2004 KVM_EXIT_STRING(X86_RDMSR), 2005 KVM_EXIT_STRING(X86_WRMSR), 2006 KVM_EXIT_STRING(DIRTY_RING_FULL), 2007 KVM_EXIT_STRING(AP_RESET_HOLD), 2008 KVM_EXIT_STRING(X86_BUS_LOCK), 2009 KVM_EXIT_STRING(XEN), 2010 KVM_EXIT_STRING(RISCV_SBI), 2011 KVM_EXIT_STRING(RISCV_CSR), 2012 KVM_EXIT_STRING(NOTIFY), 2013 #ifdef KVM_EXIT_MEMORY_NOT_PRESENT 2014 KVM_EXIT_STRING(MEMORY_NOT_PRESENT), 2015 #endif 2016 }; 2017 2018 /* 2019 * Exit Reason String 2020 * 2021 * Input Args: 2022 * exit_reason - Exit reason 2023 * 2024 * Output Args: None 2025 * 2026 * Return: 2027 * Constant string pointer describing the exit reason. 2028 * 2029 * Locates and returns a constant string that describes the KVM exit 2030 * reason given by exit_reason. If no such string is found, a constant 2031 * string of "Unknown" is returned. 2032 */ 2033 const char *exit_reason_str(unsigned int exit_reason) 2034 { 2035 unsigned int n1; 2036 2037 for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) { 2038 if (exit_reason == exit_reasons_known[n1].reason) 2039 return exit_reasons_known[n1].name; 2040 } 2041 2042 return "Unknown"; 2043 } 2044 2045 /* 2046 * Physical Contiguous Page Allocator 2047 * 2048 * Input Args: 2049 * vm - Virtual Machine 2050 * num - number of pages 2051 * paddr_min - Physical address minimum 2052 * memslot - Memory region to allocate page from 2053 * protected - True if the pages will be used as protected/private memory 2054 * 2055 * Output Args: None 2056 * 2057 * Return: 2058 * Starting physical address 2059 * 2060 * Within the VM specified by vm, locates a range of available physical 2061 * pages at or above paddr_min. If found, the pages are marked as in use 2062 * and their base address is returned. A TEST_ASSERT failure occurs if 2063 * not enough pages are available at or above paddr_min. 2064 */ 2065 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 2066 vm_paddr_t paddr_min, uint32_t memslot, 2067 bool protected) 2068 { 2069 struct userspace_mem_region *region; 2070 sparsebit_idx_t pg, base; 2071 2072 TEST_ASSERT(num > 0, "Must allocate at least one page"); 2073 2074 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " 2075 "not divisible by page size.\n" 2076 " paddr_min: 0x%lx page_size: 0x%x", 2077 paddr_min, vm->page_size); 2078 2079 region = memslot2region(vm, memslot); 2080 TEST_ASSERT(!protected || region->protected_phy_pages, 2081 "Region doesn't support protected memory"); 2082 2083 base = pg = paddr_min >> vm->page_shift; 2084 do { 2085 for (; pg < base + num; ++pg) { 2086 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { 2087 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); 2088 break; 2089 } 2090 } 2091 } while (pg && pg != base + num); 2092 2093 if (pg == 0) { 2094 fprintf(stderr, "No guest physical page available, " 2095 "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n", 2096 paddr_min, vm->page_size, memslot); 2097 fputs("---- vm dump ----\n", stderr); 2098 vm_dump(stderr, vm, 2); 2099 abort(); 2100 } 2101 2102 for (pg = base; pg < base + num; ++pg) { 2103 sparsebit_clear(region->unused_phy_pages, pg); 2104 if (protected) 2105 sparsebit_set(region->protected_phy_pages, pg); 2106 } 2107 2108 return base * vm->page_size; 2109 } 2110 2111 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, 2112 uint32_t memslot) 2113 { 2114 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); 2115 } 2116 2117 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) 2118 { 2119 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 2120 vm->memslots[MEM_REGION_PT]); 2121 } 2122 2123 /* 2124 * Address Guest Virtual to Host Virtual 2125 * 2126 * Input Args: 2127 * vm - Virtual Machine 2128 * gva - VM virtual address 2129 * 2130 * Output Args: None 2131 * 2132 * Return: 2133 * Equivalent host virtual address 2134 */ 2135 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) 2136 { 2137 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); 2138 } 2139 2140 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm) 2141 { 2142 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; 2143 } 2144 2145 static unsigned int vm_calc_num_pages(unsigned int num_pages, 2146 unsigned int page_shift, 2147 unsigned int new_page_shift, 2148 bool ceil) 2149 { 2150 unsigned int n = 1 << (new_page_shift - page_shift); 2151 2152 if (page_shift >= new_page_shift) 2153 return num_pages * (1 << (page_shift - new_page_shift)); 2154 2155 return num_pages / n + !!(ceil && num_pages % n); 2156 } 2157 2158 static inline int getpageshift(void) 2159 { 2160 return __builtin_ffs(getpagesize()) - 1; 2161 } 2162 2163 unsigned int 2164 vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) 2165 { 2166 return vm_calc_num_pages(num_guest_pages, 2167 vm_guest_mode_params[mode].page_shift, 2168 getpageshift(), true); 2169 } 2170 2171 unsigned int 2172 vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages) 2173 { 2174 return vm_calc_num_pages(num_host_pages, getpageshift(), 2175 vm_guest_mode_params[mode].page_shift, false); 2176 } 2177 2178 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size) 2179 { 2180 unsigned int n; 2181 n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size); 2182 return vm_adjust_num_guest_pages(mode, n); 2183 } 2184 2185 /* 2186 * Read binary stats descriptors 2187 * 2188 * Input Args: 2189 * stats_fd - the file descriptor for the binary stats file from which to read 2190 * header - the binary stats metadata header corresponding to the given FD 2191 * 2192 * Output Args: None 2193 * 2194 * Return: 2195 * A pointer to a newly allocated series of stat descriptors. 2196 * Caller is responsible for freeing the returned kvm_stats_desc. 2197 * 2198 * Read the stats descriptors from the binary stats interface. 2199 */ 2200 struct kvm_stats_desc *read_stats_descriptors(int stats_fd, 2201 struct kvm_stats_header *header) 2202 { 2203 struct kvm_stats_desc *stats_desc; 2204 ssize_t desc_size, total_size, ret; 2205 2206 desc_size = get_stats_descriptor_size(header); 2207 total_size = header->num_desc * desc_size; 2208 2209 stats_desc = calloc(header->num_desc, desc_size); 2210 TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors"); 2211 2212 ret = pread(stats_fd, stats_desc, total_size, header->desc_offset); 2213 TEST_ASSERT(ret == total_size, "Read KVM stats descriptors"); 2214 2215 return stats_desc; 2216 } 2217 2218 /* 2219 * Read stat data for a particular stat 2220 * 2221 * Input Args: 2222 * stats_fd - the file descriptor for the binary stats file from which to read 2223 * header - the binary stats metadata header corresponding to the given FD 2224 * desc - the binary stat metadata for the particular stat to be read 2225 * max_elements - the maximum number of 8-byte values to read into data 2226 * 2227 * Output Args: 2228 * data - the buffer into which stat data should be read 2229 * 2230 * Read the data values of a specified stat from the binary stats interface. 2231 */ 2232 void read_stat_data(int stats_fd, struct kvm_stats_header *header, 2233 struct kvm_stats_desc *desc, uint64_t *data, 2234 size_t max_elements) 2235 { 2236 size_t nr_elements = min_t(ssize_t, desc->size, max_elements); 2237 size_t size = nr_elements * sizeof(*data); 2238 ssize_t ret; 2239 2240 TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name); 2241 TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name); 2242 2243 ret = pread(stats_fd, data, size, 2244 header->data_offset + desc->offset); 2245 2246 TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)", 2247 desc->name, errno, strerror(errno)); 2248 TEST_ASSERT(ret == size, 2249 "pread() on stat '%s' read %ld bytes, wanted %lu bytes", 2250 desc->name, size, ret); 2251 } 2252 2253 /* 2254 * Read the data of the named stat 2255 * 2256 * Input Args: 2257 * vm - the VM for which the stat should be read 2258 * stat_name - the name of the stat to read 2259 * max_elements - the maximum number of 8-byte values to read into data 2260 * 2261 * Output Args: 2262 * data - the buffer into which stat data should be read 2263 * 2264 * Read the data values of a specified stat from the binary stats interface. 2265 */ 2266 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data, 2267 size_t max_elements) 2268 { 2269 struct kvm_stats_desc *desc; 2270 size_t size_desc; 2271 int i; 2272 2273 if (!vm->stats_fd) { 2274 vm->stats_fd = vm_get_stats_fd(vm); 2275 read_stats_header(vm->stats_fd, &vm->stats_header); 2276 vm->stats_desc = read_stats_descriptors(vm->stats_fd, 2277 &vm->stats_header); 2278 } 2279 2280 size_desc = get_stats_descriptor_size(&vm->stats_header); 2281 2282 for (i = 0; i < vm->stats_header.num_desc; ++i) { 2283 desc = (void *)vm->stats_desc + (i * size_desc); 2284 2285 if (strcmp(desc->name, stat_name)) 2286 continue; 2287 2288 read_stat_data(vm->stats_fd, &vm->stats_header, desc, 2289 data, max_elements); 2290 2291 break; 2292 } 2293 } 2294 2295 __weak void kvm_arch_vm_post_create(struct kvm_vm *vm) 2296 { 2297 } 2298 2299 __weak void kvm_selftest_arch_init(void) 2300 { 2301 } 2302 2303 void __attribute((constructor)) kvm_selftest_init(void) 2304 { 2305 /* Tell stdout not to buffer its content. */ 2306 setbuf(stdout, NULL); 2307 2308 kvm_selftest_arch_init(); 2309 } 2310 2311 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) 2312 { 2313 sparsebit_idx_t pg = 0; 2314 struct userspace_mem_region *region; 2315 2316 if (!vm_arch_has_protected_memory(vm)) 2317 return false; 2318 2319 region = userspace_mem_region_find(vm, paddr, paddr); 2320 TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr); 2321 2322 pg = paddr >> vm->page_shift; 2323 return sparsebit_is_set(region->protected_phy_pages, pg); 2324 } 2325