1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * tools/testing/selftests/kvm/lib/kvm_util.c 4 * 5 * Copyright (C) 2018, Google LLC. 6 */ 7 #include "test_util.h" 8 #include "kvm_util.h" 9 #include "processor.h" 10 #include "ucall_common.h" 11 12 #include <assert.h> 13 #include <sched.h> 14 #include <sys/mman.h> 15 #include <sys/resource.h> 16 #include <sys/types.h> 17 #include <sys/stat.h> 18 #include <unistd.h> 19 #include <linux/kernel.h> 20 21 #define KVM_UTIL_MIN_PFN 2 22 23 uint32_t guest_random_seed; 24 struct guest_random_state guest_rng; 25 static uint32_t last_guest_seed; 26 27 static int vcpu_mmap_sz(void); 28 29 int open_path_or_exit(const char *path, int flags) 30 { 31 int fd; 32 33 fd = open(path, flags); 34 __TEST_REQUIRE(fd >= 0 || errno != ENOENT, "Cannot open %s: %s", path, strerror(errno)); 35 TEST_ASSERT(fd >= 0, "Failed to open '%s'", path); 36 37 return fd; 38 } 39 40 /* 41 * Open KVM_DEV_PATH if available, otherwise exit the entire program. 42 * 43 * Input Args: 44 * flags - The flags to pass when opening KVM_DEV_PATH. 45 * 46 * Return: 47 * The opened file descriptor of /dev/kvm. 48 */ 49 static int _open_kvm_dev_path_or_exit(int flags) 50 { 51 return open_path_or_exit(KVM_DEV_PATH, flags); 52 } 53 54 int open_kvm_dev_path_or_exit(void) 55 { 56 return _open_kvm_dev_path_or_exit(O_RDONLY); 57 } 58 59 static ssize_t get_module_param(const char *module_name, const char *param, 60 void *buffer, size_t buffer_size) 61 { 62 const int path_size = 128; 63 char path[path_size]; 64 ssize_t bytes_read; 65 int fd, r; 66 67 /* Verify KVM is loaded, to provide a more helpful SKIP message. */ 68 close(open_kvm_dev_path_or_exit()); 69 70 r = snprintf(path, path_size, "/sys/module/%s/parameters/%s", 71 module_name, param); 72 TEST_ASSERT(r < path_size, 73 "Failed to construct sysfs path in %d bytes.", path_size); 74 75 fd = open_path_or_exit(path, O_RDONLY); 76 77 bytes_read = read(fd, buffer, buffer_size); 78 TEST_ASSERT(bytes_read > 0, "read(%s) returned %ld, wanted %ld bytes", 79 path, bytes_read, buffer_size); 80 81 r = close(fd); 82 TEST_ASSERT(!r, "close(%s) failed", path); 83 return bytes_read; 84 } 85 86 static int get_module_param_integer(const char *module_name, const char *param) 87 { 88 /* 89 * 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the 90 * NUL char, and 1 byte because the kernel sucks and inserts a newline 91 * at the end. 92 */ 93 char value[16 + 1 + 1]; 94 ssize_t r; 95 96 memset(value, '\0', sizeof(value)); 97 98 r = get_module_param(module_name, param, value, sizeof(value)); 99 TEST_ASSERT(value[r - 1] == '\n', 100 "Expected trailing newline, got char '%c'", value[r - 1]); 101 102 /* 103 * Squash the newline, otherwise atoi_paranoid() will complain about 104 * trailing non-NUL characters in the string. 105 */ 106 value[r - 1] = '\0'; 107 return atoi_paranoid(value); 108 } 109 110 static bool get_module_param_bool(const char *module_name, const char *param) 111 { 112 char value; 113 ssize_t r; 114 115 r = get_module_param(module_name, param, &value, sizeof(value)); 116 TEST_ASSERT_EQ(r, 1); 117 118 if (value == 'Y') 119 return true; 120 else if (value == 'N') 121 return false; 122 123 TEST_FAIL("Unrecognized value '%c' for boolean module param", value); 124 } 125 126 bool get_kvm_param_bool(const char *param) 127 { 128 return get_module_param_bool("kvm", param); 129 } 130 131 bool get_kvm_intel_param_bool(const char *param) 132 { 133 return get_module_param_bool("kvm_intel", param); 134 } 135 136 bool get_kvm_amd_param_bool(const char *param) 137 { 138 return get_module_param_bool("kvm_amd", param); 139 } 140 141 int get_kvm_param_integer(const char *param) 142 { 143 return get_module_param_integer("kvm", param); 144 } 145 146 int get_kvm_intel_param_integer(const char *param) 147 { 148 return get_module_param_integer("kvm_intel", param); 149 } 150 151 int get_kvm_amd_param_integer(const char *param) 152 { 153 return get_module_param_integer("kvm_amd", param); 154 } 155 156 /* 157 * Capability 158 * 159 * Input Args: 160 * cap - Capability 161 * 162 * Output Args: None 163 * 164 * Return: 165 * On success, the Value corresponding to the capability (KVM_CAP_*) 166 * specified by the value of cap. On failure a TEST_ASSERT failure 167 * is produced. 168 * 169 * Looks up and returns the value corresponding to the capability 170 * (KVM_CAP_*) given by cap. 171 */ 172 unsigned int kvm_check_cap(long cap) 173 { 174 int ret; 175 int kvm_fd; 176 177 kvm_fd = open_kvm_dev_path_or_exit(); 178 ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap); 179 TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret)); 180 181 close(kvm_fd); 182 183 return (unsigned int)ret; 184 } 185 186 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) 187 { 188 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) 189 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); 190 else 191 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); 192 vm->dirty_ring_size = ring_size; 193 } 194 195 static void vm_open(struct kvm_vm *vm) 196 { 197 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); 198 199 TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT)); 200 201 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); 202 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); 203 204 if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD)) 205 vm->stats.fd = vm_get_stats_fd(vm); 206 else 207 vm->stats.fd = -1; 208 } 209 210 const char *vm_guest_mode_string(uint32_t i) 211 { 212 static const char * const strings[] = { 213 [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", 214 [VM_MODE_P52V48_16K] = "PA-bits:52, VA-bits:48, 16K pages", 215 [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages", 216 [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages", 217 [VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages", 218 [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages", 219 [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages", 220 [VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages", 221 [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages", 222 [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", 223 [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages", 224 [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages", 225 [VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages", 226 [VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages", 227 [VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages", 228 [VM_MODE_P47V47_16K] = "PA-bits:47, VA-bits:47, 16K pages", 229 [VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages", 230 }; 231 _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES, 232 "Missing new mode strings?"); 233 234 TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i); 235 236 return strings[i]; 237 } 238 239 const struct vm_guest_mode_params vm_guest_mode_params[] = { 240 [VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 }, 241 [VM_MODE_P52V48_16K] = { 52, 48, 0x4000, 14 }, 242 [VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 }, 243 [VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 }, 244 [VM_MODE_P48V48_16K] = { 48, 48, 0x4000, 14 }, 245 [VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 }, 246 [VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 }, 247 [VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 }, 248 [VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 }, 249 [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 }, 250 [VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 }, 251 [VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 }, 252 [VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 }, 253 [VM_MODE_P36V48_16K] = { 36, 48, 0x4000, 14 }, 254 [VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 }, 255 [VM_MODE_P47V47_16K] = { 47, 47, 0x4000, 14 }, 256 [VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 }, 257 }; 258 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES, 259 "Missing new mode params?"); 260 261 /* 262 * Initializes vm->vpages_valid to match the canonical VA space of the 263 * architecture. 264 * 265 * The default implementation is valid for architectures which split the 266 * range addressed by a single page table into a low and high region 267 * based on the MSB of the VA. On architectures with this behavior 268 * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1), -1]. 269 */ 270 __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm) 271 { 272 sparsebit_set_num(vm->vpages_valid, 273 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); 274 sparsebit_set_num(vm->vpages_valid, 275 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, 276 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); 277 } 278 279 struct kvm_vm *____vm_create(struct vm_shape shape) 280 { 281 struct kvm_vm *vm; 282 283 vm = calloc(1, sizeof(*vm)); 284 TEST_ASSERT(vm != NULL, "Insufficient Memory"); 285 286 INIT_LIST_HEAD(&vm->vcpus); 287 vm->regions.gpa_tree = RB_ROOT; 288 vm->regions.hva_tree = RB_ROOT; 289 hash_init(vm->regions.slot_hash); 290 291 vm->mode = shape.mode; 292 vm->type = shape.type; 293 294 vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits; 295 vm->va_bits = vm_guest_mode_params[vm->mode].va_bits; 296 vm->page_size = vm_guest_mode_params[vm->mode].page_size; 297 vm->page_shift = vm_guest_mode_params[vm->mode].page_shift; 298 299 /* Setup mode specific traits. */ 300 switch (vm->mode) { 301 case VM_MODE_P52V48_4K: 302 vm->pgtable_levels = 4; 303 break; 304 case VM_MODE_P52V48_64K: 305 vm->pgtable_levels = 3; 306 break; 307 case VM_MODE_P48V48_4K: 308 vm->pgtable_levels = 4; 309 break; 310 case VM_MODE_P48V48_64K: 311 vm->pgtable_levels = 3; 312 break; 313 case VM_MODE_P40V48_4K: 314 case VM_MODE_P36V48_4K: 315 vm->pgtable_levels = 4; 316 break; 317 case VM_MODE_P40V48_64K: 318 case VM_MODE_P36V48_64K: 319 vm->pgtable_levels = 3; 320 break; 321 case VM_MODE_P52V48_16K: 322 case VM_MODE_P48V48_16K: 323 case VM_MODE_P40V48_16K: 324 case VM_MODE_P36V48_16K: 325 vm->pgtable_levels = 4; 326 break; 327 case VM_MODE_P47V47_16K: 328 case VM_MODE_P36V47_16K: 329 vm->pgtable_levels = 3; 330 break; 331 case VM_MODE_PXXV48_4K: 332 #ifdef __x86_64__ 333 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); 334 kvm_init_vm_address_properties(vm); 335 /* 336 * Ignore KVM support for 5-level paging (vm->va_bits == 57), 337 * it doesn't take effect unless a CR4.LA57 is set, which it 338 * isn't for this mode (48-bit virtual address space). 339 */ 340 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, 341 "Linear address width (%d bits) not supported", 342 vm->va_bits); 343 pr_debug("Guest physical address width detected: %d\n", 344 vm->pa_bits); 345 vm->pgtable_levels = 4; 346 vm->va_bits = 48; 347 #else 348 TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms"); 349 #endif 350 break; 351 case VM_MODE_P47V64_4K: 352 vm->pgtable_levels = 5; 353 break; 354 case VM_MODE_P44V64_4K: 355 vm->pgtable_levels = 5; 356 break; 357 default: 358 TEST_FAIL("Unknown guest mode: 0x%x", vm->mode); 359 } 360 361 #ifdef __aarch64__ 362 TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types"); 363 if (vm->pa_bits != 40) 364 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); 365 #endif 366 367 vm_open(vm); 368 369 /* Limit to VA-bit canonical virtual addresses. */ 370 vm->vpages_valid = sparsebit_alloc(); 371 vm_vaddr_populate_bitmap(vm); 372 373 /* Limit physical addresses to PA-bits. */ 374 vm->max_gfn = vm_compute_max_gfn(vm); 375 376 /* Allocate and setup memory for guest. */ 377 vm->vpages_mapped = sparsebit_alloc(); 378 379 return vm; 380 } 381 382 static uint64_t vm_nr_pages_required(enum vm_guest_mode mode, 383 uint32_t nr_runnable_vcpus, 384 uint64_t extra_mem_pages) 385 { 386 uint64_t page_size = vm_guest_mode_params[mode].page_size; 387 uint64_t nr_pages; 388 389 TEST_ASSERT(nr_runnable_vcpus, 390 "Use vm_create_barebones() for VMs that _never_ have vCPUs"); 391 392 TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS), 393 "nr_vcpus = %d too large for host, max-vcpus = %d", 394 nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS)); 395 396 /* 397 * Arbitrarily allocate 512 pages (2mb when page size is 4kb) for the 398 * test code and other per-VM assets that will be loaded into memslot0. 399 */ 400 nr_pages = 512; 401 402 /* Account for the per-vCPU stacks on behalf of the test. */ 403 nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS; 404 405 /* 406 * Account for the number of pages needed for the page tables. The 407 * maximum page table size for a memory region will be when the 408 * smallest page size is used. Considering each page contains x page 409 * table descriptors, the total extra size for page tables (for extra 410 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller 411 * than N/x*2. 412 */ 413 nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2; 414 415 /* Account for the number of pages needed by ucall. */ 416 nr_pages += ucall_nr_pages_required(page_size); 417 418 return vm_adjust_num_guest_pages(mode, nr_pages); 419 } 420 421 void kvm_set_files_rlimit(uint32_t nr_vcpus) 422 { 423 /* 424 * Each vCPU will open two file descriptors: the vCPU itself and the 425 * vCPU's binary stats file descriptor. Add an arbitrary amount of 426 * buffer for all other files a test may open. 427 */ 428 int nr_fds_wanted = nr_vcpus * 2 + 100; 429 struct rlimit rl; 430 431 /* 432 * Check that we're allowed to open nr_fds_wanted file descriptors and 433 * try raising the limits if needed. 434 */ 435 TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!"); 436 437 if (rl.rlim_cur < nr_fds_wanted) { 438 rl.rlim_cur = nr_fds_wanted; 439 if (rl.rlim_max < nr_fds_wanted) { 440 int old_rlim_max = rl.rlim_max; 441 442 rl.rlim_max = nr_fds_wanted; 443 __TEST_REQUIRE(setrlimit(RLIMIT_NOFILE, &rl) >= 0, 444 "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)", 445 old_rlim_max, nr_fds_wanted); 446 } else { 447 TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!"); 448 } 449 } 450 451 } 452 453 static bool is_guest_memfd_required(struct vm_shape shape) 454 { 455 #ifdef __x86_64__ 456 return shape.type == KVM_X86_SNP_VM; 457 #else 458 return false; 459 #endif 460 } 461 462 struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, 463 uint64_t nr_extra_pages) 464 { 465 uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus, 466 nr_extra_pages); 467 struct userspace_mem_region *slot0; 468 struct kvm_vm *vm; 469 int i, flags; 470 471 kvm_set_files_rlimit(nr_runnable_vcpus); 472 473 pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__, 474 vm_guest_mode_string(shape.mode), shape.type, nr_pages); 475 476 vm = ____vm_create(shape); 477 478 /* 479 * Force GUEST_MEMFD for the primary memory region if necessary, e.g. 480 * for CoCo VMs that require GUEST_MEMFD backed private memory. 481 */ 482 flags = 0; 483 if (is_guest_memfd_required(shape)) 484 flags |= KVM_MEM_GUEST_MEMFD; 485 486 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, flags); 487 for (i = 0; i < NR_MEM_REGIONS; i++) 488 vm->memslots[i] = 0; 489 490 kvm_vm_elf_load(vm, program_invocation_name); 491 492 /* 493 * TODO: Add proper defines to protect the library's memslots, and then 494 * carve out memslot1 for the ucall MMIO address. KVM treats writes to 495 * read-only memslots as MMIO, and creating a read-only memslot for the 496 * MMIO region would prevent silently clobbering the MMIO region. 497 */ 498 slot0 = memslot2region(vm, 0); 499 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); 500 501 if (guest_random_seed != last_guest_seed) { 502 pr_info("Random seed: 0x%x\n", guest_random_seed); 503 last_guest_seed = guest_random_seed; 504 } 505 guest_rng = new_guest_random_state(guest_random_seed); 506 sync_global_to_guest(vm, guest_rng); 507 508 kvm_arch_vm_post_create(vm); 509 510 return vm; 511 } 512 513 /* 514 * VM Create with customized parameters 515 * 516 * Input Args: 517 * mode - VM Mode (e.g. VM_MODE_P52V48_4K) 518 * nr_vcpus - VCPU count 519 * extra_mem_pages - Non-slot0 physical memory total size 520 * guest_code - Guest entry point 521 * vcpuids - VCPU IDs 522 * 523 * Output Args: None 524 * 525 * Return: 526 * Pointer to opaque structure that describes the created VM. 527 * 528 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K). 529 * extra_mem_pages is only used to calculate the maximum page table size, 530 * no real memory allocation for non-slot0 memory in this function. 531 */ 532 struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, 533 uint64_t extra_mem_pages, 534 void *guest_code, struct kvm_vcpu *vcpus[]) 535 { 536 struct kvm_vm *vm; 537 int i; 538 539 TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array"); 540 541 vm = __vm_create(shape, nr_vcpus, extra_mem_pages); 542 543 for (i = 0; i < nr_vcpus; ++i) 544 vcpus[i] = vm_vcpu_add(vm, i, guest_code); 545 546 return vm; 547 } 548 549 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, 550 struct kvm_vcpu **vcpu, 551 uint64_t extra_mem_pages, 552 void *guest_code) 553 { 554 struct kvm_vcpu *vcpus[1]; 555 struct kvm_vm *vm; 556 557 vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus); 558 559 *vcpu = vcpus[0]; 560 return vm; 561 } 562 563 /* 564 * VM Restart 565 * 566 * Input Args: 567 * vm - VM that has been released before 568 * 569 * Output Args: None 570 * 571 * Reopens the file descriptors associated to the VM and reinstates the 572 * global state, such as the irqchip and the memory regions that are mapped 573 * into the guest. 574 */ 575 void kvm_vm_restart(struct kvm_vm *vmp) 576 { 577 int ctr; 578 struct userspace_mem_region *region; 579 580 vm_open(vmp); 581 if (vmp->has_irqchip) 582 vm_create_irqchip(vmp); 583 584 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { 585 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, ®ion->region); 586 587 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n" 588 " rc: %i errno: %i\n" 589 " slot: %u flags: 0x%x\n" 590 " guest_phys_addr: 0x%llx size: 0x%llx", 591 ret, errno, region->region.slot, 592 region->region.flags, 593 region->region.guest_phys_addr, 594 region->region.memory_size); 595 } 596 } 597 598 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, 599 uint32_t vcpu_id) 600 { 601 return __vm_vcpu_add(vm, vcpu_id); 602 } 603 604 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm) 605 { 606 kvm_vm_restart(vm); 607 608 return vm_vcpu_recreate(vm, 0); 609 } 610 611 void kvm_pin_this_task_to_pcpu(uint32_t pcpu) 612 { 613 cpu_set_t mask; 614 int r; 615 616 CPU_ZERO(&mask); 617 CPU_SET(pcpu, &mask); 618 r = sched_setaffinity(0, sizeof(mask), &mask); 619 TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.", pcpu); 620 } 621 622 static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) 623 { 624 uint32_t pcpu = atoi_non_negative("CPU number", cpu_str); 625 626 TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask), 627 "Not allowed to run on pCPU '%d', check cgroups?", pcpu); 628 return pcpu; 629 } 630 631 void kvm_print_vcpu_pinning_help(void) 632 { 633 const char *name = program_invocation_name; 634 635 printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n" 636 " values (target pCPU), one for each vCPU, plus an optional\n" 637 " entry for the main application task (specified via entry\n" 638 " <nr_vcpus + 1>). If used, entries must be provided for all\n" 639 " vCPUs, i.e. pinning vCPUs is all or nothing.\n\n" 640 " E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n" 641 " vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n" 642 " %s -v 3 -c 22,23,24,50\n\n" 643 " To leave the application task unpinned, drop the final entry:\n\n" 644 " %s -v 3 -c 22,23,24\n\n" 645 " (default: no pinning)\n", name, name); 646 } 647 648 void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], 649 int nr_vcpus) 650 { 651 cpu_set_t allowed_mask; 652 char *cpu, *cpu_list; 653 char delim[2] = ","; 654 int i, r; 655 656 cpu_list = strdup(pcpus_string); 657 TEST_ASSERT(cpu_list, "strdup() allocation failed."); 658 659 r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask); 660 TEST_ASSERT(!r, "sched_getaffinity() failed"); 661 662 cpu = strtok(cpu_list, delim); 663 664 /* 1. Get all pcpus for vcpus. */ 665 for (i = 0; i < nr_vcpus; i++) { 666 TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'", i); 667 vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask); 668 cpu = strtok(NULL, delim); 669 } 670 671 /* 2. Check if the main worker needs to be pinned. */ 672 if (cpu) { 673 kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask)); 674 cpu = strtok(NULL, delim); 675 } 676 677 TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu); 678 free(cpu_list); 679 } 680 681 /* 682 * Userspace Memory Region Find 683 * 684 * Input Args: 685 * vm - Virtual Machine 686 * start - Starting VM physical address 687 * end - Ending VM physical address, inclusive. 688 * 689 * Output Args: None 690 * 691 * Return: 692 * Pointer to overlapping region, NULL if no such region. 693 * 694 * Searches for a region with any physical memory that overlaps with 695 * any portion of the guest physical addresses from start to end 696 * inclusive. If multiple overlapping regions exist, a pointer to any 697 * of the regions is returned. Null is returned only when no overlapping 698 * region exists. 699 */ 700 static struct userspace_mem_region * 701 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) 702 { 703 struct rb_node *node; 704 705 for (node = vm->regions.gpa_tree.rb_node; node; ) { 706 struct userspace_mem_region *region = 707 container_of(node, struct userspace_mem_region, gpa_node); 708 uint64_t existing_start = region->region.guest_phys_addr; 709 uint64_t existing_end = region->region.guest_phys_addr 710 + region->region.memory_size - 1; 711 if (start <= existing_end && end >= existing_start) 712 return region; 713 714 if (start < existing_start) 715 node = node->rb_left; 716 else 717 node = node->rb_right; 718 } 719 720 return NULL; 721 } 722 723 static void kvm_stats_release(struct kvm_binary_stats *stats) 724 { 725 int ret; 726 727 if (stats->fd < 0) 728 return; 729 730 if (stats->desc) { 731 free(stats->desc); 732 stats->desc = NULL; 733 } 734 735 ret = close(stats->fd); 736 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret)); 737 stats->fd = -1; 738 } 739 740 __weak void vcpu_arch_free(struct kvm_vcpu *vcpu) 741 { 742 743 } 744 745 /* 746 * VM VCPU Remove 747 * 748 * Input Args: 749 * vcpu - VCPU to remove 750 * 751 * Output Args: None 752 * 753 * Return: None, TEST_ASSERT failures for all error conditions 754 * 755 * Removes a vCPU from a VM and frees its resources. 756 */ 757 static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu) 758 { 759 int ret; 760 761 if (vcpu->dirty_gfns) { 762 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); 763 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); 764 vcpu->dirty_gfns = NULL; 765 } 766 767 ret = munmap(vcpu->run, vcpu_mmap_sz()); 768 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); 769 770 ret = close(vcpu->fd); 771 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret)); 772 773 kvm_stats_release(&vcpu->stats); 774 775 list_del(&vcpu->list); 776 777 vcpu_arch_free(vcpu); 778 free(vcpu); 779 } 780 781 void kvm_vm_release(struct kvm_vm *vmp) 782 { 783 struct kvm_vcpu *vcpu, *tmp; 784 int ret; 785 786 list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list) 787 vm_vcpu_rm(vmp, vcpu); 788 789 ret = close(vmp->fd); 790 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret)); 791 792 ret = close(vmp->kvm_fd); 793 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret)); 794 795 /* Free cached stats metadata and close FD */ 796 kvm_stats_release(&vmp->stats); 797 } 798 799 static void __vm_mem_region_delete(struct kvm_vm *vm, 800 struct userspace_mem_region *region) 801 { 802 int ret; 803 804 rb_erase(®ion->gpa_node, &vm->regions.gpa_tree); 805 rb_erase(®ion->hva_node, &vm->regions.hva_tree); 806 hash_del(®ion->slot_node); 807 808 sparsebit_free(®ion->unused_phy_pages); 809 sparsebit_free(®ion->protected_phy_pages); 810 ret = munmap(region->mmap_start, region->mmap_size); 811 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); 812 if (region->fd >= 0) { 813 /* There's an extra map when using shared memory. */ 814 ret = munmap(region->mmap_alias, region->mmap_size); 815 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); 816 close(region->fd); 817 } 818 if (region->region.guest_memfd >= 0) 819 close(region->region.guest_memfd); 820 821 free(region); 822 } 823 824 /* 825 * Destroys and frees the VM pointed to by vmp. 826 */ 827 void kvm_vm_free(struct kvm_vm *vmp) 828 { 829 int ctr; 830 struct hlist_node *node; 831 struct userspace_mem_region *region; 832 833 if (vmp == NULL) 834 return; 835 836 /* Free userspace_mem_regions. */ 837 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) 838 __vm_mem_region_delete(vmp, region); 839 840 /* Free sparsebit arrays. */ 841 sparsebit_free(&vmp->vpages_valid); 842 sparsebit_free(&vmp->vpages_mapped); 843 844 kvm_vm_release(vmp); 845 846 /* Free the structure describing the VM. */ 847 free(vmp); 848 } 849 850 int kvm_memfd_alloc(size_t size, bool hugepages) 851 { 852 int memfd_flags = MFD_CLOEXEC; 853 int fd, r; 854 855 if (hugepages) 856 memfd_flags |= MFD_HUGETLB; 857 858 fd = memfd_create("kvm_selftest", memfd_flags); 859 TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd)); 860 861 r = ftruncate(fd, size); 862 TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r)); 863 864 r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size); 865 TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r)); 866 867 return fd; 868 } 869 870 static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree, 871 struct userspace_mem_region *region) 872 { 873 struct rb_node **cur, *parent; 874 875 for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) { 876 struct userspace_mem_region *cregion; 877 878 cregion = container_of(*cur, typeof(*cregion), gpa_node); 879 parent = *cur; 880 if (region->region.guest_phys_addr < 881 cregion->region.guest_phys_addr) 882 cur = &(*cur)->rb_left; 883 else { 884 TEST_ASSERT(region->region.guest_phys_addr != 885 cregion->region.guest_phys_addr, 886 "Duplicate GPA in region tree"); 887 888 cur = &(*cur)->rb_right; 889 } 890 } 891 892 rb_link_node(®ion->gpa_node, parent, cur); 893 rb_insert_color(®ion->gpa_node, gpa_tree); 894 } 895 896 static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree, 897 struct userspace_mem_region *region) 898 { 899 struct rb_node **cur, *parent; 900 901 for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) { 902 struct userspace_mem_region *cregion; 903 904 cregion = container_of(*cur, typeof(*cregion), hva_node); 905 parent = *cur; 906 if (region->host_mem < cregion->host_mem) 907 cur = &(*cur)->rb_left; 908 else { 909 TEST_ASSERT(region->host_mem != 910 cregion->host_mem, 911 "Duplicate HVA in region tree"); 912 913 cur = &(*cur)->rb_right; 914 } 915 } 916 917 rb_link_node(®ion->hva_node, parent, cur); 918 rb_insert_color(®ion->hva_node, hva_tree); 919 } 920 921 922 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 923 uint64_t gpa, uint64_t size, void *hva) 924 { 925 struct kvm_userspace_memory_region region = { 926 .slot = slot, 927 .flags = flags, 928 .guest_phys_addr = gpa, 929 .memory_size = size, 930 .userspace_addr = (uintptr_t)hva, 931 }; 932 933 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion); 934 } 935 936 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 937 uint64_t gpa, uint64_t size, void *hva) 938 { 939 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); 940 941 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)", 942 errno, strerror(errno)); 943 } 944 945 #define TEST_REQUIRE_SET_USER_MEMORY_REGION2() \ 946 __TEST_REQUIRE(kvm_has_cap(KVM_CAP_USER_MEMORY2), \ 947 "KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)") 948 949 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 950 uint64_t gpa, uint64_t size, void *hva, 951 uint32_t guest_memfd, uint64_t guest_memfd_offset) 952 { 953 struct kvm_userspace_memory_region2 region = { 954 .slot = slot, 955 .flags = flags, 956 .guest_phys_addr = gpa, 957 .memory_size = size, 958 .userspace_addr = (uintptr_t)hva, 959 .guest_memfd = guest_memfd, 960 .guest_memfd_offset = guest_memfd_offset, 961 }; 962 963 TEST_REQUIRE_SET_USER_MEMORY_REGION2(); 964 965 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, ®ion); 966 } 967 968 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 969 uint64_t gpa, uint64_t size, void *hva, 970 uint32_t guest_memfd, uint64_t guest_memfd_offset) 971 { 972 int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva, 973 guest_memfd, guest_memfd_offset); 974 975 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed, errno = %d (%s)", 976 errno, strerror(errno)); 977 } 978 979 980 /* FIXME: This thing needs to be ripped apart and rewritten. */ 981 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, 982 uint64_t guest_paddr, uint32_t slot, uint64_t npages, 983 uint32_t flags, int guest_memfd, uint64_t guest_memfd_offset) 984 { 985 int ret; 986 struct userspace_mem_region *region; 987 size_t backing_src_pagesz = get_backing_src_pagesz(src_type); 988 size_t mem_size = npages * vm->page_size; 989 size_t alignment; 990 991 TEST_REQUIRE_SET_USER_MEMORY_REGION2(); 992 993 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, 994 "Number of guest pages is not compatible with the host. " 995 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); 996 997 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " 998 "address not on a page boundary.\n" 999 " guest_paddr: 0x%lx vm->page_size: 0x%x", 1000 guest_paddr, vm->page_size); 1001 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) 1002 <= vm->max_gfn, "Physical range beyond maximum " 1003 "supported physical address,\n" 1004 " guest_paddr: 0x%lx npages: 0x%lx\n" 1005 " vm->max_gfn: 0x%lx vm->page_size: 0x%x", 1006 guest_paddr, npages, vm->max_gfn, vm->page_size); 1007 1008 /* 1009 * Confirm a mem region with an overlapping address doesn't 1010 * already exist. 1011 */ 1012 region = (struct userspace_mem_region *) userspace_mem_region_find( 1013 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); 1014 if (region != NULL) 1015 TEST_FAIL("overlapping userspace_mem_region already " 1016 "exists\n" 1017 " requested guest_paddr: 0x%lx npages: 0x%lx " 1018 "page_size: 0x%x\n" 1019 " existing guest_paddr: 0x%lx size: 0x%lx", 1020 guest_paddr, npages, vm->page_size, 1021 (uint64_t) region->region.guest_phys_addr, 1022 (uint64_t) region->region.memory_size); 1023 1024 /* Confirm no region with the requested slot already exists. */ 1025 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, 1026 slot) { 1027 if (region->region.slot != slot) 1028 continue; 1029 1030 TEST_FAIL("A mem region with the requested slot " 1031 "already exists.\n" 1032 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" 1033 " existing slot: %u paddr: 0x%lx size: 0x%lx", 1034 slot, guest_paddr, npages, 1035 region->region.slot, 1036 (uint64_t) region->region.guest_phys_addr, 1037 (uint64_t) region->region.memory_size); 1038 } 1039 1040 /* Allocate and initialize new mem region structure. */ 1041 region = calloc(1, sizeof(*region)); 1042 TEST_ASSERT(region != NULL, "Insufficient Memory"); 1043 region->mmap_size = mem_size; 1044 1045 #ifdef __s390x__ 1046 /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */ 1047 alignment = 0x100000; 1048 #else 1049 alignment = 1; 1050 #endif 1051 1052 /* 1053 * When using THP mmap is not guaranteed to returned a hugepage aligned 1054 * address so we have to pad the mmap. Padding is not needed for HugeTLB 1055 * because mmap will always return an address aligned to the HugeTLB 1056 * page size. 1057 */ 1058 if (src_type == VM_MEM_SRC_ANONYMOUS_THP) 1059 alignment = max(backing_src_pagesz, alignment); 1060 1061 TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz)); 1062 1063 /* Add enough memory to align up if necessary */ 1064 if (alignment > 1) 1065 region->mmap_size += alignment; 1066 1067 region->fd = -1; 1068 if (backing_src_is_shared(src_type)) 1069 region->fd = kvm_memfd_alloc(region->mmap_size, 1070 src_type == VM_MEM_SRC_SHARED_HUGETLB); 1071 1072 region->mmap_start = mmap(NULL, region->mmap_size, 1073 PROT_READ | PROT_WRITE, 1074 vm_mem_backing_src_alias(src_type)->flag, 1075 region->fd, 0); 1076 TEST_ASSERT(region->mmap_start != MAP_FAILED, 1077 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED)); 1078 1079 TEST_ASSERT(!is_backing_src_hugetlb(src_type) || 1080 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), 1081 "mmap_start %p is not aligned to HugeTLB page size 0x%lx", 1082 region->mmap_start, backing_src_pagesz); 1083 1084 /* Align host address */ 1085 region->host_mem = align_ptr_up(region->mmap_start, alignment); 1086 1087 /* As needed perform madvise */ 1088 if ((src_type == VM_MEM_SRC_ANONYMOUS || 1089 src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) { 1090 ret = madvise(region->host_mem, mem_size, 1091 src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE); 1092 TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s", 1093 region->host_mem, mem_size, 1094 vm_mem_backing_src_alias(src_type)->name); 1095 } 1096 1097 region->backing_src_type = src_type; 1098 1099 if (flags & KVM_MEM_GUEST_MEMFD) { 1100 if (guest_memfd < 0) { 1101 uint32_t guest_memfd_flags = 0; 1102 TEST_ASSERT(!guest_memfd_offset, 1103 "Offset must be zero when creating new guest_memfd"); 1104 guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags); 1105 } else { 1106 /* 1107 * Install a unique fd for each memslot so that the fd 1108 * can be closed when the region is deleted without 1109 * needing to track if the fd is owned by the framework 1110 * or by the caller. 1111 */ 1112 guest_memfd = dup(guest_memfd); 1113 TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd)); 1114 } 1115 1116 region->region.guest_memfd = guest_memfd; 1117 region->region.guest_memfd_offset = guest_memfd_offset; 1118 } else { 1119 region->region.guest_memfd = -1; 1120 } 1121 1122 region->unused_phy_pages = sparsebit_alloc(); 1123 if (vm_arch_has_protected_memory(vm)) 1124 region->protected_phy_pages = sparsebit_alloc(); 1125 sparsebit_set_num(region->unused_phy_pages, 1126 guest_paddr >> vm->page_shift, npages); 1127 region->region.slot = slot; 1128 region->region.flags = flags; 1129 region->region.guest_phys_addr = guest_paddr; 1130 region->region.memory_size = npages * vm->page_size; 1131 region->region.userspace_addr = (uintptr_t) region->host_mem; 1132 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); 1133 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n" 1134 " rc: %i errno: %i\n" 1135 " slot: %u flags: 0x%x\n" 1136 " guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d", 1137 ret, errno, slot, flags, 1138 guest_paddr, (uint64_t) region->region.memory_size, 1139 region->region.guest_memfd); 1140 1141 /* Add to quick lookup data structures */ 1142 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); 1143 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); 1144 hash_add(vm->regions.slot_hash, ®ion->slot_node, slot); 1145 1146 /* If shared memory, create an alias. */ 1147 if (region->fd >= 0) { 1148 region->mmap_alias = mmap(NULL, region->mmap_size, 1149 PROT_READ | PROT_WRITE, 1150 vm_mem_backing_src_alias(src_type)->flag, 1151 region->fd, 0); 1152 TEST_ASSERT(region->mmap_alias != MAP_FAILED, 1153 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED)); 1154 1155 /* Align host alias address */ 1156 region->host_alias = align_ptr_up(region->mmap_alias, alignment); 1157 } 1158 } 1159 1160 void vm_userspace_mem_region_add(struct kvm_vm *vm, 1161 enum vm_mem_backing_src_type src_type, 1162 uint64_t guest_paddr, uint32_t slot, 1163 uint64_t npages, uint32_t flags) 1164 { 1165 vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0); 1166 } 1167 1168 /* 1169 * Memslot to region 1170 * 1171 * Input Args: 1172 * vm - Virtual Machine 1173 * memslot - KVM memory slot ID 1174 * 1175 * Output Args: None 1176 * 1177 * Return: 1178 * Pointer to memory region structure that describe memory region 1179 * using kvm memory slot ID given by memslot. TEST_ASSERT failure 1180 * on error (e.g. currently no memory region using memslot as a KVM 1181 * memory slot ID). 1182 */ 1183 struct userspace_mem_region * 1184 memslot2region(struct kvm_vm *vm, uint32_t memslot) 1185 { 1186 struct userspace_mem_region *region; 1187 1188 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, 1189 memslot) 1190 if (region->region.slot == memslot) 1191 return region; 1192 1193 fprintf(stderr, "No mem region with the requested slot found,\n" 1194 " requested slot: %u\n", memslot); 1195 fputs("---- vm dump ----\n", stderr); 1196 vm_dump(stderr, vm, 2); 1197 TEST_FAIL("Mem region not found"); 1198 return NULL; 1199 } 1200 1201 /* 1202 * VM Memory Region Flags Set 1203 * 1204 * Input Args: 1205 * vm - Virtual Machine 1206 * flags - Starting guest physical address 1207 * 1208 * Output Args: None 1209 * 1210 * Return: None 1211 * 1212 * Sets the flags of the memory region specified by the value of slot, 1213 * to the values given by flags. 1214 */ 1215 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) 1216 { 1217 int ret; 1218 struct userspace_mem_region *region; 1219 1220 region = memslot2region(vm, slot); 1221 1222 region->region.flags = flags; 1223 1224 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); 1225 1226 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n" 1227 " rc: %i errno: %i slot: %u flags: 0x%x", 1228 ret, errno, slot, flags); 1229 } 1230 1231 /* 1232 * VM Memory Region Move 1233 * 1234 * Input Args: 1235 * vm - Virtual Machine 1236 * slot - Slot of the memory region to move 1237 * new_gpa - Starting guest physical address 1238 * 1239 * Output Args: None 1240 * 1241 * Return: None 1242 * 1243 * Change the gpa of a memory region. 1244 */ 1245 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) 1246 { 1247 struct userspace_mem_region *region; 1248 int ret; 1249 1250 region = memslot2region(vm, slot); 1251 1252 region->region.guest_phys_addr = new_gpa; 1253 1254 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); 1255 1256 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed\n" 1257 "ret: %i errno: %i slot: %u new_gpa: 0x%lx", 1258 ret, errno, slot, new_gpa); 1259 } 1260 1261 /* 1262 * VM Memory Region Delete 1263 * 1264 * Input Args: 1265 * vm - Virtual Machine 1266 * slot - Slot of the memory region to delete 1267 * 1268 * Output Args: None 1269 * 1270 * Return: None 1271 * 1272 * Delete a memory region. 1273 */ 1274 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) 1275 { 1276 struct userspace_mem_region *region = memslot2region(vm, slot); 1277 1278 region->region.memory_size = 0; 1279 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); 1280 1281 __vm_mem_region_delete(vm, region); 1282 } 1283 1284 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, 1285 bool punch_hole) 1286 { 1287 const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0); 1288 struct userspace_mem_region *region; 1289 uint64_t end = base + size; 1290 uint64_t gpa, len; 1291 off_t fd_offset; 1292 int ret; 1293 1294 for (gpa = base; gpa < end; gpa += len) { 1295 uint64_t offset; 1296 1297 region = userspace_mem_region_find(vm, gpa, gpa); 1298 TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD, 1299 "Private memory region not found for GPA 0x%lx", gpa); 1300 1301 offset = gpa - region->region.guest_phys_addr; 1302 fd_offset = region->region.guest_memfd_offset + offset; 1303 len = min_t(uint64_t, end - gpa, region->region.memory_size - offset); 1304 1305 ret = fallocate(region->region.guest_memfd, mode, fd_offset, len); 1306 TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx", 1307 punch_hole ? "punch hole" : "allocate", gpa, len, 1308 region->region.guest_memfd, mode, fd_offset); 1309 } 1310 } 1311 1312 /* Returns the size of a vCPU's kvm_run structure. */ 1313 static int vcpu_mmap_sz(void) 1314 { 1315 int dev_fd, ret; 1316 1317 dev_fd = open_kvm_dev_path_or_exit(); 1318 1319 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); 1320 TEST_ASSERT(ret >= sizeof(struct kvm_run), 1321 KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret)); 1322 1323 close(dev_fd); 1324 1325 return ret; 1326 } 1327 1328 static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) 1329 { 1330 struct kvm_vcpu *vcpu; 1331 1332 list_for_each_entry(vcpu, &vm->vcpus, list) { 1333 if (vcpu->id == vcpu_id) 1334 return true; 1335 } 1336 1337 return false; 1338 } 1339 1340 /* 1341 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id. 1342 * No additional vCPU setup is done. Returns the vCPU. 1343 */ 1344 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) 1345 { 1346 struct kvm_vcpu *vcpu; 1347 1348 /* Confirm a vcpu with the specified id doesn't already exist. */ 1349 TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id); 1350 1351 /* Allocate and initialize new vcpu structure. */ 1352 vcpu = calloc(1, sizeof(*vcpu)); 1353 TEST_ASSERT(vcpu != NULL, "Insufficient Memory"); 1354 1355 vcpu->vm = vm; 1356 vcpu->id = vcpu_id; 1357 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id); 1358 TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm); 1359 1360 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size " 1361 "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi", 1362 vcpu_mmap_sz(), sizeof(*vcpu->run)); 1363 vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(), 1364 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0); 1365 TEST_ASSERT(vcpu->run != MAP_FAILED, 1366 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED)); 1367 1368 if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD)) 1369 vcpu->stats.fd = vcpu_get_stats_fd(vcpu); 1370 else 1371 vcpu->stats.fd = -1; 1372 1373 /* Add to linked-list of VCPUs. */ 1374 list_add(&vcpu->list, &vm->vcpus); 1375 1376 return vcpu; 1377 } 1378 1379 /* 1380 * VM Virtual Address Unused Gap 1381 * 1382 * Input Args: 1383 * vm - Virtual Machine 1384 * sz - Size (bytes) 1385 * vaddr_min - Minimum Virtual Address 1386 * 1387 * Output Args: None 1388 * 1389 * Return: 1390 * Lowest virtual address at or below vaddr_min, with at least 1391 * sz unused bytes. TEST_ASSERT failure if no area of at least 1392 * size sz is available. 1393 * 1394 * Within the VM specified by vm, locates the lowest starting virtual 1395 * address >= vaddr_min, that has at least sz unallocated bytes. A 1396 * TEST_ASSERT failure occurs for invalid input or no area of at least 1397 * sz unallocated bytes >= vaddr_min is available. 1398 */ 1399 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, 1400 vm_vaddr_t vaddr_min) 1401 { 1402 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; 1403 1404 /* Determine lowest permitted virtual page index. */ 1405 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; 1406 if ((pgidx_start * vm->page_size) < vaddr_min) 1407 goto no_va_found; 1408 1409 /* Loop over section with enough valid virtual page indexes. */ 1410 if (!sparsebit_is_set_num(vm->vpages_valid, 1411 pgidx_start, pages)) 1412 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, 1413 pgidx_start, pages); 1414 do { 1415 /* 1416 * Are there enough unused virtual pages available at 1417 * the currently proposed starting virtual page index. 1418 * If not, adjust proposed starting index to next 1419 * possible. 1420 */ 1421 if (sparsebit_is_clear_num(vm->vpages_mapped, 1422 pgidx_start, pages)) 1423 goto va_found; 1424 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, 1425 pgidx_start, pages); 1426 if (pgidx_start == 0) 1427 goto no_va_found; 1428 1429 /* 1430 * If needed, adjust proposed starting virtual address, 1431 * to next range of valid virtual addresses. 1432 */ 1433 if (!sparsebit_is_set_num(vm->vpages_valid, 1434 pgidx_start, pages)) { 1435 pgidx_start = sparsebit_next_set_num( 1436 vm->vpages_valid, pgidx_start, pages); 1437 if (pgidx_start == 0) 1438 goto no_va_found; 1439 } 1440 } while (pgidx_start != 0); 1441 1442 no_va_found: 1443 TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages); 1444 1445 /* NOT REACHED */ 1446 return -1; 1447 1448 va_found: 1449 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, 1450 pgidx_start, pages), 1451 "Unexpected, invalid virtual page index range,\n" 1452 " pgidx_start: 0x%lx\n" 1453 " pages: 0x%lx", 1454 pgidx_start, pages); 1455 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, 1456 pgidx_start, pages), 1457 "Unexpected, pages already mapped,\n" 1458 " pgidx_start: 0x%lx\n" 1459 " pages: 0x%lx", 1460 pgidx_start, pages); 1461 1462 return pgidx_start * vm->page_size; 1463 } 1464 1465 static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, 1466 vm_vaddr_t vaddr_min, 1467 enum kvm_mem_region_type type, 1468 bool protected) 1469 { 1470 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); 1471 1472 virt_pgd_alloc(vm); 1473 vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages, 1474 KVM_UTIL_MIN_PFN * vm->page_size, 1475 vm->memslots[type], protected); 1476 1477 /* 1478 * Find an unused range of virtual page addresses of at least 1479 * pages in length. 1480 */ 1481 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); 1482 1483 /* Map the virtual pages. */ 1484 for (vm_vaddr_t vaddr = vaddr_start; pages > 0; 1485 pages--, vaddr += vm->page_size, paddr += vm->page_size) { 1486 1487 virt_pg_map(vm, vaddr, paddr); 1488 1489 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); 1490 } 1491 1492 return vaddr_start; 1493 } 1494 1495 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, 1496 enum kvm_mem_region_type type) 1497 { 1498 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, 1499 vm_arch_has_protected_memory(vm)); 1500 } 1501 1502 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, 1503 vm_vaddr_t vaddr_min, 1504 enum kvm_mem_region_type type) 1505 { 1506 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false); 1507 } 1508 1509 /* 1510 * VM Virtual Address Allocate 1511 * 1512 * Input Args: 1513 * vm - Virtual Machine 1514 * sz - Size in bytes 1515 * vaddr_min - Minimum starting virtual address 1516 * 1517 * Output Args: None 1518 * 1519 * Return: 1520 * Starting guest virtual address 1521 * 1522 * Allocates at least sz bytes within the virtual address space of the vm 1523 * given by vm. The allocated bytes are mapped to a virtual address >= 1524 * the address given by vaddr_min. Note that each allocation uses a 1525 * a unique set of pages, with the minimum real allocation being at least 1526 * a page. The allocated physical space comes from the TEST_DATA memory region. 1527 */ 1528 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) 1529 { 1530 return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA); 1531 } 1532 1533 /* 1534 * VM Virtual Address Allocate Pages 1535 * 1536 * Input Args: 1537 * vm - Virtual Machine 1538 * 1539 * Output Args: None 1540 * 1541 * Return: 1542 * Starting guest virtual address 1543 * 1544 * Allocates at least N system pages worth of bytes within the virtual address 1545 * space of the vm. 1546 */ 1547 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) 1548 { 1549 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); 1550 } 1551 1552 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) 1553 { 1554 return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); 1555 } 1556 1557 /* 1558 * VM Virtual Address Allocate Page 1559 * 1560 * Input Args: 1561 * vm - Virtual Machine 1562 * 1563 * Output Args: None 1564 * 1565 * Return: 1566 * Starting guest virtual address 1567 * 1568 * Allocates at least one system page worth of bytes within the virtual address 1569 * space of the vm. 1570 */ 1571 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) 1572 { 1573 return vm_vaddr_alloc_pages(vm, 1); 1574 } 1575 1576 /* 1577 * Map a range of VM virtual address to the VM's physical address 1578 * 1579 * Input Args: 1580 * vm - Virtual Machine 1581 * vaddr - Virtuall address to map 1582 * paddr - VM Physical Address 1583 * npages - The number of pages to map 1584 * 1585 * Output Args: None 1586 * 1587 * Return: None 1588 * 1589 * Within the VM given by @vm, creates a virtual translation for 1590 * @npages starting at @vaddr to the page range starting at @paddr. 1591 */ 1592 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 1593 unsigned int npages) 1594 { 1595 size_t page_size = vm->page_size; 1596 size_t size = npages * page_size; 1597 1598 TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow"); 1599 TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); 1600 1601 while (npages--) { 1602 virt_pg_map(vm, vaddr, paddr); 1603 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); 1604 1605 vaddr += page_size; 1606 paddr += page_size; 1607 } 1608 } 1609 1610 /* 1611 * Address VM Physical to Host Virtual 1612 * 1613 * Input Args: 1614 * vm - Virtual Machine 1615 * gpa - VM physical address 1616 * 1617 * Output Args: None 1618 * 1619 * Return: 1620 * Equivalent host virtual address 1621 * 1622 * Locates the memory region containing the VM physical address given 1623 * by gpa, within the VM given by vm. When found, the host virtual 1624 * address providing the memory to the vm physical address is returned. 1625 * A TEST_ASSERT failure occurs if no region containing gpa exists. 1626 */ 1627 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) 1628 { 1629 struct userspace_mem_region *region; 1630 1631 gpa = vm_untag_gpa(vm, gpa); 1632 1633 region = userspace_mem_region_find(vm, gpa, gpa); 1634 if (!region) { 1635 TEST_FAIL("No vm physical memory at 0x%lx", gpa); 1636 return NULL; 1637 } 1638 1639 return (void *)((uintptr_t)region->host_mem 1640 + (gpa - region->region.guest_phys_addr)); 1641 } 1642 1643 /* 1644 * Address Host Virtual to VM Physical 1645 * 1646 * Input Args: 1647 * vm - Virtual Machine 1648 * hva - Host virtual address 1649 * 1650 * Output Args: None 1651 * 1652 * Return: 1653 * Equivalent VM physical address 1654 * 1655 * Locates the memory region containing the host virtual address given 1656 * by hva, within the VM given by vm. When found, the equivalent 1657 * VM physical address is returned. A TEST_ASSERT failure occurs if no 1658 * region containing hva exists. 1659 */ 1660 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) 1661 { 1662 struct rb_node *node; 1663 1664 for (node = vm->regions.hva_tree.rb_node; node; ) { 1665 struct userspace_mem_region *region = 1666 container_of(node, struct userspace_mem_region, hva_node); 1667 1668 if (hva >= region->host_mem) { 1669 if (hva <= (region->host_mem 1670 + region->region.memory_size - 1)) 1671 return (vm_paddr_t)((uintptr_t) 1672 region->region.guest_phys_addr 1673 + (hva - (uintptr_t)region->host_mem)); 1674 1675 node = node->rb_right; 1676 } else 1677 node = node->rb_left; 1678 } 1679 1680 TEST_FAIL("No mapping to a guest physical address, hva: %p", hva); 1681 return -1; 1682 } 1683 1684 /* 1685 * Address VM physical to Host Virtual *alias*. 1686 * 1687 * Input Args: 1688 * vm - Virtual Machine 1689 * gpa - VM physical address 1690 * 1691 * Output Args: None 1692 * 1693 * Return: 1694 * Equivalent address within the host virtual *alias* area, or NULL 1695 * (without failing the test) if the guest memory is not shared (so 1696 * no alias exists). 1697 * 1698 * Create a writable, shared virtual=>physical alias for the specific GPA. 1699 * The primary use case is to allow the host selftest to manipulate guest 1700 * memory without mapping said memory in the guest's address space. And, for 1701 * userfaultfd-based demand paging, to do so without triggering userfaults. 1702 */ 1703 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) 1704 { 1705 struct userspace_mem_region *region; 1706 uintptr_t offset; 1707 1708 region = userspace_mem_region_find(vm, gpa, gpa); 1709 if (!region) 1710 return NULL; 1711 1712 if (!region->host_alias) 1713 return NULL; 1714 1715 offset = gpa - region->region.guest_phys_addr; 1716 return (void *) ((uintptr_t) region->host_alias + offset); 1717 } 1718 1719 /* Create an interrupt controller chip for the specified VM. */ 1720 void vm_create_irqchip(struct kvm_vm *vm) 1721 { 1722 vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL); 1723 1724 vm->has_irqchip = true; 1725 } 1726 1727 int _vcpu_run(struct kvm_vcpu *vcpu) 1728 { 1729 int rc; 1730 1731 do { 1732 rc = __vcpu_run(vcpu); 1733 } while (rc == -1 && errno == EINTR); 1734 1735 if (!rc) 1736 assert_on_unhandled_exception(vcpu); 1737 1738 return rc; 1739 } 1740 1741 /* 1742 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR. 1743 * Assert if the KVM returns an error (other than -EINTR). 1744 */ 1745 void vcpu_run(struct kvm_vcpu *vcpu) 1746 { 1747 int ret = _vcpu_run(vcpu); 1748 1749 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret)); 1750 } 1751 1752 void vcpu_run_complete_io(struct kvm_vcpu *vcpu) 1753 { 1754 int ret; 1755 1756 vcpu->run->immediate_exit = 1; 1757 ret = __vcpu_run(vcpu); 1758 vcpu->run->immediate_exit = 0; 1759 1760 TEST_ASSERT(ret == -1 && errno == EINTR, 1761 "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i", 1762 ret, errno); 1763 } 1764 1765 /* 1766 * Get the list of guest registers which are supported for 1767 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer, 1768 * it is the caller's responsibility to free the list. 1769 */ 1770 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu) 1771 { 1772 struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list; 1773 int ret; 1774 1775 ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, ®_list_n); 1776 TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0"); 1777 1778 reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64)); 1779 reg_list->n = reg_list_n.n; 1780 vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list); 1781 return reg_list; 1782 } 1783 1784 void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu) 1785 { 1786 uint32_t page_size = getpagesize(); 1787 uint32_t size = vcpu->vm->dirty_ring_size; 1788 1789 TEST_ASSERT(size > 0, "Should enable dirty ring first"); 1790 1791 if (!vcpu->dirty_gfns) { 1792 void *addr; 1793 1794 addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd, 1795 page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1796 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private"); 1797 1798 addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd, 1799 page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1800 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec"); 1801 1802 addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 1803 page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1804 TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed"); 1805 1806 vcpu->dirty_gfns = addr; 1807 vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn); 1808 } 1809 1810 return vcpu->dirty_gfns; 1811 } 1812 1813 /* 1814 * Device Ioctl 1815 */ 1816 1817 int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) 1818 { 1819 struct kvm_device_attr attribute = { 1820 .group = group, 1821 .attr = attr, 1822 .flags = 0, 1823 }; 1824 1825 return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute); 1826 } 1827 1828 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) 1829 { 1830 struct kvm_create_device create_dev = { 1831 .type = type, 1832 .flags = KVM_CREATE_DEVICE_TEST, 1833 }; 1834 1835 return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); 1836 } 1837 1838 int __kvm_create_device(struct kvm_vm *vm, uint64_t type) 1839 { 1840 struct kvm_create_device create_dev = { 1841 .type = type, 1842 .fd = -1, 1843 .flags = 0, 1844 }; 1845 int err; 1846 1847 err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); 1848 TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value"); 1849 return err ? : create_dev.fd; 1850 } 1851 1852 int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val) 1853 { 1854 struct kvm_device_attr kvmattr = { 1855 .group = group, 1856 .attr = attr, 1857 .flags = 0, 1858 .addr = (uintptr_t)val, 1859 }; 1860 1861 return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr); 1862 } 1863 1864 int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val) 1865 { 1866 struct kvm_device_attr kvmattr = { 1867 .group = group, 1868 .attr = attr, 1869 .flags = 0, 1870 .addr = (uintptr_t)val, 1871 }; 1872 1873 return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr); 1874 } 1875 1876 /* 1877 * IRQ related functions. 1878 */ 1879 1880 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) 1881 { 1882 struct kvm_irq_level irq_level = { 1883 .irq = irq, 1884 .level = level, 1885 }; 1886 1887 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); 1888 } 1889 1890 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) 1891 { 1892 int ret = _kvm_irq_line(vm, irq, level); 1893 1894 TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret)); 1895 } 1896 1897 struct kvm_irq_routing *kvm_gsi_routing_create(void) 1898 { 1899 struct kvm_irq_routing *routing; 1900 size_t size; 1901 1902 size = sizeof(struct kvm_irq_routing); 1903 /* Allocate space for the max number of entries: this wastes 196 KBs. */ 1904 size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry); 1905 routing = calloc(1, size); 1906 assert(routing); 1907 1908 return routing; 1909 } 1910 1911 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, 1912 uint32_t gsi, uint32_t pin) 1913 { 1914 int i; 1915 1916 assert(routing); 1917 assert(routing->nr < KVM_MAX_IRQ_ROUTES); 1918 1919 i = routing->nr; 1920 routing->entries[i].gsi = gsi; 1921 routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; 1922 routing->entries[i].flags = 0; 1923 routing->entries[i].u.irqchip.irqchip = 0; 1924 routing->entries[i].u.irqchip.pin = pin; 1925 routing->nr++; 1926 } 1927 1928 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) 1929 { 1930 int ret; 1931 1932 assert(routing); 1933 ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing); 1934 free(routing); 1935 1936 return ret; 1937 } 1938 1939 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) 1940 { 1941 int ret; 1942 1943 ret = _kvm_gsi_routing_write(vm, routing); 1944 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret)); 1945 } 1946 1947 /* 1948 * VM Dump 1949 * 1950 * Input Args: 1951 * vm - Virtual Machine 1952 * indent - Left margin indent amount 1953 * 1954 * Output Args: 1955 * stream - Output FILE stream 1956 * 1957 * Return: None 1958 * 1959 * Dumps the current state of the VM given by vm, to the FILE stream 1960 * given by stream. 1961 */ 1962 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) 1963 { 1964 int ctr; 1965 struct userspace_mem_region *region; 1966 struct kvm_vcpu *vcpu; 1967 1968 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); 1969 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); 1970 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); 1971 fprintf(stream, "%*sMem Regions:\n", indent, ""); 1972 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { 1973 fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx " 1974 "host_virt: %p\n", indent + 2, "", 1975 (uint64_t) region->region.guest_phys_addr, 1976 (uint64_t) region->region.memory_size, 1977 region->host_mem); 1978 fprintf(stream, "%*sunused_phy_pages: ", indent + 2, ""); 1979 sparsebit_dump(stream, region->unused_phy_pages, 0); 1980 if (region->protected_phy_pages) { 1981 fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, ""); 1982 sparsebit_dump(stream, region->protected_phy_pages, 0); 1983 } 1984 } 1985 fprintf(stream, "%*sMapped Virtual Pages:\n", indent, ""); 1986 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); 1987 fprintf(stream, "%*spgd_created: %u\n", indent, "", 1988 vm->pgd_created); 1989 if (vm->pgd_created) { 1990 fprintf(stream, "%*sVirtual Translation Tables:\n", 1991 indent + 2, ""); 1992 virt_dump(stream, vm, indent + 4); 1993 } 1994 fprintf(stream, "%*sVCPUs:\n", indent, ""); 1995 1996 list_for_each_entry(vcpu, &vm->vcpus, list) 1997 vcpu_dump(stream, vcpu, indent + 2); 1998 } 1999 2000 #define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x} 2001 2002 /* Known KVM exit reasons */ 2003 static struct exit_reason { 2004 unsigned int reason; 2005 const char *name; 2006 } exit_reasons_known[] = { 2007 KVM_EXIT_STRING(UNKNOWN), 2008 KVM_EXIT_STRING(EXCEPTION), 2009 KVM_EXIT_STRING(IO), 2010 KVM_EXIT_STRING(HYPERCALL), 2011 KVM_EXIT_STRING(DEBUG), 2012 KVM_EXIT_STRING(HLT), 2013 KVM_EXIT_STRING(MMIO), 2014 KVM_EXIT_STRING(IRQ_WINDOW_OPEN), 2015 KVM_EXIT_STRING(SHUTDOWN), 2016 KVM_EXIT_STRING(FAIL_ENTRY), 2017 KVM_EXIT_STRING(INTR), 2018 KVM_EXIT_STRING(SET_TPR), 2019 KVM_EXIT_STRING(TPR_ACCESS), 2020 KVM_EXIT_STRING(S390_SIEIC), 2021 KVM_EXIT_STRING(S390_RESET), 2022 KVM_EXIT_STRING(DCR), 2023 KVM_EXIT_STRING(NMI), 2024 KVM_EXIT_STRING(INTERNAL_ERROR), 2025 KVM_EXIT_STRING(OSI), 2026 KVM_EXIT_STRING(PAPR_HCALL), 2027 KVM_EXIT_STRING(S390_UCONTROL), 2028 KVM_EXIT_STRING(WATCHDOG), 2029 KVM_EXIT_STRING(S390_TSCH), 2030 KVM_EXIT_STRING(EPR), 2031 KVM_EXIT_STRING(SYSTEM_EVENT), 2032 KVM_EXIT_STRING(S390_STSI), 2033 KVM_EXIT_STRING(IOAPIC_EOI), 2034 KVM_EXIT_STRING(HYPERV), 2035 KVM_EXIT_STRING(ARM_NISV), 2036 KVM_EXIT_STRING(X86_RDMSR), 2037 KVM_EXIT_STRING(X86_WRMSR), 2038 KVM_EXIT_STRING(DIRTY_RING_FULL), 2039 KVM_EXIT_STRING(AP_RESET_HOLD), 2040 KVM_EXIT_STRING(X86_BUS_LOCK), 2041 KVM_EXIT_STRING(XEN), 2042 KVM_EXIT_STRING(RISCV_SBI), 2043 KVM_EXIT_STRING(RISCV_CSR), 2044 KVM_EXIT_STRING(NOTIFY), 2045 KVM_EXIT_STRING(LOONGARCH_IOCSR), 2046 KVM_EXIT_STRING(MEMORY_FAULT), 2047 }; 2048 2049 /* 2050 * Exit Reason String 2051 * 2052 * Input Args: 2053 * exit_reason - Exit reason 2054 * 2055 * Output Args: None 2056 * 2057 * Return: 2058 * Constant string pointer describing the exit reason. 2059 * 2060 * Locates and returns a constant string that describes the KVM exit 2061 * reason given by exit_reason. If no such string is found, a constant 2062 * string of "Unknown" is returned. 2063 */ 2064 const char *exit_reason_str(unsigned int exit_reason) 2065 { 2066 unsigned int n1; 2067 2068 for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) { 2069 if (exit_reason == exit_reasons_known[n1].reason) 2070 return exit_reasons_known[n1].name; 2071 } 2072 2073 return "Unknown"; 2074 } 2075 2076 /* 2077 * Physical Contiguous Page Allocator 2078 * 2079 * Input Args: 2080 * vm - Virtual Machine 2081 * num - number of pages 2082 * paddr_min - Physical address minimum 2083 * memslot - Memory region to allocate page from 2084 * protected - True if the pages will be used as protected/private memory 2085 * 2086 * Output Args: None 2087 * 2088 * Return: 2089 * Starting physical address 2090 * 2091 * Within the VM specified by vm, locates a range of available physical 2092 * pages at or above paddr_min. If found, the pages are marked as in use 2093 * and their base address is returned. A TEST_ASSERT failure occurs if 2094 * not enough pages are available at or above paddr_min. 2095 */ 2096 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 2097 vm_paddr_t paddr_min, uint32_t memslot, 2098 bool protected) 2099 { 2100 struct userspace_mem_region *region; 2101 sparsebit_idx_t pg, base; 2102 2103 TEST_ASSERT(num > 0, "Must allocate at least one page"); 2104 2105 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " 2106 "not divisible by page size.\n" 2107 " paddr_min: 0x%lx page_size: 0x%x", 2108 paddr_min, vm->page_size); 2109 2110 region = memslot2region(vm, memslot); 2111 TEST_ASSERT(!protected || region->protected_phy_pages, 2112 "Region doesn't support protected memory"); 2113 2114 base = pg = paddr_min >> vm->page_shift; 2115 do { 2116 for (; pg < base + num; ++pg) { 2117 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { 2118 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); 2119 break; 2120 } 2121 } 2122 } while (pg && pg != base + num); 2123 2124 if (pg == 0) { 2125 fprintf(stderr, "No guest physical page available, " 2126 "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n", 2127 paddr_min, vm->page_size, memslot); 2128 fputs("---- vm dump ----\n", stderr); 2129 vm_dump(stderr, vm, 2); 2130 abort(); 2131 } 2132 2133 for (pg = base; pg < base + num; ++pg) { 2134 sparsebit_clear(region->unused_phy_pages, pg); 2135 if (protected) 2136 sparsebit_set(region->protected_phy_pages, pg); 2137 } 2138 2139 return base * vm->page_size; 2140 } 2141 2142 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, 2143 uint32_t memslot) 2144 { 2145 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); 2146 } 2147 2148 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) 2149 { 2150 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 2151 vm->memslots[MEM_REGION_PT]); 2152 } 2153 2154 /* 2155 * Address Guest Virtual to Host Virtual 2156 * 2157 * Input Args: 2158 * vm - Virtual Machine 2159 * gva - VM virtual address 2160 * 2161 * Output Args: None 2162 * 2163 * Return: 2164 * Equivalent host virtual address 2165 */ 2166 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) 2167 { 2168 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); 2169 } 2170 2171 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm) 2172 { 2173 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; 2174 } 2175 2176 static unsigned int vm_calc_num_pages(unsigned int num_pages, 2177 unsigned int page_shift, 2178 unsigned int new_page_shift, 2179 bool ceil) 2180 { 2181 unsigned int n = 1 << (new_page_shift - page_shift); 2182 2183 if (page_shift >= new_page_shift) 2184 return num_pages * (1 << (page_shift - new_page_shift)); 2185 2186 return num_pages / n + !!(ceil && num_pages % n); 2187 } 2188 2189 static inline int getpageshift(void) 2190 { 2191 return __builtin_ffs(getpagesize()) - 1; 2192 } 2193 2194 unsigned int 2195 vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) 2196 { 2197 return vm_calc_num_pages(num_guest_pages, 2198 vm_guest_mode_params[mode].page_shift, 2199 getpageshift(), true); 2200 } 2201 2202 unsigned int 2203 vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages) 2204 { 2205 return vm_calc_num_pages(num_host_pages, getpageshift(), 2206 vm_guest_mode_params[mode].page_shift, false); 2207 } 2208 2209 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size) 2210 { 2211 unsigned int n; 2212 n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size); 2213 return vm_adjust_num_guest_pages(mode, n); 2214 } 2215 2216 /* 2217 * Read binary stats descriptors 2218 * 2219 * Input Args: 2220 * stats_fd - the file descriptor for the binary stats file from which to read 2221 * header - the binary stats metadata header corresponding to the given FD 2222 * 2223 * Output Args: None 2224 * 2225 * Return: 2226 * A pointer to a newly allocated series of stat descriptors. 2227 * Caller is responsible for freeing the returned kvm_stats_desc. 2228 * 2229 * Read the stats descriptors from the binary stats interface. 2230 */ 2231 struct kvm_stats_desc *read_stats_descriptors(int stats_fd, 2232 struct kvm_stats_header *header) 2233 { 2234 struct kvm_stats_desc *stats_desc; 2235 ssize_t desc_size, total_size, ret; 2236 2237 desc_size = get_stats_descriptor_size(header); 2238 total_size = header->num_desc * desc_size; 2239 2240 stats_desc = calloc(header->num_desc, desc_size); 2241 TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors"); 2242 2243 ret = pread(stats_fd, stats_desc, total_size, header->desc_offset); 2244 TEST_ASSERT(ret == total_size, "Read KVM stats descriptors"); 2245 2246 return stats_desc; 2247 } 2248 2249 /* 2250 * Read stat data for a particular stat 2251 * 2252 * Input Args: 2253 * stats_fd - the file descriptor for the binary stats file from which to read 2254 * header - the binary stats metadata header corresponding to the given FD 2255 * desc - the binary stat metadata for the particular stat to be read 2256 * max_elements - the maximum number of 8-byte values to read into data 2257 * 2258 * Output Args: 2259 * data - the buffer into which stat data should be read 2260 * 2261 * Read the data values of a specified stat from the binary stats interface. 2262 */ 2263 void read_stat_data(int stats_fd, struct kvm_stats_header *header, 2264 struct kvm_stats_desc *desc, uint64_t *data, 2265 size_t max_elements) 2266 { 2267 size_t nr_elements = min_t(ssize_t, desc->size, max_elements); 2268 size_t size = nr_elements * sizeof(*data); 2269 ssize_t ret; 2270 2271 TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name); 2272 TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name); 2273 2274 ret = pread(stats_fd, data, size, 2275 header->data_offset + desc->offset); 2276 2277 TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)", 2278 desc->name, errno, strerror(errno)); 2279 TEST_ASSERT(ret == size, 2280 "pread() on stat '%s' read %ld bytes, wanted %lu bytes", 2281 desc->name, size, ret); 2282 } 2283 2284 void kvm_get_stat(struct kvm_binary_stats *stats, const char *name, 2285 uint64_t *data, size_t max_elements) 2286 { 2287 struct kvm_stats_desc *desc; 2288 size_t size_desc; 2289 int i; 2290 2291 if (!stats->desc) { 2292 read_stats_header(stats->fd, &stats->header); 2293 stats->desc = read_stats_descriptors(stats->fd, &stats->header); 2294 } 2295 2296 size_desc = get_stats_descriptor_size(&stats->header); 2297 2298 for (i = 0; i < stats->header.num_desc; ++i) { 2299 desc = (void *)stats->desc + (i * size_desc); 2300 2301 if (strcmp(desc->name, name)) 2302 continue; 2303 2304 read_stat_data(stats->fd, &stats->header, desc, data, max_elements); 2305 return; 2306 } 2307 2308 TEST_FAIL("Unable to find stat '%s'", name); 2309 } 2310 2311 __weak void kvm_arch_vm_post_create(struct kvm_vm *vm) 2312 { 2313 } 2314 2315 __weak void kvm_selftest_arch_init(void) 2316 { 2317 } 2318 2319 void __attribute((constructor)) kvm_selftest_init(void) 2320 { 2321 /* Tell stdout not to buffer its content. */ 2322 setbuf(stdout, NULL); 2323 2324 guest_random_seed = last_guest_seed = random(); 2325 pr_info("Random seed: 0x%x\n", guest_random_seed); 2326 2327 kvm_selftest_arch_init(); 2328 } 2329 2330 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) 2331 { 2332 sparsebit_idx_t pg = 0; 2333 struct userspace_mem_region *region; 2334 2335 if (!vm_arch_has_protected_memory(vm)) 2336 return false; 2337 2338 region = userspace_mem_region_find(vm, paddr, paddr); 2339 TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr); 2340 2341 pg = paddr >> vm->page_shift; 2342 return sparsebit_is_set(region->protected_phy_pages, pg); 2343 } 2344