/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/kvm.h>

#include <sys/eventfd.h>
#include <sys/ioctl.h>

#include <pthread.h>

#include "kvm_syscalls.h"
#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"

#define KVM_DEV_PATH		"/dev/kvm"
#define KVM_MAX_VCPUS		512

#define NSEC_PER_SEC		1000000000L

struct userspace_mem_region {
	struct kvm_userspace_memory_region2 region;
	struct sparsebit *unused_phy_pages;
	struct sparsebit *protected_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_binary_stats {
	int fd;
	struct kvm_stats_header header;
	struct kvm_stats_desc *desc;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
#ifdef __aarch64__
	struct kvm_vcpu_init init;
#endif
	struct kvm_binary_stats stats;
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_mmu {
	bool pgd_created;
	uint64_t pgd;
	int pgtable_levels;

	struct kvm_mmu_arch arch;
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	gpa_t ucall_mmio_addr;
	gva_t handlers;
	uint32_t dirty_ring_size;
	uint64_t gpa_tag_mask;

	/*
	 * "mmu" is the guest's stage-1, with a short name because the vast
	 * majority of tests only care about the stage-1 MMU.
	 */
	struct kvm_mmu mmu;
	struct kvm_mmu stage2_mmu;

	struct kvm_vm_arch arch;

	struct kvm_binary_stats stats;

	/*
	 * KVM region slots.  These are the default memslots used by the page
	 * allocators, e.g. lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};
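/*
 * Usage sketch (illustrative only): look up the default memslot backing
 * guest code via vm_get_mem_region(), declared below.
 *
 *	struct userspace_mem_region *region;
 *
 *	region = vm_get_mem_region(vm, MEM_REGION_CODE);
 */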
struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	int feature_type;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)						\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define kvm_for_each_vcpu(vm, i, vcpu)				\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)		\
		if (!((vcpu) = vm->vcpus[i]))			\
			continue;				\
		else

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_16K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXVYY_4K,	/* For 48-bit or 57-bit VA, depending on host support */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P47V47_16K,
	VM_MODE_P36V47_16K,

	VM_MODE_P56V57_4K,	/* For riscv64 */
	VM_MODE_P56V48_4K,
	VM_MODE_P56V39_4K,
	VM_MODE_P50V57_4K,
	VM_MODE_P50V48_4K,
	VM_MODE_P50V39_4K,
	VM_MODE_P41V57_4K,
	VM_MODE_P41V48_4K,
	VM_MODE_P41V39_4K,

	NUM_VM_MODES,
};

struct vm_shape {
	uint32_t type;
	uint8_t  mode;
	uint8_t  pad0;
	uint16_t pad1;
};

kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));

#define VM_TYPE_DEFAULT		0

#define VM_SHAPE(__mode)			\
({						\
	struct vm_shape shape = {		\
		.mode = (__mode),		\
		.type = VM_TYPE_DEFAULT		\
	};					\
						\
	shape;					\
})

extern enum vm_guest_mode vm_mode_default;

#if defined(__aarch64__)

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXVYY_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__loongarch__)

#define VM_MODE_DEFAULT			VM_MODE_P47V47_16K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)
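/*
 * Usage sketch (illustrative only): VM_SHAPE() above yields a default-type
 * shape for a given mode; an arch-specific, non-default VM type can then be
 * set by hand.  The type value '1' below is a made-up example.
 *
 *	struct vm_shape shape = VM_SHAPE(VM_MODE_DEFAULT);
 *
 *	shape.type = 1;
 */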
struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int __open_path_or_exit(const char *path, int flags, const char *enoent_help);
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

int kvm_get_module_param_integer(const char *module_name, const char *param);
bool kvm_get_module_param_bool(const char *module_name, const char *param);

static inline bool get_kvm_param_bool(const char *param)
{
	return kvm_get_module_param_bool("kvm", param);
}

static inline int get_kvm_param_integer(const char *param)
{
	return kvm_get_module_param_integer("kvm", param);
}

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of ioctl() and not its literal numeric value
 * is printed on error.  The "outer" macro is strongly preferred when
 * reporting errors "directly", i.e. without an additional layer of macros,
 * as it reduces the probability of passing in the wrong string.
 */
#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret)	__KVM_IOCTL_ERROR(#_ioctl, _ret)

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define kvm_ioctl(kvm_fd, cmd, arg)				\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));	\
})

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)			\
({							\
	static_assert_is_vm(vm);			\
	kvm_do_ioctl((vm)->fd, cmd, arg);		\
})
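/*
 * Usage sketch (illustrative only): use the asserting wrappers for ioctls
 * that must succeed, and the double-underscore variants to get the raw
 * return value, e.g. when probing optional functionality.  KVM_SET_FOO is
 * a placeholder for any ioctl that returns 0 on success.
 *
 *	kvm_ioctl(kvm_fd, KVM_SET_FOO, &foo);
 *
 *	ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
 */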
/*
 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
 * the ioctl() failed because KVM killed/bugged the VM.  To detect a dead VM,
 * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
 * selftests existed and (b) should never outright fail, i.e. is supposed to
 * return 0 or 1.  If KVM kills a VM, KVM returns -EIO for all ioctl()s for
 * the VM and its vCPUs, including KVM_CHECK_EXTENSION.
 */
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)				\
do {											\
	int __errno = errno;								\
											\
	static_assert_is_vm(vm);							\
											\
	if (cond)									\
		break;									\
											\
	if (errno == EIO &&								\
	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {	\
		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");	\
		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");	\
	}										\
	errno = __errno;								\
	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));				\
} while (0)

#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)		\
	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)

#define vm_ioctl(vm, cmd, arg)					\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);	\
})

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)			\
({							\
	static_assert_is_vcpu(vcpu);			\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);		\
})

#define vcpu_ioctl(vcpu, cmd, arg)					\
({									\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);				\
									\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
})

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by @cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
					    uint64_t size, uint64_t attributes)
{
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
		.address = gpa,
		.size = size,
		.flags = 0,
	};

	/*
	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes.  These flows
	 * need significant enhancements to support multiple attributes.
	 */
	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
		    "Update me to support multiple attributes!");

	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}
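/*
 * Usage sketch (illustrative only, arbitrary example values): convert a
 * 2MiB GPA range to private and back to shared via the wrappers below.
 *
 *	vm_mem_set_private(vm, 0x100000000ull, 0x200000);
 *	...
 *	vm_mem_set_shared(vm, 0x100000000ull, 0x200000);
 */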
static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
				      uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
				     uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, 0);
}

void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
			    bool punch_hole);

static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
					   uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, true);
}

static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
					 uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, false);
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
						uint64_t address,
						uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio  = pio,
	};

	vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
}

static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
						  uint64_t address,
						  uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio  = pio,
	};

	vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
	return fd;
}

static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
			      uint32_t flags)
{
	struct kvm_irqfd irqfd = {
		.fd = eventfd,
		.gsi = gsi,
		.flags = flags,
		.resamplefd = -1,
	};

	return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
}

static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
			     uint32_t flags)
{
	int ret = __kvm_irqfd(vm, gsi, eventfd, flags);

	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
}
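/*
 * Usage sketch (illustrative only): route a host eventfd to a guest GSI
 * (the GSI number below is an arbitrary example) using the helpers defined
 * below.
 *
 *	int fd = kvm_new_eventfd();
 *
 *	kvm_assign_irqfd(vm, 5, fd);
 *	...
 *	kvm_deassign_irqfd(vm, 5, fd);
 *	close(fd);
 */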
static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, 0);
}

static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
}

static inline int kvm_new_eventfd(void)
{
	int fd = eventfd(0, 0);

	TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned.  For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable, i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
		  uint64_t *data, size_t max_elements);

#define __get_stat(stats, stat)				\
({							\
	uint64_t data;					\
							\
	kvm_get_stat(stats, #stat, &data, 1);		\
	data;						\
})

#define vm_get_stat(vm, stat)		__get_stat(&(vm)->stats, stat)
#define vcpu_get_stat(vcpu, stat)	__get_stat(&(vcpu)->stats, stat)

static inline bool read_smt_control(char *buf, size_t buf_size)
{
	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r");
	bool ret;

	if (!f)
		return false;

	ret = fread(buf, sizeof(*buf), buf_size, f) > 0;
	fclose(f);

	return ret;
}

static inline bool is_smt_possible(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) &&
	    (!strncmp(buf, "forceoff", 8) || !strncmp(buf, "notsupported", 12)))
		return false;

	return true;
}

static inline bool is_smt_on(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) && !strncmp(buf, "on", 2))
		return true;

	return false;
}

void vm_create_irqchip(struct kvm_vm *vm);

static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					  uint64_t flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
		.flags = flags,
	};

	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}

static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					uint64_t flags)
{
	int fd = __vm_create_guest_memfd(vm, size, flags);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
	return fd;
}
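/*
 * Usage sketch (illustrative only, arbitrary slot/gpa/size values): back a
 * memslot with guest_memfd, using vm_mem_add() declared below.
 *
 *	int gmem = vm_create_guest_memfd(vm, size, 0);
 *
 *	vm_mem_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, slot, npages,
 *		   KVM_MEM_GUEST_MEMFD, gmem, 0);
 */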
void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva,
				uint32_t guest_memfd, uint64_t guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 uint64_t gpa, uint64_t size, void *hva,
				 uint32_t guest_memfd, uint64_t guest_memfd_offset);

void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 uint64_t gpa, uint32_t slot, uint64_t npages,
				 uint32_t flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
		int guest_memfd_fd, uint64_t guest_memfd_offset);

#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}
#endif

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
gva_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
gva_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
gva_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min,
		       enum kvm_mem_region_type type);
gva_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t vaddr_min,
			    enum kvm_mem_region_type type);
gva_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
gva_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);
gva_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);
gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa);

#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif

static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, gpa_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);
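/*
 * Usage sketch (illustrative only): a bare-bones run loop using vcpu_run()
 * above.  Real tests typically dispatch on ucalls (see ucall_common.h)
 * instead of raw exit reasons.
 *
 *	for (;;) {
 *		vcpu_run(vcpu);
 *		if (vcpu->run->exit_reason != KVM_EXIT_IO)
 *			break;
 *	}
 */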
static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
{
	uint64_t val;
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
	return val;
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
	return fd;
}
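/*
 * Usage sketch (illustrative only): read-modify-write a register via the
 * ONE_REG helpers above.  "reg_id" stands in for an arch-specific
 * KVM_REG_* encoding.
 *
 *	uint64_t val = vcpu_get_reg(vcpu, reg_id);
 *
 *	vcpu_set_reg(vcpu, reg_id, val | 1);
 */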
int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
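/*
 * Usage sketch (illustrative only): probe for a device attribute before
 * reading it.  "group" and "attr" stand in for device-specific values.
 *
 *	if (!__vcpu_has_device_attr(vcpu, group, attr))
 *		vcpu_device_attr_get(vcpu, group, attr, &val);
 */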
/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - vCPU
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry
 * point, per the C calling convention of the architecture, to the values
 * given as variable args.  Each of the variable args is expected to be of
 * type uint64_t.  The maximum supported @num is architecture-specific.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES 4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, uint32_t memslot);
gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			   gpa_t paddr_min, uint32_t memslot,
			   bool protected);
gpa_t vm_alloc_page_table(struct kvm_vm *vm);

static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				       gpa_t paddr_min, uint32_t memslot)
{
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
				    vm_arch_has_protected_memory(vm));
}

/*
 * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_SHAPE_DEFAULT);
}

static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};

	return ____vm_create(shape);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       uint64_t extra_mem_pages,
					       void *guest_code);
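/*
 * Usage sketch (illustrative only): create a VM with two vCPUs that will
 * run a test-provided guest_main().
 *
 *	struct kvm_vcpu *vcpus[2];
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create_with_vcpus(2, guest_main, vcpus);
 */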
/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						       uint64_t extra_mem_pages,
						       void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
					       extra_mem_pages, guest_code);
}

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}

static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
							   struct kvm_vcpu **vcpu,
							   void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_set_files_rlimit(uint32_t nr_vcpus);

int __pin_task_to_cpu(pthread_t task, int cpu);

static inline void pin_task_to_cpu(pthread_t task, int cpu)
{
	int r;

	r = __pin_task_to_cpu(task, cpu);
	TEST_ASSERT(!r, "Failed to set thread affinity to pCPU '%u'", cpu);
}

static inline int pin_task_to_any_cpu(pthread_t task)
{
	int cpu = sched_getcpu();

	pin_task_to_cpu(task, cpu);
	return cpu;
}

static inline void pin_self_to_cpu(int cpu)
{
	pin_task_to_cpu(pthread_self(), cpu);
}

static inline int pin_self_to_any_cpu(void)
{
	return pin_task_to_any_cpu(pthread_self());
}

void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);

static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;

	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
	return n;
}

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain.  Primarily
 * used for "globals" that hold per-VM values (VMs always duplicate code and
 * global data into their own region of physical memory), but can be used
 * anytime it's undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}
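/*
 * Usage sketch (illustrative only): publish a host-initialized global to
 * the guest's copy via sync_global_to_guest() above.  "test_config" is a
 * made-up example global.
 *
 *	test_config.nr_iterations = 100;
 *	sync_global_to_guest(vm, test_config);
 */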
/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the vCPU to add to the VM.
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
	sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva);

static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

static inline uint64_t vm_page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size - 1) & ~((uint64_t)vm->page_size - 1);
}
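/*
 * Usage sketch (illustrative only): install a single-page translation and
 * sanity check it with the GVA=>GPA walker above.
 *
 *	virt_pg_map(vm, gva, gpa);
 *	TEST_ASSERT(addr_gva2gpa(vm, gva) == gpa, "Bad translation");
 */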
/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);
void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
void kvm_arch_vm_release(struct kvm_vm *vm);

bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr);

uint32_t guest_get_vcpuid(void);

bool kvm_arch_has_default_irqchip(void);

#endif /* SELFTEST_KVM_UTIL_H */