/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/kvm.h>

#include <sys/eventfd.h>
#include <sys/ioctl.h>

#include <pthread.h>

#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"

#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

struct userspace_mem_region {
	struct kvm_userspace_memory_region2 region;
	struct sparsebit *unused_phy_pages;
	struct sparsebit *protected_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_binary_stats {
	int fd;
	struct kvm_stats_header header;
	struct kvm_stats_desc *desc;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
	struct kvm_binary_stats stats;
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t ucall_mmio_addr;
	vm_paddr_t pgd;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;
	uint64_t gpa_tag_mask;

	struct kvm_vm_arch arch;

	struct kvm_binary_stats stats;

	/*
	 * KVM region slots. These are the default memslots used by page
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};

struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	int feature_type;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)					\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define kvm_for_each_vcpu(vm, i, vcpu)				\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)		\
		if (!((vcpu) = vm->vcpus[i]))			\
			continue;				\
		else

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}
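
/*
 * Illustrative usage sketch (not part of this header): tests rarely touch
 * vm->memslots[] directly; the page allocators consult it through
 * vm_get_mem_region().  A hypothetical test that wants to inspect the
 * memslot backing guest page tables might do (pr_info() comes from
 * test_util.h):
 *
 *	struct userspace_mem_region *region;
 *
 *	region = vm_get_mem_region(vm, MEM_REGION_PT);
 *	pr_info("page tables live in memslot %u\n", region->region.slot);
 */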

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR 0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
#define DEFAULT_STACK_PGS 5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_16K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48bits VA but ANY bits PA */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P47V47_16K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

struct vm_shape {
	uint32_t type;
	uint8_t mode;
	uint8_t pad0;
	uint16_t pad1;
};

kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));

#define VM_TYPE_DEFAULT 0

#define VM_SHAPE(__mode)			\
({						\
	struct vm_shape shape = {		\
		.mode = (__mode),		\
		.type = VM_TYPE_DEFAULT		\
	};					\
						\
	shape;					\
})

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT vm_mode_default
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)

#elif defined(__loongarch__)
#define VM_MODE_DEFAULT VM_MODE_P47V47_16K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)

#endif

#define VM_SHAPE_DEFAULT VM_SHAPE(VM_MODE_DEFAULT)

#define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE)
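
/*
 * Illustrative usage sketch (not part of this header): VM_SHAPE() packs a
 * guest mode into a default-type shape, e.g. to create a 40-bit PA, 4K-page
 * VM regardless of the host's default mode (__vm_create() is declared later
 * in this header):
 *
 *	struct vm_shape shape = VM_SHAPE(VM_MODE_P40V48_4K);
 *	struct kvm_vm *vm = __vm_create(shape, 1, 0);
 *
 * Non-default VM types are requested by filling .type by hand, as
 * vm_create_barebones_type() later in this header does.
 */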

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int __open_path_or_exit(const char *path, int flags, const char *enoent_help);
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

int kvm_get_module_param_integer(const char *module_name, const char *param);
bool kvm_get_module_param_bool(const char *module_name, const char *param);

static inline bool get_kvm_param_bool(const char *param)
{
	return kvm_get_module_param_bool("kvm", param);
}

static inline int get_kvm_param_integer(const char *param)
{
	return kvm_get_module_param_integer("kvm", param);
}

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of ioctl() and not its literal numeric value
 * is printed on error. The "outer" macro is strongly preferred when reporting
 * errors "directly", i.e. without an additional layer of macros, as it reduces
 * the probability of passing in the wrong string.
 */
#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret)	__KVM_IOCTL_ERROR(#_ioctl, _ret)

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define kvm_ioctl(kvm_fd, cmd, arg)				\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));	\
})
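
/*
 * Illustrative usage sketch (not part of this header): the "outer" macro is
 * for direct use, where the ioctl name is known at the callsite, e.g.
 * (mirroring what vm_create_guest_memfd() below actually does):
 *
 *	int fd = __vm_create_guest_memfd(vm, size, 0);
 *
 *	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
 *
 * whereas kvm_ioctl() and friends pass #cmd into the double-underscore
 * variant so that the symbolic name, not the numeric value, is printed.
 */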

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)			\
({							\
	static_assert_is_vm(vm);			\
	kvm_do_ioctl((vm)->fd, cmd, arg);		\
})

/*
 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
 * the ioctl() failed because KVM killed/bugged the VM. To detect a dead VM,
 * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
 * selftests existed and (b) should never outright fail, i.e. is supposed to
 * return 0 or 1. If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
 * VM and its vCPUs, including KVM_CHECK_EXTENSION.
 */
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)				\
do {											\
	int __errno = errno;								\
											\
	static_assert_is_vm(vm);							\
											\
	if (cond)									\
		break;									\
											\
	if (errno == EIO &&								\
	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {	\
		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");	\
		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");	\
	}										\
	errno = __errno;								\
	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));				\
} while (0)

#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)		\
	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)

#define vm_ioctl(vm, cmd, arg)					\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);	\
})

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)			\
({							\
	static_assert_is_vcpu(vcpu);			\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);		\
})

#define vcpu_ioctl(vcpu, cmd, arg)					\
({									\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);				\
									\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
})

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
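
/*
 * Illustrative usage sketch (not part of this header): probe a capability
 * before enabling it, e.g. for a hypothetical test that wants manual dirty
 * log protection (TEST_REQUIRE() comes from test_util.h, the cap and flag
 * from <linux/kvm.h>):
 *
 *	TEST_REQUIRE(kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2));
 *	vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
 *		      KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE);
 */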

static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
					    uint64_t size, uint64_t attributes)
{
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
		.address = gpa,
		.size = size,
		.flags = 0,
	};

	/*
	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes. These flows
	 * need significant enhancements to support multiple attributes.
	 */
	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
		    "Update me to support multiple attributes!");

	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}

static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
				      uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
				     uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, 0);
}

void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
			    bool punch_hole);

static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
					   uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, true);
}

static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
					 uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, false);
}
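
/*
 * Illustrative usage sketch (not part of this header): a hypothetical
 * guest_memfd-backed test converting a GPA range to private and back.
 * Punching a hole when converting back to shared releases the now-unused
 * private backing; whether and when to preallocate or free is a per-test
 * policy decision, not a requirement of these helpers:
 *
 *	vm_mem_set_private(vm, gpa, size);
 *	... run the guest against the private range ...
 *	vm_mem_set_shared(vm, gpa, size);
 *	vm_guest_mem_punch_hole(vm, gpa, size);
 */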

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
						uint64_t address,
						uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio = pio,
	};

	vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
}

static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
						  uint64_t address,
						  uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio = pio,
	};

	vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
	return fd;
}

static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
			      uint32_t flags)
{
	struct kvm_irqfd irqfd = {
		.fd = eventfd,
		.gsi = gsi,
		.flags = flags,
		.resamplefd = -1,
	};

	return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
}

static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
			     uint32_t flags)
{
	int ret = __kvm_irqfd(vm, gsi, eventfd, flags);

	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
}

static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, 0);
}

static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
}

static inline int kvm_new_eventfd(void)
{
	int fd = eventfd(0, 0);

	TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable, i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
		  uint64_t *data, size_t max_elements);

#define __get_stat(stats, stat)			\
({						\
	uint64_t data;				\
						\
	kvm_get_stat(stats, #stat, &data, 1);	\
	data;					\
})

#define vm_get_stat(vm, stat)		__get_stat(&(vm)->stats, stat)
#define vcpu_get_stat(vcpu, stat)	__get_stat(&(vcpu)->stats, stat)
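
/*
 * Illustrative usage sketch (not part of this header): the stat name is
 * taken verbatim (stringified), so it must match a name reported by KVM's
 * binary stats ABI, e.g. on x86:
 *
 *	uint64_t pages = vm_get_stat(vm, pages_4k);
 *	uint64_t nested_runs = vcpu_get_stat(vcpu, nested_run);
 */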

static inline bool read_smt_control(char *buf, size_t buf_size)
{
	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r");
	bool ret;

	if (!f)
		return false;

	ret = fread(buf, sizeof(*buf), buf_size, f) > 0;
	fclose(f);

	return ret;
}

static inline bool is_smt_possible(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) &&
	    (!strncmp(buf, "forceoff", 8) || !strncmp(buf, "notsupported", 12)))
		return false;

	return true;
}

static inline bool is_smt_on(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) && !strncmp(buf, "on", 2))
		return true;

	return false;
}

void vm_create_irqchip(struct kvm_vm *vm);

static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					  uint64_t flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
		.flags = flags,
	};

	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}

static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					uint64_t flags)
{
	int fd = __vm_create_guest_memfd(vm, size, flags);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
	return fd;
}

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva,
				uint32_t guest_memfd, uint64_t guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 uint64_t gpa, uint64_t size, void *hva,
				 uint32_t guest_memfd, uint64_t guest_memfd_offset);

void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);

#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}
#endif

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
				 vm_vaddr_t vaddr_min,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif

static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}
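
/*
 * Illustrative usage sketch (not part of this header): host-side setup of
 * guest memory usually pairs a guest virtual allocation with the matching
 * host mapping, e.g. to hand a zeroed, page-sized buffer to guest code
 * (vcpu_args_set() is declared later in this header):
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
 *	void *hva = addr_gva2hva(vm, gva);
 *
 *	memset(hva, 0, vm->page_size);
 *	vcpu_args_set(vcpu, 1, gva);
 */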

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
{
	uint64_t val;
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
	return val;
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
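
/*
 * Illustrative usage sketch (not part of this header): KVM_{GET,SET}_ONE_REG
 * ids are arch-specific, e.g. an arm64 test reading and advancing the
 * guest's PC (ARM64_CORE_REG() is an arm64 selftests helper, assumed to be
 * available via the arch's processor.h):
 *
 *	uint64_t pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
 *
 *	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), pc + 4);
 */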

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
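
/*
 * Illustrative usage sketch (not part of this header): device attributes
 * follow the KVM_{HAS,GET,SET}_DEVICE_ATTR ABI, e.g. arm64 vGIC setup
 * creates a device and then programs it via attributes (group/attr values
 * come from the vGIC ABI in <linux/kvm.h>; the GPA is test-chosen):
 *
 *	int gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
 *	uint64_t dist_gpa = ...;	(test-chosen distributor base GPA)
 *
 *	kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
 *			    KVM_VGIC_V3_ADDR_TYPE_DIST, &dist_gpa);
 */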

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - vCPU whose guest entry-point arguments are to be set
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry
 * point, per the C calling convention of the architecture, to the values
 * given as variable args. Each of the variable args is expected to be of
 * type uint64_t. The maximum supported @num is architecture-specific.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES 4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				vm_paddr_t paddr_min, uint32_t memslot,
				bool protected);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
					    vm_paddr_t paddr_min, uint32_t memslot)
{
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
				    vm_arch_has_protected_memory(vm));
}

/*
 * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_SHAPE_DEFAULT);
}

static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};

	return ____vm_create(shape);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}
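
/*
 * Illustrative usage sketch (not part of this header): the common pattern is
 * to create a VM with all of its vCPUs in one shot and pass each vCPU its
 * arguments, e.g. for a hypothetical two-vCPU guest_main(uint64_t id):
 *
 *	struct kvm_vcpu *vcpus[2];
 *	struct kvm_vm *vm = vm_create_with_vcpus(2, guest_main, vcpus);
 *
 *	vcpu_args_set(vcpus[0], 1, 0);
 *	vcpu_args_set(vcpus[1], 1, 1);
 */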

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       uint64_t extra_mem_pages,
					       void *guest_code);

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory. Returns the VM and vCPU (via out param).
 */
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						       uint64_t extra_mem_pages,
						       void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
					       extra_mem_pages, guest_code);
}

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}

static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
							   struct kvm_vcpu **vcpu,
							   void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_set_files_rlimit(uint32_t nr_vcpus);

int __pin_task_to_cpu(pthread_t task, int cpu);

static inline void pin_task_to_cpu(pthread_t task, int cpu)
{
	int r;

	r = __pin_task_to_cpu(task, cpu);
	TEST_ASSERT(!r, "Failed to set thread affinity to pCPU '%u'", cpu);
}

static inline int pin_task_to_any_cpu(pthread_t task)
{
	int cpu = sched_getcpu();

	pin_task_to_cpu(task, cpu);
	return cpu;
}

static inline void pin_self_to_cpu(int cpu)
{
	pin_task_to_cpu(pthread_self(), cpu);
}

static inline int pin_self_to_any_cpu(void)
{
	return pin_task_to_any_cpu(pthread_self());
}

void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;

	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain. Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})
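
/*
 * Illustrative usage sketch (not part of this header): because each VM gets
 * its own copy of globals, host-side writes must be pushed into the guest
 * explicitly, e.g. for a hypothetical global "static uint64_t iterations":
 *
 *	iterations = 100;
 *	sync_global_to_guest(vm, iterations);
 *	... run the guest, which may update the global ...
 *	sync_global_from_guest(vm, iterations);
 */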

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}
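
/*
 * Illustrative usage sketch (not part of this header): manual mappings pair
 * a freshly allocated physical page with a test-chosen virtual address,
 * which can then be verified via the reverse translation (TEST_ASSERT_EQ()
 * comes from test_util.h):
 *
 *	vm_vaddr_t gva = ...;	(test-chosen, page-aligned virtual address)
 *	vm_paddr_t gpa = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
 *					   vm->memslots[MEM_REGION_TEST_DATA]);
 *
 *	virt_pg_map(vm, gva, gpa);
 *	TEST_ASSERT_EQ(addr_gva2gpa(vm, gva), gpa);
 */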

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm);

bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);

uint32_t guest_get_vcpuid(void);

#endif /* SELFTEST_KVM_UTIL_H */