/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/kvm.h>

#include <sys/ioctl.h>

#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"

#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

struct userspace_mem_region {
	struct kvm_userspace_memory_region2 region;
	struct sparsebit *unused_phy_pages;
	struct sparsebit *protected_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_binary_stats {
	int fd;
	struct kvm_stats_header header;
	struct kvm_stats_desc *desc;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
	struct kvm_binary_stats stats;
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t ucall_mmio_addr;
	vm_paddr_t pgd;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;
	uint64_t gpa_tag_mask;

	struct kvm_vm_arch arch;

	struct kvm_binary_stats stats;

	/*
	 * KVM region slots. These are the default memslots used by page
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};
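
/*
 * Example (illustrative, not part of this header): a test can point one of
 * the default region types at its own memslot before allocating guest
 * memory; the slot number, GPA, and page count below are hypothetical.
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x10000000, 1, 512, 0);
 *	vm->memslots[MEM_REGION_TEST_DATA] = 1;
 */
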
struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	int feature_type;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)						\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define kvm_for_each_vcpu(vm, i, vcpu)				\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)		\
		if (!((vcpu) = vm->vcpus[i]))			\
			continue;				\
		else

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_16K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48-bit VA but ANY bits PA */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P47V47_16K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

struct vm_shape {
	uint32_t type;
	uint8_t  mode;
	uint8_t  pad0;
	uint16_t pad1;
};

kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));

#define VM_TYPE_DEFAULT		0

#define VM_SHAPE(__mode)			\
({						\
	struct vm_shape shape = {		\
		.mode = (__mode),		\
		.type = VM_TYPE_DEFAULT		\
	};					\
						\
	shape;					\
})

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__loongarch__)
#define VM_MODE_DEFAULT			VM_MODE_P47V47_16K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

bool get_kvm_param_bool(const char *param);
bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);

int get_kvm_param_integer(const char *param);
int get_kvm_intel_param_integer(const char *param);
int get_kvm_amd_param_integer(const char *param);

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}
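
/*
 * Example (illustrative): gate a test on a capability with TEST_REQUIRE()
 * (from test_util.h).  For capabilities that report a value rather than a
 * boolean, use kvm_check_cap() directly, e.g. KVM_CAP_DIRTY_LOG_RING
 * returns the maximum supported ring size.
 *
 *	TEST_REQUIRE(kvm_has_cap(KVM_CAP_DIRTY_LOG_RING));
 *	ring_size = kvm_check_cap(KVM_CAP_DIRTY_LOG_RING);
 */
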
#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of ioctl() and not its literal numeric value
 * is printed on error.  The "outer" macro is strongly preferred when reporting
 * errors "directly", i.e. without an additional layer of macros, as it reduces
 * the probability of passing in the wrong string.
 */
#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret)	__KVM_IOCTL_ERROR(#_ioctl, _ret)

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define kvm_ioctl(kvm_fd, cmd, arg)				\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));	\
})

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)			\
({							\
	static_assert_is_vm(vm);			\
	kvm_do_ioctl((vm)->fd, cmd, arg);		\
})

/*
 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
 * the ioctl() failed because KVM killed/bugged the VM.  To detect a dead VM,
 * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
 * selftests existed and (b) should never outright fail, i.e. is supposed to
 * return 0 or 1.  If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
 * VM and its vCPUs, including KVM_CHECK_EXTENSION.
 */
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)				\
do {											\
	int __errno = errno;								\
											\
	static_assert_is_vm(vm);							\
											\
	if (cond)									\
		break;									\
											\
	if (errno == EIO &&								\
	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {	\
		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");	\
		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");	\
	}										\
	errno = __errno;								\
	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));				\
} while (0)

#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)		\
	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)

#define vm_ioctl(vm, cmd, arg)					\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);	\
})

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)			\
({							\
	static_assert_is_vcpu(vcpu);			\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);		\
})

#define vcpu_ioctl(vcpu, cmd, arg)					\
({									\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);				\
									\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
})
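
/*
 * Example (illustrative): the plain wrappers assert on failure and are the
 * common case; the double-underscore variants return the raw result for
 * tests that expect, and want to inspect, failure.
 *
 *	vcpu_ioctl(vcpu, KVM_GET_REGS, &regs);		(asserts on failure)
 *	ret = __vcpu_ioctl(vcpu, KVM_GET_REGS, &regs);	(returns -1, sets errno)
 */
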
/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
					    uint64_t size, uint64_t attributes)
{
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
		.address = gpa,
		.size = size,
		.flags = 0,
	};

	/*
	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes.  These flows
	 * need significant enhancements to support multiple attributes.
	 */
	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
		    "Update me to support multiple attributes!");

	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}

static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
				      uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
				     uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, 0);
}
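
/*
 * Example (illustrative): converting a 2MiB range of a guest_memfd-backed
 * slot to private and back to shared; the GPA is hypothetical.
 *
 *	vm_mem_set_private(vm, 0x80000000, 0x200000);
 *	...
 *	vm_mem_set_shared(vm, 0x80000000, 0x200000);
 */
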
void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
			    bool punch_hole);

static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
					   uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, true);
}

static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
					 uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, false);
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
						uint64_t address,
						uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio  = pio,
	};

	vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
}

static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
						  uint64_t address,
						  uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio  = pio,
	};

	vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned.  For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable, i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}
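
/*
 * Example (illustrative): walking every stat descriptor exposed by a VM's
 * binary stats fd.  Error handling is elided, and kvm_get_stat() below is
 * the preferred path for looking up a single stat by name.
 *
 *	struct kvm_stats_header hdr;
 *	struct kvm_stats_desc *descs, *d;
 *	int i, fd = vm_get_stats_fd(vm);
 *
 *	read_stats_header(fd, &hdr);
 *	descs = read_stats_descriptors(fd, &hdr);
 *	for (i = 0; i < hdr.num_desc; i++) {
 *		d = get_stats_descriptor(descs, i, &hdr);
 *		printf("%s\n", d->name);
 *	}
 */
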
void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
		  uint64_t *data, size_t max_elements);

#define __get_stat(stats, stat)			\
({						\
	uint64_t data;				\
						\
	kvm_get_stat(stats, #stat, &data, 1);	\
	data;					\
})

#define vm_get_stat(vm, stat)		__get_stat(&(vm)->stats, stat)
#define vcpu_get_stat(vcpu, stat)	__get_stat(&(vcpu)->stats, stat)

void vm_create_irqchip(struct kvm_vm *vm);

static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					  uint64_t flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
		.flags = flags,
	};

	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}

static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					uint64_t flags)
{
	int fd = __vm_create_guest_memfd(vm, size, flags);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
	return fd;
}

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva,
				uint32_t guest_memfd, uint64_t guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 uint64_t gpa, uint64_t size, void *hva,
				 uint32_t guest_memfd, uint64_t guest_memfd_offset);
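
/*
 * Example (illustrative): backing a slot with guest_memfd for private
 * memory.  The slot, gpa, size, and hva values are hypothetical;
 * KVM_MEM_GUEST_MEMFD is what ties the slot to the guest_memfd.
 *
 *	int gmem = vm_create_guest_memfd(vm, size, 0);
 *
 *	vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD,
 *				   gpa, size, hva, gmem, 0);
 */
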
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);

#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}
#endif

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
				 vm_vaddr_t vaddr_min,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif

static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
{
	uint64_t val;
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
	return val;
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
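
/*
 * Example (illustrative, arm64): reading and writing a register via the
 * KVM_{GET,SET}_ONE_REG wrappers; ARM64_CORE_REG() comes from the arm64
 * flavor of processor.h.
 *
 *	uint64_t pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
 *
 *	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), pc + 4);
 */
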
#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}
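
/*
 * Example (illustrative, arm64 vGIC-v3): create an in-kernel device, then
 * program a device-specific attribute; the distributor base address is
 * hypothetical.
 *
 *	uint64_t dist_addr = 0x8000000;
 *	int gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
 *
 *	kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
 *			    KVM_VGIC_V3_ADDR_TYPE_DIST, &dist_addr);
 */
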
void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - The vCPU whose entry point arguments are being set
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args.  Each of the variable args is expected to be of type uint64_t.
 * The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
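
/*
 * Example (illustrative): for a guest entry point declared as
 * 'static void guest_code(uint64_t a, uint64_t b)', the host passes the
 * two arguments with:
 *
 *	vcpu_args_set(vcpu, 2, 42, 43);
 */
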
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES 4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				vm_paddr_t paddr_min, uint32_t memslot,
				bool protected);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
					    vm_paddr_t paddr_min, uint32_t memslot)
{
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
				    vm_arch_has_protected_memory(vm));
}

/*
 * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_SHAPE_DEFAULT);
}

static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};

	return ____vm_create(shape);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       uint64_t extra_mem_pages,
					       void *guest_code);

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						       uint64_t extra_mem_pages,
						       void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
					       extra_mem_pages, guest_code);
}

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
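
/*
 * Example (illustrative): the typical single-vCPU test skeleton, where
 * guest_code is a test-supplied function.  Real tests usually communicate
 * via ucalls (see ucall_common.h) instead of checking raw exit reasons,
 * and KVM_EXIT_IO is the x86 flavor of a ucall exit.
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *
 *	vcpu_run(vcpu);
 *	TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_IO,
 *		    "Unexpected exit: %s",
 *		    exit_reason_str(vcpu->run->exit_reason));
 *	kvm_vm_free(vm);
 */
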
static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
							   struct kvm_vcpu **vcpu,
							   void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_set_files_rlimit(uint32_t nr_vcpus);

void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;

	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain.  Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})
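
/*
 * Example (illustrative): a host-initialized "global" must be pushed into
 * the guest's copy of the data section before it is visible to guest code.
 *
 *	static uint64_t nr_iterations;	(file scope, linked into the guest)
 *
 *	nr_iterations = 100;
 *	sync_global_to_guest(vm, nr_iterations);
 */
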
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm);

bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);

uint32_t guest_get_vcpuid(void);

#endif /* SELFTEST_KVM_UTIL_H */