/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/kvm.h>

#include <sys/ioctl.h>

#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"

#define KVM_DEV_PATH		"/dev/kvm"
#define KVM_MAX_VCPUS		512

#define NSEC_PER_SEC		1000000000L

struct userspace_mem_region {
	struct kvm_userspace_memory_region2 region;
	struct sparsebit *unused_phy_pages;
	struct sparsebit *protected_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t ucall_mmio_addr;
	vm_paddr_t pgd;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;
	uint64_t gpa_tag_mask;

	struct kvm_vm_arch arch;

	/* Cache of information for binary stats interface */
	int stats_fd;
	struct kvm_stats_header stats_header;
	struct kvm_stats_desc *stats_desc;

	/*
	 * KVM region slots. These are the default memslots used by page
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};

struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	int feature_type;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)					\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
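
/*
 * Illustrative usage (a sketch, not part of the original API): walk every
 * sublist of a hypothetical "config" descriptor. Iteration stops at the
 * first sublist whose ->regs pointer is NULL, so a list must be terminated
 * by an empty sublist.
 *
 *	struct vcpu_reg_sublist *s;
 *
 *	for_each_sublist(config, s)
 *		pr_info("sublist: %s\n", s->name ?: "(base)");
 */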

#define kvm_for_each_vcpu(vm, i, vcpu)				\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)		\
		if (!((vcpu) = vm->vcpus[i]))			\
			continue;				\
		else

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_16K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* 48-bit VA with any number of PA bits */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

struct vm_shape {
	uint32_t type;
	uint8_t  mode;
	uint8_t  pad0;
	uint16_t pad1;
};

kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));

#define VM_TYPE_DEFAULT			0

#define VM_SHAPE(__mode)			\
({						\
	struct vm_shape shape = {		\
		.mode = (__mode),		\
		.type = VM_TYPE_DEFAULT		\
	};					\
						\
	shape;					\
})
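
/*
 * Example (sketch): VM_SHAPE() wraps a guest mode in a default-type shape,
 * e.g. to request 4K pages with 40-bit physical addresses:
 *
 *	struct kvm_vm *vm = ____vm_create(VM_SHAPE(VM_MODE_P40V48_4K));
 */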

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

bool get_kvm_param_bool(const char *param);
bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);

int get_kvm_param_integer(const char *param);
int get_kvm_intel_param_integer(const char *param);
int get_kvm_amd_param_integer(const char *param);

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of ioctl() and not its literal numeric value
 * is printed on error. The "outer" macro is strongly preferred when reporting
 * errors "directly", i.e. without an additional layer of macros, as it reduces
 * the probability of passing in the wrong string.
 */
#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret)	__KVM_IOCTL_ERROR(#_ioctl, _ret)
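
/*
 * Sketch of the intended usage (not from the original header): at a call
 * site, let the outer macro stringify the ioctl name for you,
 *
 *	int ret = ioctl(vm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
 *
 *	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
 *
 * whereas a wrapper macro that receives the command as a parameter must
 * stringify it itself and use the inner macro, as kvm_ioctl() below does
 * with #cmd.
 */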

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define kvm_ioctl(kvm_fd, cmd, arg)				\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));	\
})

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)			\
({							\
	static_assert_is_vm(vm);			\
	kvm_do_ioctl((vm)->fd, cmd, arg);		\
})

/*
 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
 * the ioctl() failed because KVM killed/bugged the VM. To detect a dead VM,
 * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
 * selftests existed and (b) should never outright fail, i.e. is supposed to
 * return 0 or 1. If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
 * VM and its vCPUs, including KVM_CHECK_EXTENSION.
 */
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)				\
do {											\
	int __errno = errno;								\
											\
	static_assert_is_vm(vm);							\
											\
	if (cond)									\
		break;									\
											\
	if (errno == EIO &&								\
	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {	\
		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");	\
		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");	\
	}										\
	errno = __errno;								\
	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));				\
} while (0)

#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)		\
	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)

#define vm_ioctl(vm, cmd, arg)					\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);	\
})

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)			\
({							\
	static_assert_is_vcpu(vcpu);			\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);		\
})

#define vcpu_ioctl(vcpu, cmd, arg)					\
({									\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);				\
									\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
})

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
					    uint64_t size, uint64_t attributes)
{
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
		.address = gpa,
		.size = size,
		.flags = 0,
	};

	/*
	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes. These flows
	 * need significant enhancements to support multiple attributes.
	 */
	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
		    "Update me to support multiple attributes!");

	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}

static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
				      uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
				     uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, 0);
}

void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
			    bool punch_hole);

static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
					   uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, true);
}

static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
					 uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, false);
}
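
/*
 * Sketch of a typical conversion flow for a GPA range backed by guest_memfd
 * (an assumption, not mandated by these helpers): flip the attributes to
 * convert between private and shared, and punch a hole to discard the
 * private backing once it is no longer needed.
 *
 *	vm_mem_set_private(vm, gpa, size);
 *	// ... guest accesses the range as private memory ...
 *	vm_mem_set_shared(vm, gpa, size);
 *	vm_guest_mem_punch_hole(vm, gpa, size);
 */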

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
		       size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, the size of a descriptor includes the size of the name field,
	 * which is variable, i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}
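
/*
 * Minimal sketch of walking the binary stats ABI with the helpers above
 * (assumes a live "vm"): read the header once, slurp all descriptors, then
 * step through them with get_stats_descriptor(), since each entry is
 * variable-sized.
 *
 *	struct kvm_stats_header header;
 *	struct kvm_stats_desc *descs, *desc;
 *	int i, stats_fd = vm_get_stats_fd(vm);
 *
 *	read_stats_header(stats_fd, &header);
 *	descs = read_stats_descriptors(stats_fd, &header);
 *	for (i = 0; i < header.num_desc; i++) {
 *		desc = get_stats_descriptor(descs, i, &header);
 *		pr_info("stat: %s\n", desc->name);
 *	}
 */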

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
		   size_t max_elements);

static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
{
	uint64_t data;

	__vm_get_stat(vm, stat_name, &data, 1);
	return data;
}
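
/*
 * e.g. (sketch; "pages_4k" is an x86-specific KVM VM stat):
 *
 *	uint64_t nr_4k = vm_get_stat(vm, "pages_4k");
 */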

void vm_create_irqchip(struct kvm_vm *vm);

static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					  uint64_t flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
		.flags = flags,
	};

	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}

static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					uint64_t flags)
{
	int fd = __vm_create_guest_memfd(vm, size, flags);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
	return fd;
}

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva,
				uint32_t guest_memfd, uint64_t guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 uint64_t gpa, uint64_t size, void *hva,
				 uint32_t guest_memfd, uint64_t guest_memfd_offset);
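
/*
 * Sketch (assumes KVM_CAP_GUEST_MEMFD is supported): create a guest_memfd
 * and bind it to a memslot so the slot can hold private memory; shared
 * accesses still go through the host virtual address.
 *
 *	int memfd = vm_create_guest_memfd(vm, size, 0);
 *
 *	vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size,
 *				   hva, memfd, 0);
 */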

void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);

#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}
#endif

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
				 vm_vaddr_t vaddr_min,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif

static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - The vCPU whose guest entry point arguments are set
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args. Each of the variable args is expected to be of type uint64_t.
 * The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
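
/*
 * e.g. (sketch): pass two values to a guest_code(uint64_t a, uint64_t b)
 * entry point before the first KVM_RUN:
 *
 *	vcpu_args_set(vcpu, 2, a, b);
 */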

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				vm_paddr_t paddr_min, uint32_t memslot,
				bool protected);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
					    vm_paddr_t paddr_min, uint32_t memslot)
{
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
				    vm_arch_has_protected_memory(vm));
}

/*
 * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs; @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_SHAPE_DEFAULT);
}

static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};

	return ____vm_create(shape);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       uint64_t extra_mem_pages,
					       void *guest_code);

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory. Returns the VM and vCPU (via out param).
 */
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						       uint64_t extra_mem_pages,
						       void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
					       extra_mem_pages, guest_code);
}

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
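
/*
 * Sketch of the canonical single-vCPU test skeleton built on the helpers
 * above; guest_code() runs inside the VM, and GUEST_DONE() (from
 * ucall_common.h, not included here) is the usual way to report completion.
 *
 *	static void guest_code(void)
 *	{
 *		GUEST_DONE();
 *	}
 *
 *	int main(void)
 *	{
 *		struct kvm_vcpu *vcpu;
 *		struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *
 *		vcpu_run(vcpu);
 *		kvm_vm_free(vm);
 *		return 0;
 *	}
 */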

static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
							   struct kvm_vcpu **vcpu,
							   void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;

	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain. Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})
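
/*
 * e.g. (sketch), assuming "static uint64_t test_val;" is a global that was
 * loaded into the guest along with the test binary:
 *
 *	test_val = 42;
 *	sync_global_to_guest(vm, test_val);	// push host value into the VM
 *	// ... run the guest ...
 *	sync_global_from_guest(vm, test_val);	// pull the guest's value back
 */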

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm);

bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);

uint32_t guest_get_vcpuid(void);

#endif /* SELFTEST_KVM_UTIL_H */