/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/kvm.h>

#include <sys/eventfd.h>
#include <sys/ioctl.h>

#include <pthread.h>

#include "kvm_syscalls.h"
#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"

#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

struct userspace_mem_region {
	struct kvm_userspace_memory_region2 region;
	struct sparsebit *unused_phy_pages;
	struct sparsebit *protected_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_binary_stats {
	int fd;
	struct kvm_stats_header header;
	struct kvm_stats_desc *desc;
};

struct kvm_vcpu {
	struct list_head list;
	u32 id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
#ifdef __aarch64__
	struct kvm_vcpu_init init;
#endif
	struct kvm_binary_stats stats;
	struct kvm_dirty_gfn *dirty_gfns;
	u32 fetch_index;
	u32 dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_mmu {
	bool pgd_created;
	u64 pgd;
	int pgtable_levels;

	struct kvm_mmu_arch arch;
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	u64 max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	gpa_t ucall_mmio_addr;
	gva_t handlers;
	u32 dirty_ring_size;
	gpa_t gpa_tag_mask;

	/*
	 * "mmu" is the guest's stage-1, with a short name because the vast
	 * majority of tests only care about the stage-1 MMU.
	 */
	struct kvm_mmu mmu;
	struct kvm_mmu stage2_mmu;

	struct kvm_vm_arch arch;

	struct kvm_binary_stats stats;

	/*
	 * KVM region slots.  These are the default memslots used by the page
	 * allocators, e.g. lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	u32 memslots[NR_MEM_REGIONS];
};

struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	int feature_type;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)		\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define kvm_for_each_vcpu(vm, i, vcpu)			\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)	\
		if (!((vcpu) = vm->vcpus[i]))		\
			continue;			\
		else
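
/*
 * Example (illustrative sketch): run every vCPU that has been added to the
 * VM.  Note, the macro skips IDs that have no associated vCPU.
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(vm, i, vcpu)
 *		vcpu_run(vcpu);
 */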

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, u32 memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_16K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXVYY_4K,	/* For 48-bit or 57-bit VA, depending on host support */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P47V47_16K,
	VM_MODE_P36V47_16K,

	VM_MODE_P56V57_4K,	/* For riscv64 */
	VM_MODE_P56V48_4K,
	VM_MODE_P56V39_4K,
	VM_MODE_P50V57_4K,
	VM_MODE_P50V48_4K,
	VM_MODE_P50V39_4K,
	VM_MODE_P41V57_4K,
	VM_MODE_P41V48_4K,
	VM_MODE_P41V39_4K,

	NUM_VM_MODES,
};

struct vm_shape {
	u32 type;
	u8  mode;
	u8  pad0;
	u16 pad1;
};

kvm_static_assert(sizeof(struct vm_shape) == sizeof(u64));

#define VM_TYPE_DEFAULT			0

#define VM_SHAPE(__mode)			\
({						\
	struct vm_shape shape = {		\
		.mode = (__mode),		\
		.type = VM_TYPE_DEFAULT		\
	};					\
						\
	shape;					\
})
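
/*
 * Example (illustrative): build a shape for an explicit guest mode with the
 * default VM type.
 *
 *	struct vm_shape shape = VM_SHAPE(VM_MODE_P52V48_4K);
 */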

extern enum vm_guest_mode vm_mode_default;

#if defined(__aarch64__)

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXVYY_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__loongarch__)

#define VM_MODE_DEFAULT			VM_MODE_P47V47_16K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int __open_path_or_exit(const char *path, int flags, const char *enoent_help);
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

int kvm_get_module_param_integer(const char *module_name, const char *param);
bool kvm_get_module_param_bool(const char *module_name, const char *param);

static inline bool get_kvm_param_bool(const char *param)
{
	return kvm_get_module_param_bool("kvm", param);
}

static inline int get_kvm_param_integer(const char *param)
{
	return kvm_get_module_param_integer("kvm", param);
}

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of the ioctl(), and not its literal numeric
 * value, is printed on error.  The "outer" macro is strongly preferred when
 * reporting errors "directly", i.e. without an additional layer of macros, as
 * it reduces the probability of passing in the wrong string.
 */
#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)
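
/*
 * Example (illustrative sketch): in test code, pass the ioctl name directly
 * to the "outer" macro and let it do the stringification.
 *
 *	int ret = __vcpu_ioctl(vcpu, KVM_SET_REGS, &regs);
 *
 *	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_REGS, ret));
 */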

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define kvm_ioctl(kvm_fd, cmd, arg)				\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));	\
})

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)				\
({								\
	static_assert_is_vm(vm);				\
	kvm_do_ioctl((vm)->fd, cmd, arg);			\
})

/*
 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
 * the ioctl() failed because KVM killed/bugged the VM.  To detect a dead VM,
 * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
 * selftests existed and (b) should never outright fail, i.e. is supposed to
 * return 0 or 1.  If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
 * VM and its vCPUs, including KVM_CHECK_EXTENSION.
 */
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)				\
do {											\
	int __errno = errno;								\
											\
	static_assert_is_vm(vm);							\
											\
	if (cond)									\
		break;									\
											\
	if (errno == EIO &&								\
	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {	\
		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");	\
		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");	\
	}										\
	errno = __errno;								\
	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));				\
} while (0)

#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)		\
	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)

#define vm_ioctl(vm, cmd, arg)					\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);	\
})

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	static_assert_is_vcpu(vcpu);				\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
})

#define vcpu_ioctl(vcpu, cmd, arg)					\
({									\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);				\
									\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
})
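
/*
 * Example (illustrative sketch): the plain wrappers assert success, while the
 * double-underscore variants return the raw ioctl() result so the caller can
 * handle failure itself.
 *
 *	struct kvm_mp_state mp_state;
 *	int r;
 *
 *	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, &mp_state);
 *
 *	r = __vcpu_ioctl(vcpu, KVM_GET_MP_STATE, &mp_state);
 *	if (r)
 *		... handle or skip ...
 */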

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_set_memory_attributes(struct kvm_vm *vm, gpa_t gpa,
					    u64 size, u64 attributes)
{
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
		.address = gpa,
		.size = size,
		.flags = 0,
	};

	/*
	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes.  These flows
	 * need significant enhancements to support multiple attributes.
	 */
	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
		    "Update me to support multiple attributes!");

	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}

static inline void vm_mem_set_private(struct kvm_vm *vm, gpa_t gpa,
				      u64 size)
{
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

static inline void vm_mem_set_shared(struct kvm_vm *vm, gpa_t gpa,
				     u64 size)
{
	vm_set_memory_attributes(vm, gpa, size, 0);
}

void vm_guest_mem_fallocate(struct kvm_vm *vm, gpa_t gpa, u64 size,
			    bool punch_hole);

static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, gpa_t gpa,
					   u64 size)
{
	vm_guest_mem_fallocate(vm, gpa, size, true);
}

static inline void vm_guest_mem_allocate(struct kvm_vm *vm, gpa_t gpa,
					 u64 size)
{
	vm_guest_mem_fallocate(vm, gpa, size, false);
}
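
/*
 * Typical private <=> shared conversion flow (illustrative sketch, assuming a
 * guest_memfd-backed region): mark the range private before the guest
 * accesses it via private mappings, and punch a hole in guest_memfd after
 * converting back to shared so the private backing is reclaimed.
 *
 *	vm_mem_set_private(vm, gpa, size);
 *	...
 *	vm_mem_set_shared(vm, gpa, size);
 *	vm_guest_mem_punch_hole(vm, gpa, size);
 */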

void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size);
const char *vm_guest_mode_string(u32 i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  u64 first_page, u32 num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline u32 kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
						u64 address,
						u64 size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio  = pio,
	};

	vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
}

static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
						  u64 address,
						  u64 size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio  = pio,
	};

	vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
	return fd;
}

static inline int __kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd,
			      u32 flags)
{
	struct kvm_irqfd irqfd = {
		.fd = eventfd,
		.gsi = gsi,
		.flags = flags,
		.resamplefd = -1,
	};

	return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
}

static inline void kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, u32 flags)
{
	int ret = __kvm_irqfd(vm, gsi, eventfd, flags);

	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
}

static inline void kvm_assign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, 0);
}

static inline void kvm_deassign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
}

static inline int kvm_new_eventfd(void)
{
	int fd = eventfd(0, 0);

	TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, the size of each descriptor includes the size of the name
	 * field, which is variable, i.e. this is NOT equivalent to
	 * &stats[index].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}
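
/*
 * Example (illustrative sketch): walk all stats descriptors for a VM.  Each
 * descriptor's size depends on header.name_size, so use the helpers rather
 * than indexing into the array directly.
 *
 *	struct kvm_stats_desc *descs, *desc;
 *	struct kvm_stats_header header;
 *	int stats_fd, i;
 *
 *	stats_fd = vm_get_stats_fd(vm);
 *	read_stats_header(stats_fd, &header);
 *	descs = read_stats_descriptors(stats_fd, &header);
 *	for (i = 0; i < header.num_desc; i++) {
 *		desc = get_stats_descriptor(descs, i, &header);
 *		...
 *	}
 */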

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, u64 *data,
		    size_t max_elements);

void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
		  u64 *data, size_t max_elements);

#define __get_stat(stats, stat)							\
({										\
	u64 data;								\
										\
	kvm_get_stat(stats, #stat, &data, 1);					\
	data;									\
})

#define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat)
#define vcpu_get_stat(vcpu, stat) __get_stat(&(vcpu)->stats, stat)
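
/*
 * Example (the stat name below is illustrative; the set of available stats
 * varies by architecture and kernel version):
 *
 *	u64 flushes = vm_get_stat(vm, remote_tlb_flush);
 */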

static inline bool read_smt_control(char *buf, size_t buf_size)
{
	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r");
	bool ret;

	if (!f)
		return false;

	ret = fread(buf, sizeof(*buf), buf_size, f) > 0;
	fclose(f);

	return ret;
}

static inline bool is_smt_possible(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) &&
	    (!strncmp(buf, "forceoff", 8) || !strncmp(buf, "notsupported", 12)))
		return false;

	return true;
}

static inline bool is_smt_on(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) && !strncmp(buf, "on", 2))
		return true;

	return false;
}

void vm_create_irqchip(struct kvm_vm *vm);

static inline int __vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
					  u64 flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
		.flags = flags,
	};

	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}

static inline int vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
					u64 flags)
{
	int fd = __vm_create_guest_memfd(vm, size, flags);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
	return fd;
}

void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
			       gpa_t gpa, u64 size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
				gpa_t gpa, u64 size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
				gpa_t gpa, u64 size, void *hva,
				u32 guest_memfd, u64 guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
				 gpa_t gpa, u64 size, void *hva,
				 u32 guest_memfd, u64 guest_memfd_offset);

void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 gpa_t gpa, u32 slot, u64 npages, u32 flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		gpa_t gpa, u32 slot, u64 npages, u32 flags,
		int guest_memfd_fd, u64 guest_memfd_offset);

#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}
#endif

void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags);
void vm_mem_region_reload(struct kvm_vm *vm, u32 slot);
void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, u32 slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
void vm_populate_gva_bitmap(struct kvm_vm *vm);
gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva);
gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva);
gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
		 enum kvm_mem_region_type type);
gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
		      enum kvm_mem_region_type type);
gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages);
gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);
gva_t vm_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);
gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa);

#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif

static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, gpa_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, u32 cap,
				   u64 arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (u64)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}

static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

static inline u64 vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id)
{
	u64 val;
	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
	return val;
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
	return fd;
}

int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr);

static inline void kvm_has_device_attr(int dev_fd, u32 group, u64 attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, u32 group,
				       u64 attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, u32 group,
				       u64 attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
					 u64 attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
					u64 attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
					 u64 attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
					u64 attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
					 u64 attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
					u64 attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, u64 type);
int __kvm_create_device(struct kvm_vm *vm, u64 type);

static inline int kvm_create_device(struct kvm_vm *vm, u64 type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - vCPU
 *   num - number of arguments
 *   ... - arguments, each of type u64
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry
 * point, per the C calling convention of the architecture, to the values
 * given as variable args.  Each of the variable args is expected to be of
 * type u64.  The maximum supported @num is architecture-specific.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
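
/*
 * Example (illustrative sketch): pass two values to the guest's entry point.
 *
 *	static void guest_code(u64 a, u64 b) { ... }
 *
 *	vcpu = vm_vcpu_add(vm, 0, guest_code);
 *	vcpu_args_set(vcpu, 2, a_val, b_val);
 */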

void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
		u32 gsi, u32 pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot);
gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa,
			   u32 memslot, bool protected);
gpa_t vm_alloc_page_table(struct kvm_vm *vm);

static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				       gpa_t min_gpa, u32 memslot)
{
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
	return __vm_phy_pages_alloc(vm, num, min_gpa, memslot,
				    vm_arch_has_protected_memory(vm));
}

/*
 * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus,
			   u64 nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_SHAPE_DEFAULT);
}

static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};

	return ____vm_create(shape);
}

static inline struct kvm_vm *vm_create(u32 nr_runnable_vcpus)
{
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus,
				      u64 extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(u32 nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       u64 extra_mem_pages,
					       void *guest_code);

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						       u64 extra_mem_pages,
						       void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
					       extra_mem_pages, guest_code);
}

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}

static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
							   struct kvm_vcpu **vcpu,
							   void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
}
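
/*
 * Example (illustrative sketch): minimal test flow built on the one-vCPU
 * helper.
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */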

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_set_files_rlimit(u32 nr_vcpus);

int __pin_task_to_cpu(pthread_t task, int cpu);

static inline void pin_task_to_cpu(pthread_t task, int cpu)
{
	int r;

	r = __pin_task_to_cpu(task, cpu);
	TEST_ASSERT(!r, "Failed to set thread affinity to pCPU '%u'", cpu);
}

static inline int pin_task_to_any_cpu(pthread_t task)
{
	int cpu = sched_getcpu();

	pin_task_to_cpu(task, cpu);
	return cpu;
}

static inline void pin_self_to_cpu(int cpu)
{
	pin_task_to_cpu(pthread_self(), cpu);
}

static inline int pin_self_to_any_cpu(void)
{
	return pin_task_to_any_cpu(pthread_self());
}

void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;

	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
	return n;
}

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain.  Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})
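
/*
 * Example (illustrative sketch, where vm_id is a hypothetical per-VM
 * "global"): give each VM its own value without clobbering the host's copy.
 *
 *	static int vm_id;
 *
 *	write_guest_global(vm1, vm_id, 1);
 *	write_guest_global(vm2, vm_id, 2);
 */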

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    u8 indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     u8 indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
					   void *guest_code)
{
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						u32 vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * Within @vm, creates a virtual translation for the page starting
 * at @gva to the page starting at @gpa.
 */
void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa);

static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
	virt_arch_pg_map(vm, gva, gpa);
	sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift);
}
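
/*
 * Example (illustrative sketch): back a guest virtual page with a freshly
 * allocated physical page.
 *
 *	gpa_t gpa = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
 *				      vm->memslots[MEM_REGION_TEST_DATA]);
 *
 *	virt_pg_map(vm, gva, gpa);
 */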

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva);

static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm     - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

static inline u64 vm_page_align(struct kvm_vm *vm, u64 v)
{
	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);
void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
void kvm_arch_vm_release(struct kvm_vm *vm);

bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa);

u32 guest_get_vcpuid(void);

bool kvm_arch_has_default_irqchip(void);

#endif /* SELFTEST_KVM_UTIL_H */