/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/kvm.h>

#include <sys/eventfd.h>
#include <sys/ioctl.h>

#include <pthread.h>

#include "kvm_syscalls.h"
#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"

#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

struct userspace_mem_region {
	struct kvm_userspace_memory_region2 region;
	struct sparsebit *unused_phy_pages;
	struct sparsebit *protected_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_binary_stats {
	int fd;
	struct kvm_stats_header header;
	struct kvm_stats_desc *desc;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
#ifdef __aarch64__
	struct kvm_vcpu_init init;
#endif
	struct kvm_binary_stats stats;
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_mmu {
	bool pgd_created;
	u64 pgd;
	int pgtable_levels;

	struct kvm_mmu_arch arch;
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	u64 max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	gpa_t ucall_mmio_addr;
	gva_t handlers;
	uint32_t dirty_ring_size;
	u64 gpa_tag_mask;

	/*
	 * "mmu" is the guest's stage-1, with a short name because the vast
	 * majority of tests only care about the stage-1 MMU.
	 */
	struct kvm_mmu mmu;
	struct kvm_mmu stage2_mmu;

	struct kvm_vm_arch arch;

	struct kvm_binary_stats stats;

	/*
	 * KVM region slots. These are the default memslots used by page
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};

struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	int feature_type;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)		\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
#define kvm_for_each_vcpu(vm, i, vcpu)			\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)	\
		if (!((vcpu) = (vm)->vcpus[i]))		\
			continue;			\
		else
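
/*
 * Example (illustrative sketch): run every vCPU in a VM once.  Assumes @vm
 * was created with all of its vCPUs already added, e.g. via
 * vm_create_with_vcpus().
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(vm, i, vcpu)
 *		vcpu_run(vcpu);
 */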

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_16K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXVYY_4K,	/* For 48-bit or 57-bit VA, depending on host support */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P47V47_16K,
	VM_MODE_P36V47_16K,

	VM_MODE_P56V57_4K,	/* For riscv64 */
	VM_MODE_P56V48_4K,
	VM_MODE_P56V39_4K,
	VM_MODE_P50V57_4K,
	VM_MODE_P50V48_4K,
	VM_MODE_P50V39_4K,
	VM_MODE_P41V57_4K,
	VM_MODE_P41V48_4K,
	VM_MODE_P41V39_4K,

	NUM_VM_MODES,
};

struct vm_shape {
	uint32_t type;
	uint8_t  mode;
	uint8_t  pad0;
	uint16_t pad1;
};

kvm_static_assert(sizeof(struct vm_shape) == sizeof(u64));

#define VM_TYPE_DEFAULT			0

#define VM_SHAPE(__mode)			\
({						\
	struct vm_shape shape = {		\
		.mode = (__mode),		\
		.type = VM_TYPE_DEFAULT		\
	};					\
						\
	shape;					\
})
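
/*
 * Example (illustrative sketch): build a shape that forces an explicit guest
 * mode, e.g. 4K pages with 48-bit physical and virtual addresses, and create
 * a barebones VM with it:
 *
 *	struct kvm_vm *vm = ____vm_create(VM_SHAPE(VM_MODE_P48V48_4K));
 */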

extern enum vm_guest_mode vm_mode_default;

#if defined(__aarch64__)

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXVYY_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__loongarch__)
#define VM_MODE_DEFAULT			VM_MODE_P47V47_16K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int __open_path_or_exit(const char *path, int flags, const char *enoent_help);
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

int kvm_get_module_param_integer(const char *module_name, const char *param);
bool kvm_get_module_param_bool(const char *module_name, const char *param);

static inline bool get_kvm_param_bool(const char *param)
{
	return kvm_get_module_param_bool("kvm", param);
}

static inline int get_kvm_param_integer(const char *param)
{
	return kvm_get_module_param_integer("kvm", param);
}

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of the ioctl() and not its literal numeric
 * value is printed on error.  The "outer" macro is strongly preferred when
 * reporting errors "directly", i.e. without an additional layer of macros, as
 * it reduces the probability of passing in the wrong string.
 */
#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)
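
/*
 * Example (illustrative sketch): assert on a raw ioctl() so that the error
 * message names the ioctl.  Assumes @kvm_fd was opened via
 * open_kvm_dev_path_or_exit().
 *
 *	int ret = ioctl(kvm_fd, KVM_GET_API_VERSION, NULL);
 *
 *	TEST_ASSERT(ret == KVM_API_VERSION,
 *		    KVM_IOCTL_ERROR(KVM_GET_API_VERSION, ret));
 */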

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define kvm_ioctl(kvm_fd, cmd, arg)				\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));	\
})

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)				\
({								\
	static_assert_is_vm(vm);				\
	kvm_do_ioctl((vm)->fd, cmd, arg);			\
})

/*
 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
 * the ioctl() failed because KVM killed/bugged the VM.  To detect a dead VM,
 * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
 * selftests existed and (b) should never outright fail, i.e. is supposed to
 * return 0 or 1.  If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
 * VM and its vCPUs, including KVM_CHECK_EXTENSION.
 */
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)				\
do {											\
	int __errno = errno;								\
											\
	static_assert_is_vm(vm);							\
											\
	if (cond)									\
		break;									\
											\
	if (errno == EIO &&								\
	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {	\
		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");	\
		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");	\
	}										\
	errno = __errno;								\
	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));				\
} while (0)

#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)		\
	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)

#define vm_ioctl(vm, cmd, arg)					\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);	\
})

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	static_assert_is_vcpu(vcpu);				\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
})

#define vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
})
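
/*
 * Example (illustrative sketch): the double-underscore wrappers return the
 * raw ioctl() result for callers that want to handle failure themselves,
 * while the plain wrappers assert success:
 *
 *	struct kvm_regs regs;
 *
 *	vcpu_ioctl(vcpu, KVM_GET_REGS, &regs);
 *	if (__vcpu_ioctl(vcpu, KVM_SET_REGS, &regs))
 *		pr_info("KVM_SET_REGS failed with errno '%d'\n", errno);
 */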

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, u64 arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, u64 arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_set_memory_attributes(struct kvm_vm *vm, u64 gpa,
					    u64 size, u64 attributes)
{
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
		.address = gpa,
		.size = size,
		.flags = 0,
	};

	/*
	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes.  These flows
	 * need significant enhancements to support multiple attributes.
	 */
	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
		    "Update me to support multiple attributes!");

	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}

static inline void vm_mem_set_private(struct kvm_vm *vm, u64 gpa,
				      u64 size)
{
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

static inline void vm_mem_set_shared(struct kvm_vm *vm, u64 gpa,
				     u64 size)
{
	vm_set_memory_attributes(vm, gpa, size, 0);
}
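
/*
 * Example (illustrative sketch): flip a page of guest memory to private
 * before running the guest, then back to shared.  Assumes @gpa is backed by
 * a KVM_MEM_GUEST_MEMFD memslot in a VM type that supports memory attributes.
 *
 *	vm_mem_set_private(vm, gpa, vm->page_size);
 *	vcpu_run(vcpu);
 *	vm_mem_set_shared(vm, gpa, vm->page_size);
 */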

void vm_guest_mem_fallocate(struct kvm_vm *vm, u64 gpa, u64 size,
			    bool punch_hole);

static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, u64 gpa,
					   u64 size)
{
	vm_guest_mem_fallocate(vm, gpa, size, true);
}

static inline void vm_guest_mem_allocate(struct kvm_vm *vm, u64 gpa,
					 u64 size)
{
	vm_guest_mem_fallocate(vm, gpa, size, false);
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  u64 first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
						u64 address,
						u64 size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio  = pio,
	};

	vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
}

static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
						  u64 address,
						  u64 size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio  = pio,
	};

	vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
	return fd;
}

static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
			      uint32_t flags)
{
	struct kvm_irqfd irqfd = {
		.fd = eventfd,
		.gsi = gsi,
		.flags = flags,
		.resamplefd = -1,
	};

	return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
}

static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
			      uint32_t flags)
{
	int ret = __kvm_irqfd(vm, gsi, eventfd, flags);

	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
}

static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, 0);
}

static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
}

static inline int kvm_new_eventfd(void)
{
	int fd = eventfd(0, 0);

	TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
	return fd;
}
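
/*
 * Example (illustrative sketch): wire an eventfd to GSI 0 so that signaling
 * the eventfd injects an interrupt, then tear the binding down.  Assumes the
 * VM has an in-kernel irqchip (see vm_create_irqchip()).
 *
 *	int fd = kvm_new_eventfd();
 *
 *	kvm_assign_irqfd(vm, 0, fd);
 *	TEST_ASSERT(!eventfd_write(fd, 1), "eventfd_write() failed");
 *	kvm_deassign_irqfd(vm, 0, fd);
 *	close(fd);
 */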

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, the descriptor size includes the size of the name field,
	 * which is variable, i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}
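
/*
 * Example (illustrative sketch): walk every stats descriptor for a VM.  The
 * descriptors must be indexed via get_stats_descriptor() because each entry's
 * size depends on the header's name_size.
 *
 *	struct kvm_stats_desc *descs, *desc;
 *	struct kvm_stats_header header;
 *	int i, fd = vm_get_stats_fd(vm);
 *
 *	read_stats_header(fd, &header);
 *	descs = read_stats_descriptors(fd, &header);
 *	for (i = 0; i < header.num_desc; i++) {
 *		desc = get_stats_descriptor(descs, i, &header);
 *		pr_info("%s\n", desc->name);
 *	}
 */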

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, u64 *data,
		    size_t max_elements);

void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
		  u64 *data, size_t max_elements);

#define __get_stat(stats, stat)							\
({										\
	u64 data;								\
										\
	kvm_get_stat(stats, #stat, &data, 1);					\
	data;									\
})

#define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat)
#define vcpu_get_stat(vcpu, stat) __get_stat(&(vcpu)->stats, stat)
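
/*
 * Example (illustrative sketch): read a scalar stat by name; the stat is
 * passed as a bare token and stringified by __get_stat().  "pages_4k" is an
 * x86 VM stat; any stat exposed by KVM's binary stats ABI works the same way.
 *
 *	u64 pages = vm_get_stat(vm, pages_4k);
 */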

static inline bool read_smt_control(char *buf, size_t buf_size)
{
	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r");
	bool ret;

	if (!f)
		return false;

	ret = fread(buf, sizeof(*buf), buf_size, f) > 0;
	fclose(f);

	return ret;
}

static inline bool is_smt_possible(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) &&
	    (!strncmp(buf, "forceoff", 8) || !strncmp(buf, "notsupported", 12)))
		return false;

	return true;
}

static inline bool is_smt_on(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) && !strncmp(buf, "on", 2))
		return true;

	return false;
}

void vm_create_irqchip(struct kvm_vm *vm);

static inline int __vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
					  u64 flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
		.flags = flags,
	};

	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}

static inline int vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
					u64 flags)
{
	int fd = __vm_create_guest_memfd(vm, size, flags);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
	return fd;
}

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       u64 gpa, u64 size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				u64 gpa, u64 size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				u64 gpa, u64 size, void *hva,
				uint32_t guest_memfd, u64 guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 u64 gpa, u64 size, void *hva,
				 uint32_t guest_memfd, u64 guest_memfd_offset);

void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 u64 gpa, uint32_t slot, u64 npages,
				 uint32_t flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		u64 gpa, uint32_t slot, u64 npages, uint32_t flags,
		int guest_memfd_fd, u64 guest_memfd_offset);

#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}
#endif

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, u64 new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
gva_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
gva_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
gva_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min,
		       enum kvm_mem_region_type type);
gva_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t vaddr_min,
			    enum kvm_mem_region_type type);
gva_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
gva_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);
gva_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, u64 vaddr, u64 paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);
gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa);

#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif

static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, gpa_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   u64 arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (u64)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}

static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

static inline u64 vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id)
{
	u64 val;
	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
	return val;
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, u64 attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, u64 attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, u64 attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       u64 attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, u64 attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       u64 attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 u64 attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					u64 attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 u64 attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					u64 attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 u64 attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					u64 attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, u64 type);
int __kvm_create_device(struct kvm_vm *vm, u64 type);

static inline int kvm_create_device(struct kvm_vm *vm, u64 type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - vCPU
 *   num - number of arguments
 *   ... - arguments, each of type u64
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry
 * point, per the C calling convention of the architecture, to the values
 * given as variable args.  Each of the variable args is expected to be of
 * type u64.  The maximum supported @num is architecture-specific.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
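
/*
 * Example (illustrative sketch): pass two values to a hypothetical guest
 * function that takes two u64 parameters:
 *
 *	static void guest_code(u64 token, u64 nr_loops) { ... }
 *
 *	vcpu_args_set(vcpu, 2, 0xdeadbeef, 100);
 */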

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
		uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, uint32_t memslot);
gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			   gpa_t paddr_min, uint32_t memslot,
			   bool protected);
gpa_t vm_alloc_page_table(struct kvm_vm *vm);

static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				       gpa_t paddr_min, uint32_t memslot)
{
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
				    vm_arch_has_protected_memory(vm));
}

1014  * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
1015  * loads the test binary into guest memory and creates an IRQ chip (x86 only).
1016  * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
1017  * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
1018  */
1019 struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   u64 nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_SHAPE_DEFAULT);
}

static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};

	return ____vm_create(shape);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      u64 extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       u64 extra_mem_pages,
					       void *guest_code);

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						       u64 extra_mem_pages,
						       void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
					       extra_mem_pages, guest_code);
}

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
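
/*
 * Example (illustrative sketch): the typical single-vCPU test skeleton, with
 * guest_code being a hypothetical guest function that signals completion via
 * ucall (e.g. GUEST_DONE()):
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */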

static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
							   struct kvm_vcpu **vcpu,
							   void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_set_files_rlimit(uint32_t nr_vcpus);

int __pin_task_to_cpu(pthread_t task, int cpu);

static inline void pin_task_to_cpu(pthread_t task, int cpu)
{
	int r;

	r = __pin_task_to_cpu(task, cpu);
	TEST_ASSERT(!r, "Failed to set thread affinity to pCPU '%u'", cpu);
}

static inline int pin_task_to_any_cpu(pthread_t task)
{
	int cpu = sched_getcpu();

	pin_task_to_cpu(task, cpu);
	return cpu;
}

static inline void pin_self_to_cpu(int cpu)
{
	pin_task_to_cpu(pthread_self(), cpu);
}

static inline int pin_self_to_any_cpu(void)
{
	return pin_task_to_any_cpu(pthread_self());
}

void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;
	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
	return n;
}

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain.  Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})
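
/*
 * Example (illustrative sketch): publish a host-computed value to the guest's
 * copy of a global, run the guest, then pull back a result it produced.
 * Assumes hypothetical globals visible to both host and guest code:
 *
 *	nr_iterations = 100;
 *	sync_global_to_guest(vm, nr_iterations);
 *	vcpu_run(vcpu);
 *	sync_global_from_guest(vm, guest_result);
 */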

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, u64 vaddr, u64 paddr);

static inline void virt_pg_map(struct kvm_vm *vm, u64 vaddr, u64 paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
	sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva);

static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm     - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

static inline u64 vm_page_align(struct kvm_vm *vm, u64 v)
{
	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);
void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
void kvm_arch_vm_release(struct kvm_vm *vm);

bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr);

uint32_t guest_get_vcpuid(void);

bool kvm_arch_has_default_irqchip(void);

#endif /* SELFTEST_KVM_UTIL_H */