xref: /linux/tools/testing/selftests/kvm/include/kvm_util.h (revision 10fd0285305d0b48e8a3bf15d4f17fc4f3d68cb6)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2018, Google LLC.
4  */
5 #ifndef SELFTEST_KVM_UTIL_H
6 #define SELFTEST_KVM_UTIL_H
7 
8 #include "test_util.h"
9 
10 #include <linux/compiler.h>
11 #include "linux/hashtable.h"
12 #include "linux/list.h"
13 #include <linux/kernel.h>
14 #include <linux/kvm.h>
15 #include "linux/rbtree.h"
16 #include <linux/types.h>
17 
18 #include <asm/atomic.h>
19 #include <asm/kvm.h>
20 
21 #include <sys/eventfd.h>
22 #include <sys/ioctl.h>
23 
24 #include <pthread.h>
25 
26 #include "kvm_util_arch.h"
27 #include "kvm_util_types.h"
28 #include "sparsebit.h"
29 
30 #define KVM_DEV_PATH "/dev/kvm"
31 #define KVM_MAX_VCPUS 512
32 
33 #define NSEC_PER_SEC 1000000000L
34 
35 struct userspace_mem_region {
36 	struct kvm_userspace_memory_region2 region;
37 	struct sparsebit *unused_phy_pages;
38 	struct sparsebit *protected_phy_pages;
39 	int fd;
40 	off_t offset;
41 	enum vm_mem_backing_src_type backing_src_type;
42 	void *host_mem;
43 	void *host_alias;
44 	void *mmap_start;
45 	void *mmap_alias;
46 	size_t mmap_size;
47 	struct rb_node gpa_node;
48 	struct rb_node hva_node;
49 	struct hlist_node slot_node;
50 };
51 
52 struct kvm_binary_stats {
53 	int fd;
54 	struct kvm_stats_header header;
55 	struct kvm_stats_desc *desc;
56 };
57 
58 struct kvm_vcpu {
59 	struct list_head list;
60 	uint32_t id;
61 	int fd;
62 	struct kvm_vm *vm;
63 	struct kvm_run *run;
64 #ifdef __x86_64__
65 	struct kvm_cpuid2 *cpuid;
66 #endif
67 #ifdef __aarch64__
68 	struct kvm_vcpu_init init;
69 #endif
70 	struct kvm_binary_stats stats;
71 	struct kvm_dirty_gfn *dirty_gfns;
72 	uint32_t fetch_index;
73 	uint32_t dirty_gfns_count;
74 };
75 
76 struct userspace_mem_regions {
77 	struct rb_root gpa_tree;
78 	struct rb_root hva_tree;
79 	DECLARE_HASHTABLE(slot_hash, 9);
80 };
81 
82 enum kvm_mem_region_type {
83 	MEM_REGION_CODE,
84 	MEM_REGION_DATA,
85 	MEM_REGION_PT,
86 	MEM_REGION_TEST_DATA,
87 	NR_MEM_REGIONS,
88 };
89 
90 struct kvm_vm {
91 	int mode;
92 	unsigned long type;
93 	int kvm_fd;
94 	int fd;
95 	unsigned int pgtable_levels;
96 	unsigned int page_size;
97 	unsigned int page_shift;
98 	unsigned int pa_bits;
99 	unsigned int va_bits;
100 	uint64_t max_gfn;
101 	struct list_head vcpus;
102 	struct userspace_mem_regions regions;
103 	struct sparsebit *vpages_valid;
104 	struct sparsebit *vpages_mapped;
105 	bool has_irqchip;
106 	bool pgd_created;
107 	vm_paddr_t ucall_mmio_addr;
108 	vm_paddr_t pgd;
109 	vm_vaddr_t handlers;
110 	uint32_t dirty_ring_size;
111 	uint64_t gpa_tag_mask;
112 
113 	struct kvm_vm_arch arch;
114 
115 	struct kvm_binary_stats stats;
116 
117 	/*
118 	 * KVM region slots. These are the default memslots used by page
119 	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
120 	 * memslot.
121 	 */
122 	uint32_t memslots[NR_MEM_REGIONS];
123 };
124 
125 struct vcpu_reg_sublist {
126 	const char *name;
127 	long capability;
128 	int feature;
129 	int feature_type;
130 	bool finalize;
131 	__u64 *regs;
132 	__u64 regs_n;
133 	__u64 *rejects_set;
134 	__u64 rejects_set_n;
135 	__u64 *skips_set;
136 	__u64 skips_set_n;
137 };
138 
139 struct vcpu_reg_list {
140 	char *name;
141 	struct vcpu_reg_sublist sublists[];
142 };
143 
144 #define for_each_sublist(c, s)		\
145 	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
146 
147 #define kvm_for_each_vcpu(vm, i, vcpu)			\
148 	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)	\
149 		if (!((vcpu) = vm->vcpus[i]))		\
150 			continue;			\
151 		else
152 
153 struct userspace_mem_region *
154 memslot2region(struct kvm_vm *vm, uint32_t memslot);
155 
156 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
157 							     enum kvm_mem_region_type type)
158 {
159 	assert(type < NR_MEM_REGIONS);
160 	return memslot2region(vm, vm->memslots[type]);
161 }
162 
163 /* Minimum allocated guest virtual and physical addresses */
164 #define KVM_UTIL_MIN_VADDR		0x2000
165 #define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000
166 
167 #define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
168 #define DEFAULT_STACK_PGS		5
169 
170 enum vm_guest_mode {
171 	VM_MODE_P52V48_4K,
172 	VM_MODE_P52V48_16K,
173 	VM_MODE_P52V48_64K,
174 	VM_MODE_P48V48_4K,
175 	VM_MODE_P48V48_16K,
176 	VM_MODE_P48V48_64K,
177 	VM_MODE_P40V48_4K,
178 	VM_MODE_P40V48_16K,
179 	VM_MODE_P40V48_64K,
180 	VM_MODE_PXXV48_4K,	/* 48-bit VA, any supported number of PA bits */
181 	VM_MODE_P47V64_4K,
182 	VM_MODE_P44V64_4K,
183 	VM_MODE_P36V48_4K,
184 	VM_MODE_P36V48_16K,
185 	VM_MODE_P36V48_64K,
186 	VM_MODE_P47V47_16K,
187 	VM_MODE_P36V47_16K,
188 	NUM_VM_MODES,
189 };
190 
191 struct vm_shape {
192 	uint32_t type;
193 	uint8_t  mode;
194 	uint8_t  pad0;
195 	uint16_t pad1;
196 };
197 
198 kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));
199 
200 #define VM_TYPE_DEFAULT			0
201 
202 #define VM_SHAPE(__mode)			\
203 ({						\
204 	struct vm_shape shape = {		\
205 		.mode = (__mode),		\
206 		.type = VM_TYPE_DEFAULT		\
207 	};					\
208 						\
209 	shape;					\
210 })
211 
212 #if defined(__aarch64__)
213 
214 extern enum vm_guest_mode vm_mode_default;
215 
216 #define VM_MODE_DEFAULT			vm_mode_default
217 #define MIN_PAGE_SHIFT			12U
218 #define ptes_per_page(page_size)	((page_size) / 8)
219 
220 #elif defined(__x86_64__)
221 
222 #define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
223 #define MIN_PAGE_SHIFT			12U
224 #define ptes_per_page(page_size)	((page_size) / 8)
225 
226 #elif defined(__s390x__)
227 
228 #define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
229 #define MIN_PAGE_SHIFT			12U
230 #define ptes_per_page(page_size)	((page_size) / 16)
231 
232 #elif defined(__riscv)
233 
234 #if __riscv_xlen == 32
235 #error "RISC-V 32-bit kvm selftests not supported"
236 #endif
237 
238 #define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
239 #define MIN_PAGE_SHIFT			12U
240 #define ptes_per_page(page_size)	((page_size) / 8)
241 
242 #elif defined(__loongarch__)
243 #define VM_MODE_DEFAULT			VM_MODE_P47V47_16K
244 #define MIN_PAGE_SHIFT			12U
245 #define ptes_per_page(page_size)	((page_size) / 8)
246 
247 #endif
248 
249 #define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)
250 
251 #define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
252 #define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)
253 
254 struct vm_guest_mode_params {
255 	unsigned int pa_bits;
256 	unsigned int va_bits;
257 	unsigned int page_size;
258 	unsigned int page_shift;
259 };
260 extern const struct vm_guest_mode_params vm_guest_mode_params[];
261 
262 int __open_path_or_exit(const char *path, int flags, const char *enoent_help);
263 int open_path_or_exit(const char *path, int flags);
264 int open_kvm_dev_path_or_exit(void);
265 
266 bool get_kvm_param_bool(const char *param);
267 bool get_kvm_intel_param_bool(const char *param);
268 bool get_kvm_amd_param_bool(const char *param);
269 
270 int get_kvm_param_integer(const char *param);
271 int get_kvm_intel_param_integer(const char *param);
272 int get_kvm_amd_param_integer(const char *param);
273 
274 unsigned int kvm_check_cap(long cap);
275 
276 static inline bool kvm_has_cap(long cap)
277 {
278 	return kvm_check_cap(cap);
279 }
280 
281 #define __KVM_SYSCALL_ERROR(_name, _ret) \
282 	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
283 
284 /*
285  * Use the "inner", double-underscore macro when reporting errors from within
286  * other macros so that the name of ioctl() and not its literal numeric value
287  * is printed on error.  The "outer" macro is strongly preferred when reporting
288  * errors "directly", i.e. without an additional layer of macros, as it reduces
289  * the probability of passing in the wrong string.
290  */
291 #define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
292 #define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)
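
/*
 * Illustrative usage sketch (not required by this header): assert directly on
 * a raw ioctl() with the "outer" macro, which stringifies the request name:
 *
 *	int ret = ioctl(vm->fd, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY);
 *
 *	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
 *
 * Wrapper macros that already hold the name as a string, e.g. kvm_ioctl()
 * below with #cmd, use __KVM_IOCTL_ERROR() instead.
 */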
293 
294 #define kvm_do_ioctl(fd, cmd, arg)						\
295 ({										\
296 	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
297 	ioctl(fd, cmd, arg);							\
298 })
299 
300 #define __kvm_ioctl(kvm_fd, cmd, arg)				\
301 	kvm_do_ioctl(kvm_fd, cmd, arg)
302 
303 #define kvm_ioctl(kvm_fd, cmd, arg)				\
304 ({								\
305 	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
306 								\
307 	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));	\
308 })
309 
310 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }
311 
312 #define __vm_ioctl(vm, cmd, arg)				\
313 ({								\
314 	static_assert_is_vm(vm);				\
315 	kvm_do_ioctl((vm)->fd, cmd, arg);			\
316 })
317 
318 /*
319  * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
320  * the ioctl() failed because KVM killed/bugged the VM.  To detect a dead VM,
321  * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
322  * selftests existed and (b) should never outright fail, i.e. is supposed to
323  * return 0 or 1.  If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
324  * VM and its vCPUs, including KVM_CHECK_EXTENSION.
325  */
326 #define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)				\
327 do {											\
328 	int __errno = errno;								\
329 											\
330 	static_assert_is_vm(vm);							\
331 											\
332 	if (cond)									\
333 		break;									\
334 											\
335 	if (errno == EIO &&								\
336 	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {	\
337 		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");	\
338 		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");	\
339 	}										\
340 	errno = __errno;								\
341 	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));				\
342 } while (0)
343 
344 #define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)		\
345 	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)
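
/*
 * Sketch of direct usage (most tests instead rely on vm_ioctl()/vcpu_ioctl()
 * below, which wrap this assertion):
 *
 *	int ret = __vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
 *
 *	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_CREATE_IRQCHIP, ret, vm);
 */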
346 
347 #define vm_ioctl(vm, cmd, arg)					\
348 ({								\
349 	int ret = __vm_ioctl(vm, cmd, arg);			\
350 								\
351 	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);		\
352 })
353 
354 static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }
355 
356 #define __vcpu_ioctl(vcpu, cmd, arg)				\
357 ({								\
358 	static_assert_is_vcpu(vcpu);				\
359 	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
360 })
361 
362 #define vcpu_ioctl(vcpu, cmd, arg)				\
363 ({								\
364 	int ret = __vcpu_ioctl(vcpu, cmd, arg);			\
365 								\
366 	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
367 })
368 
369 /*
370  * Looks up and returns the value corresponding to the capability
371  * (KVM_CAP_*) given by cap.
372  */
373 static inline int vm_check_cap(struct kvm_vm *vm, long cap)
374 {
375 	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);
376 
377 	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
378 	return ret;
379 }
380 
381 static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
382 {
383 	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
384 
385 	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
386 }
387 static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
388 {
389 	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
390 
391 	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
392 }
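
/*
 * Typical check-then-enable pattern (sketch; KVM_CAP_DIRTY_LOG_RING and its
 * byte-size argument are just one example of a capability that takes an arg):
 *
 *	if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING))
 *		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size_bytes);
 */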
393 
394 static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
395 					    uint64_t size, uint64_t attributes)
396 {
397 	struct kvm_memory_attributes attr = {
398 		.attributes = attributes,
399 		.address = gpa,
400 		.size = size,
401 		.flags = 0,
402 	};
403 
404 	/*
405 	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes.  These flows
406 	 * need significant enhancements to support multiple attributes.
407 	 */
408 	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
409 		    "Update me to support multiple attributes!");
410 
411 	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
412 }
413 
414 
415 static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
416 				      uint64_t size)
417 {
418 	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
419 }
420 
421 static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
422 				     uint64_t size)
423 {
424 	vm_set_memory_attributes(vm, gpa, size, 0);
425 }
426 
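/*
 * Conversion sketch (illustrative; @gpa/@size refer to a range backed by
 * guest_memfd in a slot created with KVM_MEM_GUEST_MEMFD):
 *
 *	vm_mem_set_private(vm, gpa, size);
 *	... run the guest against private memory ...
 *	vm_mem_set_shared(vm, gpa, size);
 */
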
427 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
428 			    bool punch_hole);
429 
430 static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
431 					   uint64_t size)
432 {
433 	vm_guest_mem_fallocate(vm, gpa, size, true);
434 }
435 
436 static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
437 					 uint64_t size)
438 {
439 	vm_guest_mem_fallocate(vm, gpa, size, false);
440 }
441 
442 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
443 const char *vm_guest_mode_string(uint32_t i);
444 
445 void kvm_vm_free(struct kvm_vm *vmp);
446 void kvm_vm_restart(struct kvm_vm *vmp);
447 void kvm_vm_release(struct kvm_vm *vmp);
448 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
449 int kvm_memfd_alloc(size_t size, bool hugepages);
450 
451 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
452 
453 static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
454 {
455 	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
456 
457 	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
458 }
459 
460 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
461 					  uint64_t first_page, uint32_t num_pages)
462 {
463 	struct kvm_clear_dirty_log args = {
464 		.dirty_bitmap = log,
465 		.slot = slot,
466 		.first_page = first_page,
467 		.num_pages = num_pages
468 	};
469 
470 	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
471 }
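
/*
 * Dirty-logging sketch (assumes the slot was created with
 * KVM_MEM_LOG_DIRTY_PAGES and @bitmap is sized to cover the slot):
 *
 *	kvm_vm_get_dirty_log(vm, slot, bitmap);
 *	... consume the harvested dirty bits ...
 *	kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
 */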
472 
473 static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
474 {
475 	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
476 }
477 
478 static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
479 						uint64_t address,
480 						uint64_t size, bool pio)
481 {
482 	struct kvm_coalesced_mmio_zone zone = {
483 		.addr = address,
484 		.size = size,
485 		.pio  = pio,
486 	};
487 
488 	vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
489 }
490 
491 static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
492 						  uint64_t address,
493 						  uint64_t size, bool pio)
494 {
495 	struct kvm_coalesced_mmio_zone zone = {
496 		.addr = address,
497 		.size = size,
498 		.pio  = pio,
499 	};
500 
501 	vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
502 }
503 
504 static inline int vm_get_stats_fd(struct kvm_vm *vm)
505 {
506 	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);
507 
508 	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
509 	return fd;
510 }
511 
512 static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
513 			      uint32_t flags)
514 {
515 	struct kvm_irqfd irqfd = {
516 		.fd = eventfd,
517 		.gsi = gsi,
518 		.flags = flags,
519 		.resamplefd = -1,
520 	};
521 
522 	return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
523 }
524 
525 static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
526 			      uint32_t flags)
527 {
528 	int ret = __kvm_irqfd(vm, gsi, eventfd, flags);
529 
530 	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
531 }
532 
533 static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
534 {
535 	kvm_irqfd(vm, gsi, eventfd, 0);
536 }
537 
538 static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
539 {
540 	kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
541 }
542 
543 static inline int kvm_new_eventfd(void)
544 {
545 	int fd = eventfd(0, 0);
546 
547 	TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
548 	return fd;
549 }
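
/*
 * Typical irqfd pairing (sketch; @gsi is whatever GSI the test routes):
 *
 *	int fd = kvm_new_eventfd();
 *
 *	kvm_assign_irqfd(vm, gsi, fd);
 *	... inject by writing to fd, let the guest handle the IRQ ...
 *	kvm_deassign_irqfd(vm, gsi, fd);
 *	close(fd);
 */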
550 
551 static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
552 {
553 	ssize_t ret;
554 
555 	ret = pread(stats_fd, header, sizeof(*header), 0);
556 	TEST_ASSERT(ret == sizeof(*header),
557 		    "Failed to read '%lu' header bytes, ret = '%ld'",
558 		    sizeof(*header), ret);
559 }
560 
561 struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
562 					      struct kvm_stats_header *header);
563 
564 static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
565 {
566 	 /*
567 	  * The base size of the descriptor is defined by KVM's ABI, but the
568 	  * size of the name field is variable, as far as KVM's ABI is
569 	  * concerned. For a given instance of KVM, the name field is the same
570 	  * size for all stats and is provided in the overall stats header.
571 	  */
572 	return sizeof(struct kvm_stats_desc) + header->name_size;
573 }
574 
575 static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
576 							  int index,
577 							  struct kvm_stats_header *header)
578 {
579 	/*
580 	 * Note, size_desc includes the size of the name field, which is
581 	 * variable, i.e. this is NOT equivalent to &stats_desc[i].
582 	 */
583 	return (void *)stats + index * get_stats_descriptor_size(header);
584 }
585 
586 void read_stat_data(int stats_fd, struct kvm_stats_header *header,
587 		    struct kvm_stats_desc *desc, uint64_t *data,
588 		    size_t max_elements);
589 
590 void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
591 		  uint64_t *data, size_t max_elements);
592 
593 #define __get_stat(stats, stat)							\
594 ({										\
595 	uint64_t data;								\
596 										\
597 	kvm_get_stat(stats, #stat, &data, 1);					\
598 	data;									\
599 })
600 
601 #define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat)
602 #define vcpu_get_stat(vcpu, stat) __get_stat(&(vcpu)->stats, stat)
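
/*
 * Example (illustrative; binary stats names are not ABI and vary by
 * architecture and kernel version, "pages_4k" is an x86 VM stat):
 *
 *	uint64_t nr_4k = vm_get_stat(vm, pages_4k);
 */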
603 
604 static inline bool read_smt_control(char *buf, size_t buf_size)
605 {
606 	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r");
607 	bool ret;
608 
609 	if (!f)
610 		return false;
611 
612 	ret = fread(buf, sizeof(*buf), buf_size, f) > 0;
613 	fclose(f);
614 
615 	return ret;
616 }
617 
618 static inline bool is_smt_possible(void)
619 {
620 	char buf[16];
621 
622 	if (read_smt_control(buf, sizeof(buf)) &&
623 	    (!strncmp(buf, "forceoff", 8) || !strncmp(buf, "notsupported", 12)))
624 		return false;
625 
626 	return true;
627 }
628 
629 static inline bool is_smt_on(void)
630 {
631 	char buf[16];
632 
633 	if (read_smt_control(buf, sizeof(buf)) && !strncmp(buf, "on", 2))
634 		return true;
635 
636 	return false;
637 }
638 
639 void vm_create_irqchip(struct kvm_vm *vm);
640 
641 static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
642 					uint64_t flags)
643 {
644 	struct kvm_create_guest_memfd guest_memfd = {
645 		.size = size,
646 		.flags = flags,
647 	};
648 
649 	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
650 }
651 
652 static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
653 					uint64_t flags)
654 {
655 	int fd = __vm_create_guest_memfd(vm, size, flags);
656 
657 	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
658 	return fd;
659 }
660 
661 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
662 			       uint64_t gpa, uint64_t size, void *hva);
663 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
664 				uint64_t gpa, uint64_t size, void *hva);
665 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
666 				uint64_t gpa, uint64_t size, void *hva,
667 				uint32_t guest_memfd, uint64_t guest_memfd_offset);
668 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
669 				 uint64_t gpa, uint64_t size, void *hva,
670 				 uint32_t guest_memfd, uint64_t guest_memfd_offset);
671 
672 void vm_userspace_mem_region_add(struct kvm_vm *vm,
673 	enum vm_mem_backing_src_type src_type,
674 	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
675 	uint32_t flags);
676 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
677 		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
678 		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
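
/*
 * Sketch of adding a guest_memfd-backed slot (illustrative values; the
 * backing source and flags depend on the test):
 *
 *	int memfd = vm_create_guest_memfd(vm, size, 0);
 *
 *	vm_mem_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, slot, size / vm->page_size,
 *		   KVM_MEM_GUEST_MEMFD, memfd, 0);
 */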
679 
680 #ifndef vm_arch_has_protected_memory
681 static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
682 {
683 	return false;
684 }
685 #endif
686 
687 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
688 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
689 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
690 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
691 void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
692 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
693 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
694 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
695 			    enum kvm_mem_region_type type);
696 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
697 				 vm_vaddr_t vaddr_min,
698 				 enum kvm_mem_region_type type);
699 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
700 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
701 				 enum kvm_mem_region_type type);
702 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
703 
704 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
705 	      unsigned int npages);
706 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
707 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
708 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
709 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
710 
711 #ifndef vcpu_arch_put_guest
712 #define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
713 #endif
714 
715 static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
716 {
717 	return gpa & ~vm->gpa_tag_mask;
718 }
719 
720 void vcpu_run(struct kvm_vcpu *vcpu);
721 int _vcpu_run(struct kvm_vcpu *vcpu);
722 
723 static inline int __vcpu_run(struct kvm_vcpu *vcpu)
724 {
725 	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
726 }
727 
728 void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
729 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);
730 
731 static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
732 				   uint64_t arg0)
733 {
734 	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
735 
736 	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
737 }
738 
739 static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
740 					struct kvm_guest_debug *debug)
741 {
742 	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
743 }
744 
745 static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
746 				     struct kvm_mp_state *mp_state)
747 {
748 	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
749 }
750 static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
751 				     struct kvm_mp_state *mp_state)
752 {
753 	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
754 }
755 
756 static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
757 {
758 	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
759 }
760 
761 static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
762 {
763 	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
764 }
765 static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
766 {
767 	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
768 
769 }
770 static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
771 {
772 	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
773 }
774 static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
775 {
776 	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
777 }
778 static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
779 {
780 	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
781 }
782 static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
783 {
784 	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
785 }
786 
787 static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
788 {
789 	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
790 
791 	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
792 }
793 static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
794 {
795 	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
796 
797 	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
798 }
799 static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
800 {
801 	uint64_t val;
802 	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
803 
804 	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
805 
806 	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
807 	return val;
808 }
809 static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
810 {
811 	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
812 
813 	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
814 
815 	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
816 }
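
/*
 * Read-modify-write sketch (the register ID encoding, e.g. an arm64 sysreg or
 * a RISC-V CSR, is architecture-specific and supplied by the caller):
 *
 *	uint64_t val = vcpu_get_reg(vcpu, reg_id);
 *
 *	vcpu_set_reg(vcpu, reg_id, val | modification);
 */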
817 
818 #ifdef __KVM_HAVE_VCPU_EVENTS
819 static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
820 				   struct kvm_vcpu_events *events)
821 {
822 	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
823 }
824 static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
825 				   struct kvm_vcpu_events *events)
826 {
827 	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
828 }
829 #endif
830 #ifdef __x86_64__
831 static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
832 					 struct kvm_nested_state *state)
833 {
834 	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
835 }
836 static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
837 					  struct kvm_nested_state *state)
838 {
839 	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
840 }
841 
842 static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
843 					 struct kvm_nested_state *state)
844 {
845 	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
846 }
847 #endif
848 static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
849 {
850 	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);
851 
852 	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
853 	return fd;
854 }
855 
856 int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);
857 
858 static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
859 {
860 	int ret = __kvm_has_device_attr(dev_fd, group, attr);
861 
862 	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
863 }
864 
865 int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);
866 
867 static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
868 				       uint64_t attr, void *val)
869 {
870 	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);
871 
872 	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
873 }
874 
875 int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);
876 
877 static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
878 				       uint64_t attr, void *val)
879 {
880 	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);
881 
882 	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
883 }
884 
885 static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
886 					 uint64_t attr)
887 {
888 	return __kvm_has_device_attr(vcpu->fd, group, attr);
889 }
890 
891 static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
892 					uint64_t attr)
893 {
894 	kvm_has_device_attr(vcpu->fd, group, attr);
895 }
896 
897 static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
898 					 uint64_t attr, void *val)
899 {
900 	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
901 }
902 
903 static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
904 					uint64_t attr, void *val)
905 {
906 	kvm_device_attr_get(vcpu->fd, group, attr, val);
907 }
908 
909 static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
910 					 uint64_t attr, void *val)
911 {
912 	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
913 }
914 
915 static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
916 					uint64_t attr, void *val)
917 {
918 	kvm_device_attr_set(vcpu->fd, group, attr, val);
919 }
920 
921 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
922 int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
923 
924 static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
925 {
926 	int fd = __kvm_create_device(vm, type);
927 
928 	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
929 	return fd;
930 }
931 
932 void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
933 
934 /*
935  * VM VCPU Args Set
936  *
937  * Input Args:
938  *   vcpu - vCPU whose entry-point arguments are being set
939  *   num - number of arguments
940  *   ... - arguments, each of type uint64_t
941  *
942  * Output Args: None
943  *
944  * Return: None
945  *
946  * Sets the first @num input parameters for the function at @vcpu's entry point,
947  * per the C calling convention of the architecture, to the values given as
948  * variable args. Each of the variable args is expected to be of type uint64_t.
949  * The maximum supported @num is architecture-specific.
950  */
951 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
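
/*
 * e.g. for a guest entry point of the form guest_code(uint64_t a, uint64_t b):
 *
 *	vcpu_args_set(vcpu, 2, (uint64_t)gva, nr_pages);
 */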
952 
953 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
954 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
955 
956 #define KVM_MAX_IRQ_ROUTES		4096
957 
958 struct kvm_irq_routing *kvm_gsi_routing_create(void);
959 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
960 		uint32_t gsi, uint32_t pin);
961 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
962 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
963 
964 const char *exit_reason_str(unsigned int exit_reason);
965 
966 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
967 			     uint32_t memslot);
968 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
969 				vm_paddr_t paddr_min, uint32_t memslot,
970 				bool protected);
971 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
972 
973 static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
974 					    vm_paddr_t paddr_min, uint32_t memslot)
975 {
976 	/*
977 	 * By default, allocate memory as protected for VMs that support
978 	 * protected memory, as the majority of memory for such VMs is
979 	 * protected, i.e. using shared memory is effectively opt-in.
980 	 */
981 	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
982 				    vm_arch_has_protected_memory(vm));
983 }
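
/*
 * A test that explicitly wants unprotected (shared) backing can bypass the
 * default above, e.g.:
 *
 *	gpa = __vm_phy_pages_alloc(vm, npages, paddr_min, memslot, false);
 */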
984 
985 /*
986  * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
987  * loads the test binary into guest memory and creates an IRQ chip (x86 only).
988  * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
989  * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
990  */
991 struct kvm_vm *____vm_create(struct vm_shape shape);
992 struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
993 			   uint64_t nr_extra_pages);
994 
995 static inline struct kvm_vm *vm_create_barebones(void)
996 {
997 	return ____vm_create(VM_SHAPE_DEFAULT);
998 }
999 
1000 static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
1001 {
1002 	const struct vm_shape shape = {
1003 		.mode = VM_MODE_DEFAULT,
1004 		.type = type,
1005 	};
1006 
1007 	return ____vm_create(shape);
1008 }
1009 
1010 static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
1011 {
1012 	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
1013 }
1014 
1015 struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
1016 				      uint64_t extra_mem_pages,
1017 				      void *guest_code, struct kvm_vcpu *vcpus[]);
1018 
1019 static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
1020 						  void *guest_code,
1021 						  struct kvm_vcpu *vcpus[])
1022 {
1023 	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
1024 				      guest_code, vcpus);
1025 }
1026 
1027 
1028 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
1029 					       struct kvm_vcpu **vcpu,
1030 					       uint64_t extra_mem_pages,
1031 					       void *guest_code);
1032 
1033 /*
1034  * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
1035  * additional pages of guest memory.  Returns the VM and vCPU (via out param).
1036  */
1037 static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
1038 						       uint64_t extra_mem_pages,
1039 						       void *guest_code)
1040 {
1041 	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
1042 					       extra_mem_pages, guest_code);
1043 }
1044 
1045 static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
1046 						     void *guest_code)
1047 {
1048 	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
1049 }
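
/*
 * Canonical single-vCPU test setup (sketch; guest_main is the test's guest
 * function):
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 *
 *	vcpu_run(vcpu);
 *	... process the exit, e.g. via ucalls ...
 *	kvm_vm_free(vm);
 */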
1050 
1051 static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
1052 							   struct kvm_vcpu **vcpu,
1053 							   void *guest_code)
1054 {
1055 	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
1056 }
1057 
1058 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
1059 
1060 void kvm_set_files_rlimit(uint32_t nr_vcpus);
1061 
1062 int __pin_task_to_cpu(pthread_t task, int cpu);
1063 
1064 static inline void pin_task_to_cpu(pthread_t task, int cpu)
1065 {
1066 	int r;
1067 
1068 	r = __pin_task_to_cpu(task, cpu);
1069 	TEST_ASSERT(!r, "Failed to set thread affinity to pCPU '%u'", cpu);
1070 }
1071 
1072 static inline int pin_task_to_any_cpu(pthread_t task)
1073 {
1074 	int cpu = sched_getcpu();
1075 
1076 	pin_task_to_cpu(task, cpu);
1077 	return cpu;
1078 }
1079 
1080 static inline void pin_self_to_cpu(int cpu)
1081 {
1082 	pin_task_to_cpu(pthread_self(), cpu);
1083 }
1084 
1085 static inline int pin_self_to_any_cpu(void)
1086 {
1087 	return pin_task_to_any_cpu(pthread_self());
1088 }
1089 
1090 void kvm_print_vcpu_pinning_help(void);
1091 void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
1092 			    int nr_vcpus);
1093 
1094 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
1095 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
1096 unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
1097 unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
1098 static inline unsigned int
1099 vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
1100 {
1101 	unsigned int n;
1102 	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
1103 #ifdef __s390x__
1104 	/* s390 requires 1M aligned guest sizes */
1105 	n = (n + 255) & ~255;
1106 #endif
1107 	return n;
1108 }
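
/*
 * e.g. on s390x a request for 100 guest pages is rounded up to 256 (1M
 * alignment); elsewhere only the host/guest page size ratio is factored in.
 */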
1109 
1110 #define sync_global_to_guest(vm, g) ({				\
1111 	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
1112 	memcpy(_p, &(g), sizeof(g));				\
1113 })
1114 
1115 #define sync_global_from_guest(vm, g) ({			\
1116 	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
1117 	memcpy(&(g), _p, sizeof(g));				\
1118 })
1119 
1120 /*
1121  * Write a global value, but only in the VM's (guest's) domain.  Primarily used
1122  * for "globals" that hold per-VM values (VMs always duplicate code and global
1123  * data into their own region of physical memory), but can be used anytime it's
1124  * undesirable to change the host's copy of the global.
1125  */
1126 #define write_guest_global(vm, g, val) ({			\
1127 	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
1128 	typeof(g) _val = val;					\
1129 								\
1130 	memcpy(_p, &(_val), sizeof(g));				\
1131 })
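
/*
 * Sync sketch (assumes 'test_data' is a global compiled into both the host
 * and guest images, which selftests duplicate per VM):
 *
 *	test_data = value_computed_by_host;
 *	sync_global_to_guest(vm, test_data);
 *	... run the guest, which may update its copy ...
 *	sync_global_from_guest(vm, test_data);
 */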
1132 
1133 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);
1134 
1135 void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
1136 		    uint8_t indent);
1137 
1138 static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
1139 			     uint8_t indent)
1140 {
1141 	vcpu_arch_dump(stream, vcpu, indent);
1142 }
1143 
1144 /*
1145  * Adds a vCPU with reasonable defaults (e.g. a stack)
1146  *
1147  * Input Args:
1148  *   vm - Virtual Machine
1149  *   vcpu_id - The id of the VCPU to add to the VM.
1150  */
1151 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
1152 void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
1153 
1154 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
1155 					   void *guest_code)
1156 {
1157 	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
1158 
1159 	vcpu_arch_set_entry_point(vcpu, guest_code);
1160 
1161 	return vcpu;
1162 }
1163 
1164 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
1165 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
1166 
1167 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
1168 						uint32_t vcpu_id)
1169 {
1170 	return vm_arch_vcpu_recreate(vm, vcpu_id);
1171 }
1172 
1173 void vcpu_arch_free(struct kvm_vcpu *vcpu);
1174 
1175 void virt_arch_pgd_alloc(struct kvm_vm *vm);
1176 
1177 static inline void virt_pgd_alloc(struct kvm_vm *vm)
1178 {
1179 	virt_arch_pgd_alloc(vm);
1180 }
1181 
1182 /*
1183  * VM Virtual Page Map
1184  *
1185  * Input Args:
1186  *   vm - Virtual Machine
1187  *   vaddr - VM Virtual Address
1188  *   paddr - VM Physical Address
1190  *
1191  * Output Args: None
1192  *
1193  * Return: None
1194  *
1195  * Within @vm, creates a virtual translation for the page starting
1196  * at @vaddr to the page starting at @paddr.
1197  */
1198 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
1199 
1200 static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
1201 {
1202 	virt_arch_pg_map(vm, vaddr, paddr);
1203 }
1204 
1205 
1206 /*
1207  * Address Guest Virtual to Guest Physical
1208  *
1209  * Input Args:
1210  *   vm - Virtual Machine
1211  *   gva - VM virtual address
1212  *
1213  * Output Args: None
1214  *
1215  * Return:
1216  *   Equivalent VM physical address
1217  *
1218  * Returns the VM physical address of the translated VM virtual
1219  * address given by @gva.
1220  */
1221 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
1222 
1223 static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
1224 {
1225 	return addr_arch_gva2gpa(vm, gva);
1226 }
1227 
1228 /*
1229  * Virtual Translation Tables Dump
1230  *
1231  * Input Args:
1232  *   stream - Output FILE stream
1233  *   vm     - Virtual Machine
1234  *   indent - Left margin indent amount
1235  *
1236  * Output Args: None
1237  *
1238  * Return: None
1239  *
1240  * Dumps to the FILE stream given by @stream, the contents of all the
1241  * virtual translation tables for the VM given by @vm.
1242  */
1243 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
1244 
1245 static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
1246 {
1247 	virt_arch_dump(stream, vm, indent);
1248 }
1249 
1250 
1251 static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
1252 {
1253 	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
1254 }
1255 
1256 /*
1257  * Arch hook that is invoked via a constructor, i.e. before executing main(),
1258  * to allow for arch-specific setup that is common to all tests, e.g. computing
1259  * the default guest "mode".
1260  */
1261 void kvm_selftest_arch_init(void);
1262 
1263 void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);
1264 void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
1265 void kvm_arch_vm_release(struct kvm_vm *vm);
1266 
1267 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
1268 
1269 uint32_t guest_get_vcpuid(void);
1270 
1271 #endif /* SELFTEST_KVM_UTIL_H */
1272