Lines Matching refs:vm

59 	struct kvm_vm *vm;
141 #define kvm_for_each_vcpu(vm, i, vcpu) \
142 for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
143 if (!((vcpu) = vm->vcpus[i])) \
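
A minimal sketch of how the kvm_for_each_vcpu() iterator above is meant to be used; vcpu_run() is assumed from the wider selftests API and is not part of this listing.

    int i;
    struct kvm_vcpu *vcpu;

    /* Visit every vCPU that has been added to the VM; NULL slots are skipped. */
    kvm_for_each_vcpu(vm, i, vcpu)
        vcpu_run(vcpu);
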
148 memslot2region(struct kvm_vm *vm, uint32_t memslot);
150 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
154 return memslot2region(vm, vm->memslots[type]);
303 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }
305 #define __vm_ioctl(vm, cmd, arg) \
307 static_assert_is_vm(vm); \
308 kvm_do_ioctl((vm)->fd, cmd, arg); \
319 #define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm) \
323 static_assert_is_vm(vm); \
329 __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) { \
337 #define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm) \
338 __TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)
340 #define vm_ioctl(vm, cmd, arg) \
342 int ret = __vm_ioctl(vm, cmd, arg); \
344 __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm); \
359 __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm); \
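
The __vm_ioctl() form returns the raw ioctl result, while vm_ioctl() (and the analogous vCPU wrapper visible at line 359) asserts success through __TEST_ASSERT_VM_VCPU_IOCTL(). A hedged sketch of that split:

    /* Checked form: the macro asserts that the ioctl returned 0. */
    vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);

    /* Unchecked form: the caller inspects the raw return value itself. */
    int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY);

    if (ret < 0)
        return ret;
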
366 static inline int vm_check_cap(struct kvm_vm *vm, long cap)
368 int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);
370 TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
374 static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
378 return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
380 static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
384 vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
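
vm_check_cap() asserts that the KVM_CHECK_EXTENSION query itself succeeded and returns the capability value; vm_enable_cap() is the asserting KVM_ENABLE_CAP wrapper taking a single u64 argument. Sketch, with KVM_CAP_HALT_POLL purely as a stand-in capability:

    /* Enable the capability only if this kernel actually reports it. */
    if (vm_check_cap(vm, KVM_CAP_HALT_POLL))
        vm_enable_cap(vm, KVM_CAP_HALT_POLL, 0);
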
387 static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
404 vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
408 static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
411 vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
414 static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
417 vm_set_memory_attributes(vm, gpa, size, 0);
420 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
423 static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
426 vm_guest_mem_fallocate(vm, gpa, size, true);
429 static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
432 vm_guest_mem_fallocate(vm, gpa, size, false);
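
Taken together, the attribute and fallocate helpers above convert a gpa range between private and shared and manage its guest_memfd backing. A hedged sketch of a private-to-shared conversion; gpa and size are placeholders:

    /* Mark the range private so guest accesses are served from guest_memfd. */
    vm_mem_set_private(vm, gpa, size);

    /* ... guest runs against the private range ... */

    /* Convert back to shared and discard the now-stale private backing. */
    vm_mem_set_shared(vm, gpa, size);
    vm_guest_mem_punch_hole(vm, gpa, size);
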
435 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
441 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
444 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
446 static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
450 vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
453 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
463 vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
466 static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
468 return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
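
kvm_vm_get_dirty_log() and kvm_vm_clear_dirty_log() wrap the per-slot dirty-log ioctls. A sketch of one harvest pass; the bitmap allocation and the clear-log parameters beyond those visible in the truncated signature are assumptions:

    unsigned long *bitmap = bitmap_zalloc(host_num_pages);

    /* Snapshot the slot's dirty state, then reset the pages just harvested. */
    kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT, bitmap);
    kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT, bitmap, 0, host_num_pages);
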
471 static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
481 vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
484 static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
494 vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
497 static inline int vm_get_stats_fd(struct kvm_vm *vm)
499 int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);
501 TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
555 #define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat)
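
vm_get_stats_fd() opens the VM's binary stats descriptor and asserts the fd is valid; vm_get_stat() reads a single statistic through the cached vm->stats state. A minimal sketch of the fd-based path:

    int stats_fd = vm_get_stats_fd(vm);

    /* ... read the binary stats header, descriptors and data from stats_fd ... */

    close(stats_fd);
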
593 void vm_create_irqchip(struct kvm_vm *vm);
595 static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
603 return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
606 static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
609 int fd = __vm_create_guest_memfd(vm, size, flags);
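
vm_create_guest_memfd() is the asserting wrapper around the raw KVM_CREATE_GUEST_MEMFD call. Sketch; size is a placeholder and 0 means no flags:

    /* Create a guest_memfd to back private memory for this VM. */
    int memfd = vm_create_guest_memfd(vm, size, 0);
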
615 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
617 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
619 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
622 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
626 void vm_userspace_mem_region_add(struct kvm_vm *vm,
630 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
635 static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
641 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
642 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
643 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
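
These calls cover the memslot life cycle: add a region, adjust its flags, move its base gpa, and delete it. A sketch with placeholder constants; the full parameter list of vm_userspace_mem_region_add() is cut off above, so its arguments here are assumptions:

    /* Add an anonymous-memory slot, enable dirty logging, relocate it, tear it down. */
    vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, TEST_GPA, TEST_SLOT, TEST_NPAGES, 0);
    vm_mem_region_set_flags(vm, TEST_SLOT, KVM_MEM_LOG_DIRTY_PAGES);
    vm_mem_region_move(vm, TEST_SLOT, TEST_GPA_NEW);
    vm_mem_region_delete(vm, TEST_SLOT);
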
644 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
645 void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
646 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
647 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
648 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
650 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
653 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
654 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
656 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
658 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
660 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
661 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
662 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
663 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
669 static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
671 return gpa & ~vm->gpa_tag_mask;
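
The allocation and translation helpers combine naturally: allocate guest-virtual memory, then touch it through its host alias. Sketch; vm->page_size is assumed to hold the guest page size:

    /* Allocate one guest page and zero it through the host-side mapping. */
    vm_vaddr_t gva = vm_vaddr_alloc_page(vm);

    memset(addr_gva2hva(vm, gva), 0, vm->page_size);
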
806 TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_CHECK_EXTENSION, fd, vcpu->vm);
875 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
876 int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
878 static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
880 int fd = __kvm_create_device(vm, type);
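
kvm_create_device() asserts the returned device fd is valid; the __ variants hand errors back to the caller. Sketch using KVM_DEV_TYPE_VFIO as a stand-in type and assuming __kvm_test_create_device() returns 0 when the type is supported:

    /* Probe for support first, then create the device for real. */
    if (!__kvm_test_create_device(vm, KVM_DEV_TYPE_VFIO)) {
        int dev_fd = kvm_create_device(vm, KVM_DEV_TYPE_VFIO);

        /* ... configure the device through dev_fd ... */
    }
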
892 * vm - Virtual Machine
907 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
908 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
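
kvm_irq_line() asserts the KVM_IRQ_LINE ioctl succeeded; _kvm_irq_line() returns the result instead. Sketch of pulsing an interrupt; GUEST_IRQ is a placeholder GSI:

    /* Assert then deassert the line to deliver one edge-triggered interrupt. */
    kvm_irq_line(vm, GUEST_IRQ, 1);
    kvm_irq_line(vm, GUEST_IRQ, 0);
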
915 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
916 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
920 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
922 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
925 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
927 static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
935 return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
936 vm_arch_has_protected_memory(vm));
1012 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
1021 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
1037 #define sync_global_to_guest(vm, g) ({ \
1038 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
1042 #define sync_global_from_guest(vm, g) ({ \
1043 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
1053 #define write_guest_global(vm, g, val) ({ \
1054 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
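
The sync_global_*() macros copy a host global into or out of the guest's copy by resolving the symbol's guest address with addr_gva2hva(). Sketch with a hypothetical global:

    static int iterations;    /* global shared by host and guest code */

    iterations = 10;
    sync_global_to_guest(vm, iterations);     /* push the host value into guest memory */

    /* ... run the guest ... */

    sync_global_from_guest(vm, iterations);   /* pull the guest's updated value back */
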
1075 * vm - Virtual Machine
1078 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
1081 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
1084 struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
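
vm_vcpu_add() wraps the arch hook vm_arch_vcpu_add(); its trailing parameter is truncated above and is assumed here to be the guest entry point. Sketch:

    /* Create vCPU 0 and point it at the guest's main function. */
    struct kvm_vcpu *vcpu = vm_vcpu_add(vm, 0, guest_code);
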
1092 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
1094 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
1097 return vm_arch_vcpu_recreate(vm, vcpu_id);
1102 void virt_arch_pgd_alloc(struct kvm_vm *vm);
1104 static inline void virt_pgd_alloc(struct kvm_vm *vm)
1106 virt_arch_pgd_alloc(vm);
1113 * vm - Virtual Machine
1122 * Within @vm, creates a virtual translation for the page starting
1125 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
1127 static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
1129 virt_arch_pg_map(vm, vaddr, paddr);
1137 * vm - Virtual Machine
1148 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
1150 static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
1152 return addr_arch_gva2gpa(vm, gva);
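
virt_pg_map() installs a single guest-virtual to guest-physical mapping, and addr_gva2gpa() walks the same tables back. A sketch that sanity-checks a fresh mapping; TEST_GVA/TEST_GPA are placeholders and TEST_ASSERT() is assumed from the selftests framework:

    virt_pg_map(vm, TEST_GVA, TEST_GPA);

    /* The software walk should land exactly on the page just mapped. */
    TEST_ASSERT(addr_gva2gpa(vm, TEST_GVA) == TEST_GPA,
                "Unexpected gpa for TEST_GVA");
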
1160 * vm - Virtual Machine
1168 * virtual translation tables for the VM given by @vm.
1170 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
1172 static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
1174 virt_arch_dump(stream, vm, indent);
1178 static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
1180 return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
1190 void kvm_arch_vm_post_create(struct kvm_vm *vm);
1192 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);