/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__

#include <linux/bits.h>
#include <linux/export.h>
#include <linux/types.h>
#include <asm/kvm_types.h>

#ifdef KVM_SUB_MODULES
#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol) \
	EXPORT_SYMBOL_FOR_MODULES(symbol, __stringify(KVM_SUB_MODULES))
#else
#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol)
#endif
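
/*
 * Illustrative sketch (not part of this header): an architecture that
 * splits KVM into sub-modules defines KVM_SUB_MODULES in its
 * asm/kvm_types.h, and symbols tagged with the macro above are then
 * exported only to those modules.  The names below are hypothetical:
 *
 *	// asm/kvm_types.h (hypothetical architecture)
 *	#define KVM_SUB_MODULES kvm-foo,kvm-bar
 *
 *	// a file built into kvm.ko; kvm_internal_helper() is made up
 *	void kvm_internal_helper(struct kvm *kvm);
 *	EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_internal_helper);
 *
 * Without KVM_SUB_MODULES the macro expands to nothing, so the symbol
 * stays private to kvm.ko.
 */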

#ifndef __ASSEMBLER__

#include <linux/mutex.h>
#include <linux/spinlock_types.h>

struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
struct kvm_gfn_range;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_one_reg;
struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
struct kvm_memslots;

enum kvm_mr_change;

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef u64            gfn_t;

#define INVALID_GPA	(~(gpa_t)0)

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef u64            hfn_t;

typedef hfn_t kvm_pfn_t;
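
/*
 * Illustrative sketch (not part of this header): a frame number is its
 * physical address shifted right by PAGE_SHIFT, so conversions are plain
 * shifts.  The real guest-side helpers, gfn_to_gpa() and gpa_to_gfn(),
 * are defined in <linux/kvm_host.h>:
 *
 *	gpa_t gpa = (gpa_t)gfn << PAGE_SHIFT;
 *	gfn_t gfn = (gfn_t)(gpa >> PAGE_SHIFT);
 */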

struct gfn_to_hva_cache {
	u64 generation;
	gpa_t gpa;
	unsigned long hva;
	unsigned long len;
	struct kvm_memory_slot *memslot;
};
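
/*
 * Illustrative sketch (not part of this header): a gfn_to_hva_cache is
 * initialized once so repeat accesses skip the memslot lookup;
 * @generation catches stale translations after memslot changes.  With
 * the accessors declared in <linux/kvm_host.h>, usage looks roughly
 * like:
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val));
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */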

struct gfn_to_pfn_cache {
	u64 generation;
	gpa_t gpa;
	unsigned long uhva;
	struct kvm_memory_slot *memslot;
	struct kvm *kvm;
	struct list_head list;
	rwlock_t lock;
	struct mutex refresh_lock;
	void *khva;
	kvm_pfn_t pfn;
	bool active;
	bool valid;
};
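
/*
 * Illustrative sketch (not part of this header): a gfn_to_pfn_cache keeps
 * a kernel mapping (@khva) of guest memory that must be revalidated under
 * @lock before each use, refreshing when the mapping has been
 * invalidated.  With the helpers declared in <linux/kvm_host.h>, the
 * usual pattern is roughly:
 *
 *	kvm_gpc_init(&gpc, kvm);
 *	kvm_gpc_activate(&gpc, gpa, len);
 *
 *	read_lock(&gpc.lock);
 *	if (!kvm_gpc_check(&gpc, len)) {
 *		read_unlock(&gpc.lock);
 *		kvm_gpc_refresh(&gpc, len);
 *		// retry from the top (or fail)
 *	} else {
 *		// ... access guest data through gpc.khva ...
 *		read_unlock(&gpc.lock);
 *	}
 */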

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers.  Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks.  Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 *
 * The @capacity field and @objects array are lazily initialized when the cache
 * is topped up (__kvm_mmu_topup_memory_cache()).
 */
struct kvm_mmu_memory_cache {
	gfp_t gfp_zero;
	gfp_t gfp_custom;
	u64 init_value;
	struct kmem_cache *kmem_cache;
	int capacity;
	int nobjs;
	void **objects;
};
#endif
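
/*
 * Illustrative sketch (not part of this header): the cache is topped up
 * before MMU locks are taken, while sleeping and reclaim are still legal,
 * and objects are then handed out without sleeping once the locks are
 * held.  Using the helpers declared in <linux/kvm_host.h>, with "cache"
 * standing in for an arch-specific per-vCPU cache:
 *
 *	// outside mmu_lock; may sleep, reclaim, do I/O, etc.
 *	r = kvm_mmu_topup_memory_cache(&cache, min_objects_needed);
 *
 *	// under mmu_lock; cannot fail if the topup succeeded
 *	obj = kvm_mmu_memory_cache_alloc(&cache);
 */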

#define HALT_POLL_HIST_COUNT			32

struct kvm_vm_stat_generic {
	u64 remote_tlb_flush;
	u64 remote_tlb_flush_requests;
};

struct kvm_vcpu_stat_generic {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_wait_ns;
	u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
	u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
	u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
	u64 blocking;
};
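
/*
 * Illustrative sketch (not part of this header): the *_hist[] arrays are
 * logarithmic histograms of durations in nanoseconds; a sample lands in
 * the bucket matching its highest set bit, clamped to the last bucket
 * (see kvm_stats_log_hist_update() in <linux/kvm_host.h>):
 *
 *	size_t index = fls64(value);
 *
 *	index = min(index, size - 1);
 *	++data[index];
 */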

#define KVM_STATS_NAME_SIZE	48
#endif /* !__ASSEMBLER__ */

#endif /* __KVM_TYPES_H__ */