/* xref: /linux/include/linux/kvm_dirty_ring.h (revision 63eb28bb1402891b1ad2be02a530f29a9dd7f1cd) */
#ifndef KVM_DIRTY_RING_H
#define KVM_DIRTY_RING_H

#include <linux/kvm.h>

/**
 * kvm_dirty_ring: KVM internal dirty ring structure
 *
 * @dirty_index: free running counter that points to the next slot in
 *               dirty_ring->dirty_gfns, where a new dirty page should go
 * @reset_index: free running counter that points to the next dirty page
 *               in dirty_ring->dirty_gfns for which dirty trap needs to
 *               be reenabled
 * @size:        size of the compact list, dirty_ring->dirty_gfns
 * @soft_limit:  when the number of dirty pages in the list reaches this
 *               limit, vcpu that owns this ring should exit to userspace
 *               to allow userspace to harvest all the dirty pages
 * @dirty_gfns:  the array to keep the dirty gfns
 * @index:       index of this dirty ring
 */
21 struct kvm_dirty_ring {
22 	u32 dirty_index;
23 	u32 reset_index;
24 	u32 size;
25 	u32 soft_limit;
26 	struct kvm_dirty_gfn *dirty_gfns;
27 	int index;
28 };
#ifndef CONFIG_HAVE_KVM_DIRTY_RING
/*
 * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o should
 * not be included as well, so define these nop functions for the arch.
 */
kvm_dirty_ring_get_rsvd_entries(struct kvm * kvm)35 static inline u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm)
36 {
37 	return 0;
38 }
39 
/* Dirty ring compiled out: dirty logging always uses the bitmap. */
static inline bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	return true;
}

/* Nothing to allocate when the dirty ring is compiled out; report success. */
static inline int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
				       int index, u32 size)
{
	return 0;
}

/* No ring exists to reset; report -ENOENT to the caller. */
static inline int kvm_dirty_ring_reset(struct kvm *kvm,
				       struct kvm_dirty_ring *ring,
				       int *nr_entries_reset)
{
	return -ENOENT;
}

/* Pushing dirty GFNs is a no-op without dirty ring support. */
static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
				       u32 slot, u64 offset)
{
}

/* No ring pages exist to hand out for mmap(). */
static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring,
						   u32 offset)
{
	return NULL;
}

/* Nothing was allocated, so nothing to free. */
static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
}

#else /* CONFIG_HAVE_KVM_DIRTY_RING */

int kvm_cpu_dirty_log_size(struct kvm *kvm);
bool kvm_use_dirty_bitmap(struct kvm *kvm);
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm);
int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
			 int index, u32 size);
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
			 int *nr_entries_reset);
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);

bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);

/* for use in vm_operations_struct */
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);

#endif /* CONFIG_HAVE_KVM_DIRTY_RING */

#endif	/* KVM_DIRTY_RING_H */