// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM dirty ring implementation
 *
 * Copyright 2019 Red Hat, Inc.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"

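/*
 * Architectures can override this __weak stub to reserve room in the ring
 * for dirty GFNs that hardware logs on the vCPU's behalf; on x86, for
 * instance, a PML flush can push up to 512 entries at once.  The default of
 * 0 reserves no extra entries.
 */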
int __weak kvm_cpu_dirty_log_size(void)
{
	return 0;
}

u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}

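/*
 * Whether KVM still needs to maintain the dirty bitmap: either the ring is
 * not enabled at all (!dirty_ring_size), or userspace asked for
 * KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP so that pages dirtied without a running
 * vCPU (e.g. by an in-kernel device such as the arm64 GIC ITS) are still
 * logged somewhere.
 */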
bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	return !kvm->dirty_ring_size || kvm->dirty_ring_with_bitmap;
}

#ifndef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	return false;
}
#endif

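/*
 * dirty_index and reset_index are free-running u32 counters, so the
 * subtraction below is wrap-safe: with dirty_index = 3 and
 * reset_index = 0xfffffffe, (u32)(3 - 0xfffffffe) = 5, which is the correct
 * number of in-flight entries.
 */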
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->size;
}

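/*
 * 'slot' packs the address space id in its high 16 bits and the memslot id
 * in its low 16 bits, i.e. slot = (as_id << 16) | id, matching the encoding
 * userspace uses in struct kvm_userspace_memory_region.  'mask' selects up
 * to BITS_PER_LONG pages, starting at 'offset' within the slot, whose dirty
 * logging should be re-armed.
 */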
static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
	struct kvm_memory_slot *memslot;
	int as_id, id;

	if (!mask)
		return;

	as_id = slot >> 16;
	id = (u16)slot;

	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return;

	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
		return;

	KVM_MMU_LOCK(kvm);
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
	KVM_MMU_UNLOCK(kvm);
}

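/*
 * 'size' is the ring size in bytes, as configured by userspace via
 * KVM_CAP_DIRTY_LOG_RING.  The capability handler is expected to have
 * rejected sizes that are not a power of two, which is what lets the
 * push/reset paths index the ring with "& (ring->size - 1)".
 */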
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
{
	ring->dirty_gfns = vzalloc(size);
	if (!ring->dirty_gfns)
		return -ENOMEM;

	ring->size = size / sizeof(struct kvm_dirty_gfn);
	ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
	ring->dirty_index = 0;
	ring->reset_index = 0;
	ring->index = index;

	return 0;
}

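/*
 * Each entry cycles through three states, tracked in gfn->flags:
 *
 *   invalid (0):                       owned by KVM, available for pushing
 *   dirty (KVM_DIRTY_GFN_F_DIRTY):     published by KVM, awaiting harvest
 *   harvested (KVM_DIRTY_GFN_F_RESET): collected by userspace, awaiting reset
 *
 * The store-release in kvm_dirty_gfn_set_invalid() and the load-acquire in
 * kvm_dirty_gfn_harvested() pair with the userspace side of the protocol,
 * so neither side reads slot/offset while the other still owns the entry.
 */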
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
	smp_store_release(&gfn->flags, 0);
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
	return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}

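/*
 * Example of the coalescing done below: if userspace harvested offsets 16,
 * 17 and 19 of the same slot, the first entry seeds cur_offset = 16 and
 * mask = 1, and the next two fold in as deltas 1 and 3, giving mask = 0xb;
 * one kvm_reset_dirty_gfn() call then covers all three pages.  A backwards
 * step (e.g. 19 followed by 16) is folded too, by shifting the accumulated
 * mask up, provided no set bits would fall off the top.
 */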
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
	u32 cur_slot, next_slot;
	u64 cur_offset, next_offset;
	unsigned long mask;
	int count = 0;
	struct kvm_dirty_gfn *entry;
	bool first_round = true;

	/* This is only needed to make compilers happy */
	cur_slot = cur_offset = mask = 0;

	while (true) {
		entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

		if (!kvm_dirty_gfn_harvested(entry))
			break;

		next_slot = READ_ONCE(entry->slot);
		next_offset = READ_ONCE(entry->offset);

		/* Update the flags to reflect that this GFN is reset */
		kvm_dirty_gfn_set_invalid(entry);

		ring->reset_index++;
		count++;
		/*
		 * Try to coalesce the reset operations when the guest is
		 * scanning pages in the same slot.
		 */
		if (!first_round && next_slot == cur_slot) {
			s64 delta = next_offset - cur_offset;

			if (delta >= 0 && delta < BITS_PER_LONG) {
				mask |= 1ull << delta;
				continue;
			}

			/* Backwards visit, careful about overflows! */
			if (delta > -BITS_PER_LONG && delta < 0 &&
			    (mask << -delta >> -delta) == mask) {
				cur_offset = next_offset;
				mask = (mask << -delta) | 1;
				continue;
			}
		}
		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
		first_round = false;
	}

	kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

	/*
	 * KVM_REQ_DIRTY_RING_SOFT_FULL will be cleared by the VCPU
	 * thread the next time it enters the guest.
	 */

	trace_kvm_dirty_ring_reset(ring);

	return count;
}

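/*
 * The smp_wmb() in kvm_dirty_ring_push() pairs with a load-acquire of
 * gfn->flags on the userspace side.  A minimal sketch of a userspace
 * harvester, following Documentation/virt/kvm/api.rst ('next', 'nr_entries'
 * and collect() are illustrative, not part of the ABI):
 *
 *	for (;;) {
 *		struct kvm_dirty_gfn *gfn = &ring[next % nr_entries];
 *
 *		if (!(smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_DIRTY))
 *			break;
 *		collect(gfn->slot, gfn->offset);
 *		smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
 *		next++;
 *	}
 *	ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
 *
 * The KVM_RESET_DIRTY_RINGS ioctl is what lands in kvm_dirty_ring_reset()
 * above.
 */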
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
{
	struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
	struct kvm_dirty_gfn *entry;

	/*
	 * The ring should never get completely full: the soft-full exit and
	 * the reserved entries guarantee room for the pushes that can still
	 * happen before the vCPU returns to userspace.
	 */
	WARN_ON_ONCE(kvm_dirty_ring_full(ring));

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

	entry->slot = slot;
	entry->offset = offset;
	/*
	 * Make sure the data is filled in before we publish this to
	 * the userspace program.  There's no paired kernel-side reader.
	 */
	smp_wmb();
	kvm_dirty_gfn_set_dirtied(entry);
	ring->dirty_index++;
	trace_kvm_dirty_ring_push(ring, slot, offset);

	if (kvm_dirty_ring_soft_full(ring))
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
}

bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
{
	/*
	 * The VCPU isn't runnable once the dirty ring becomes soft full.
	 * The KVM_REQ_DIRTY_RING_SOFT_FULL request is re-queued to keep
	 * the VCPU from running until the dirty pages are harvested and
	 * the dirty ring is reset by userspace.
	 */
	if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
		trace_kvm_dirty_ring_exit(vcpu);
		return true;
	}

	return false;
}

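/*
 * Backs the per-vCPU mmap() of the ring.  A minimal sketch of the userspace
 * mapping, assuming 'vcpu_fd', 'ring_bytes' and 'page_size' hold the obvious
 * values:
 *
 *	ring = mmap(NULL, ring_bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    vcpu_fd, page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
 */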
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
	return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
	vfree(ring->dirty_gfns);
	ring->dirty_gfns = NULL;
}