// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM dirty ring implementation
 *
 * Copyright 2019 Red Hat, Inc.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"

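/*
 * Weak default.  Architectures that flush a hardware dirty log into the
 * ring on vcpu exit (e.g. x86 with PML) are expected to override this so
 * that enough ring entries are reserved for that flush; returning 0
 * reserves no extra entries.
 */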
int __weak kvm_cpu_dirty_log_size(struct kvm *kvm)
{
	return 0;
}

u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm)
{
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size(kvm);
}

bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	return !kvm->dirty_ring_size || kvm->dirty_ring_with_bitmap;
}

#ifndef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	return false;
}
#endif

static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

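/*
 * "Soft full" (at least soft_limit entries in use) makes the vcpu exit to
 * userspace with KVM_EXIT_DIRTY_RING_FULL so the ring can be harvested and
 * reset; the reserved entries between soft_limit and size absorb dirtying
 * that happens before that exit completes.  "Hard full" should never be
 * hit (see the WARN in kvm_dirty_ring_push()).
 */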
static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->size;
}

static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
	struct kvm_memory_slot *memslot;
	int as_id, id;

	as_id = slot >> 16;
	id = (u16)slot;

	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return;

	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
		return;

	KVM_MMU_LOCK(kvm);
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
	KVM_MMU_UNLOCK(kvm);
}

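/*
 * 'size' is the ring size in bytes as configured by userspace.  The ring
 * indices below are masked with (number of entries - 1), so the cap-enable
 * path is expected to have rejected sizes that do not describe a
 * power-of-two number of entries.
 */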
int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
			 int index, u32 size)
{
	ring->dirty_gfns = vzalloc(size);
	if (!ring->dirty_gfns)
		return -ENOMEM;

	ring->size = size / sizeof(struct kvm_dirty_gfn);
	ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries(kvm);
	ring->dirty_index = 0;
	ring->reset_index = 0;
	ring->index = index;

	return 0;
}

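/*
 * Life cycle of a dirty GFN entry, as seen through its flags (see
 * Documentation/virt/kvm/api.rst for the userspace side of the protocol):
 *
 *   invalid (flags == 0)
 *     -> dirty: KVM_DIRTY_GFN_F_DIRTY set by KVM in kvm_dirty_ring_push()
 *     -> harvested: KVM_DIRTY_GFN_F_RESET set by userspace once it has
 *        collected the entry
 *     -> back to invalid when KVM_RESET_DIRTY_RINGS processes the entry in
 *        kvm_dirty_ring_reset().
 */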
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
	smp_store_release(&gfn->flags, 0);
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
	return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}

int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
			 int *nr_entries_reset)
{
	/*
	 * To minimize mmu_lock contention, batch resets for harvested entries
	 * whose gfns are in the same slot, and are within N frame numbers of
	 * each other, where N is the number of bits in an unsigned long.  For
	 * simplicity, process the current set of entries when the next entry
	 * can't be included in the batch.
	 *
	 * Track the current batch slot, the gfn offset into the slot for the
	 * batch, and the bitmask of gfns that need to be reset (relative to
	 * offset).  Note, the offset may be adjusted backwards, e.g. so that
	 * a sequence of gfns X, X-1, ... X-N-1 can be batched.
	 */
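	/*
	 * For example, harvested gfn offsets 5, 6, 4 in the same slot are
	 * coalesced as follows: offset 5 starts a batch (cur_offset = 5,
	 * mask = 0b1), offset 6 sets bit 1 (mask = 0b11), and offset 4
	 * rebases the batch backwards (cur_offset = 4, mask = 0b111), so a
	 * single kvm_reset_dirty_gfn() call covers all three entries.
	 */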
	u32 cur_slot, next_slot;
	u64 cur_offset, next_offset;
	unsigned long mask = 0;
	struct kvm_dirty_gfn *entry;

	/*
	 * Ensure concurrent calls to KVM_RESET_DIRTY_RINGS are serialized,
	 * e.g. so that KVM fully resets all entries processed by a given call
	 * before returning to userspace.  Holding slots_lock also protects
	 * the various memslot accesses.
	 */
	lockdep_assert_held(&kvm->slots_lock);

	while (likely((*nr_entries_reset) < INT_MAX)) {
		if (signal_pending(current))
			return -EINTR;

		entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

		if (!kvm_dirty_gfn_harvested(entry))
			break;

		next_slot = READ_ONCE(entry->slot);
		next_offset = READ_ONCE(entry->offset);

		/* Update the flags to reflect that this GFN is reset */
		kvm_dirty_gfn_set_invalid(entry);

		ring->reset_index++;
		(*nr_entries_reset)++;

		if (mask) {
			/*
			 * While the size of each ring is fixed, it's possible
			 * for the ring to be constantly re-dirtied/harvested
			 * while the reset is in-progress (the hard limit exists
			 * only to guard against the count becoming negative).
			 */
			cond_resched();

			/*
			 * Try to coalesce the reset operations when the guest
			 * is scanning pages in the same slot.
			 */
			if (next_slot == cur_slot) {
				s64 delta = next_offset - cur_offset;

				if (delta >= 0 && delta < BITS_PER_LONG) {
					mask |= 1ull << delta;
					continue;
				}

				/* Backwards visit, careful about overflows! */
				if (delta > -BITS_PER_LONG && delta < 0 &&
				    (mask << -delta >> -delta) == mask) {
					cur_offset = next_offset;
					mask = (mask << -delta) | 1;
					continue;
				}
			}

			/*
			 * Reset the slot for all the harvested entries that
			 * have been gathered, but not yet fully processed.
			 */
			kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
		}

		/*
		 * The current slot was reset or this is the first harvested
		 * entry, (re)initialize the metadata.
		 */
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
	}

	/*
	 * Perform a final reset if there are harvested entries that haven't
	 * been processed, which is guaranteed if at least one harvested entry
	 * was found.  The loop only performs a reset when the "next" entry
	 * can't be batched with the "current" entry(s), and that reset
	 * processes the _current_ entry(s); i.e. the last harvested entry,
	 * a.k.a. next, will always be left pending.
	 */
	if (mask)
		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

	/*
	 * The KVM_REQ_DIRTY_RING_SOFT_FULL request will be cleared by the
	 * vCPU thread the next time it enters the guest.
	 */

	trace_kvm_dirty_ring_reset(ring);

	return 0;
}

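/*
 * Push a dirty GFN onto the vcpu's ring.  This is expected to be called
 * from mark_page_dirty_in_slot() in the context of a running vCPU; a vCPU
 * is not allowed to re-enter the guest while its ring is soft-full (see
 * kvm_dirty_ring_check_request()), which is why the ring should never be
 * hard-full here.
 */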
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
{
	struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
	struct kvm_dirty_gfn *entry;

	/* It should never get full */
	WARN_ON_ONCE(kvm_dirty_ring_full(ring));

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

	entry->slot = slot;
	entry->offset = offset;
	/*
	 * Make sure the data is filled in before we publish this to
	 * the userspace program.  There's no paired kernel-side reader.
	 */
	smp_wmb();
	kvm_dirty_gfn_set_dirtied(entry);
	ring->dirty_index++;
	trace_kvm_dirty_ring_push(ring, slot, offset);

	if (kvm_dirty_ring_soft_full(ring))
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
}

bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
{
	/*
	 * The VCPU isn't runnable when the dirty ring becomes soft full.
	 * The KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent
	 * the VCPU from running until the dirty pages are harvested and
	 * the dirty ring is reset by userspace.
	 */
	if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
		trace_kvm_dirty_ring_exit(vcpu);
		return true;
	}

	return false;
}

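/*
 * Return the page backing 'offset' (in pages) into the ring.  This is
 * presumably what backs userspace's mmap() of the ring through the vcpu
 * fd at KVM_DIRTY_LOG_PAGE_OFFSET; the ring memory itself comes from
 * vzalloc(), hence vmalloc_to_page().
 */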
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
	return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
	vfree(ring->dirty_gfns);
	ring->dirty_gfns = NULL;
}