xref: /linux/virt/kvm/dirty_ring.c (revision 43db1111073049220381944af4a3b8a5400eda71)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM dirty ring implementation
 *
 * Copyright 2019 Red Hat, Inc.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"

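/*
 * Weak default for architectures without a hardware dirty-log buffer.
 * Architectures that have one (e.g. x86 with PML) override this to report
 * how many ring entries a single guest exit may need to flush.
 */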
int __weak kvm_cpu_dirty_log_size(struct kvm *kvm)
{
	return 0;
}

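/*
 * Entries kept in reserve beyond the soft-full threshold, so a vCPU can
 * still publish the dirty GFNs it has already accumulated (including any
 * hardware dirty-log buffer) before it exits to userspace.
 */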
u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm)
{
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size(kvm);
}

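/*
 * KVM still needs the dirty bitmap when the dirty ring is disabled, or when
 * the ring is paired with the bitmap so that writes without a running vCPU
 * can be logged.
 */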
bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	return !kvm->dirty_ring_size || kvm->dirty_ring_with_bitmap;
}

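/*
 * By default, dirtying guest memory requires a running vCPU so the GFN can
 * be pushed onto that vCPU's ring.  Architectures that select
 * CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP provide their own implementation
 * to let specific non-vCPU writes land in the dirty bitmap instead.
 */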
#ifndef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	return false;
}
#endif

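/*
 * Ring occupancy helpers: "used" counts entries pushed but not yet reset,
 * "soft full" means only the reserved slack remains and the vCPU should
 * exit to userspace, "full" means no free entry is left at all.
 */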
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->size;
}

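/*
 * Re-enable dirty-page tracking for the GFNs covered by @mask (bit 0 being
 * @offset) in the memslot identified by @slot, once userspace has harvested
 * the corresponding ring entries.  @slot encodes the address space id in
 * its upper 16 bits and the slot id in the lower 16 bits.
 */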
static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
	struct kvm_memory_slot *memslot;
	int as_id, id;

	if (!mask)
		return;

	as_id = slot >> 16;
	id = (u16)slot;

	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return;

	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
		return;

	KVM_MMU_LOCK(kvm);
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
	KVM_MMU_UNLOCK(kvm);
}

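/*
 * Allocate one dirty ring of @size bytes for a vCPU.  Callers are expected
 * to pass a power-of-two size (validated when the capability is enabled),
 * since indices are wrapped with "& (ring->size - 1)".
 */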
int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
			 int index, u32 size)
{
	ring->dirty_gfns = vzalloc(size);
	if (!ring->dirty_gfns)
		return -ENOMEM;

	ring->size = size / sizeof(struct kvm_dirty_gfn);
	ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries(kvm);
	ring->dirty_index = 0;
	ring->reset_index = 0;
	ring->index = index;

	return 0;
}

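/*
 * Per-entry flag protocol shared with userspace: KVM publishes an entry by
 * setting KVM_DIRTY_GFN_F_DIRTY, userspace acknowledges ("harvests") it by
 * setting KVM_DIRTY_GFN_F_RESET, and KVM clears the flags again once the
 * entry has been reset.  The acquire/release ordering pairs with
 * userspace's accesses to the same flags.
 */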
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
	smp_store_release(&gfn->flags, 0);
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
	return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}

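/*
 * Called from the KVM_RESET_DIRTY_RINGS ioctl (with kvm->slots_lock held):
 * collect all harvested entries, coalesce adjacent GFNs from the same slot
 * into a bitmask, and re-arm dirty tracking for them.  Returns the number
 * of entries that were reset.
 */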
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
	u32 cur_slot, next_slot;
	u64 cur_offset, next_offset;
	unsigned long mask;
	int count = 0;
	struct kvm_dirty_gfn *entry;
	bool first_round = true;

	/* This is only needed to make compilers happy */
	cur_slot = cur_offset = mask = 0;

	while (true) {
		entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

		if (!kvm_dirty_gfn_harvested(entry))
			break;

		next_slot = READ_ONCE(entry->slot);
		next_offset = READ_ONCE(entry->offset);

		/* Update the flags to reflect that this GFN is reset */
		kvm_dirty_gfn_set_invalid(entry);

		ring->reset_index++;
		count++;
		/*
		 * Try to coalesce the reset operations when the guest is
		 * scanning pages in the same slot.
		 */
		if (!first_round && next_slot == cur_slot) {
			s64 delta = next_offset - cur_offset;

			if (delta >= 0 && delta < BITS_PER_LONG) {
				mask |= 1ull << delta;
				continue;
			}

			/* Backwards visit, careful about overflows! */
			if (delta > -BITS_PER_LONG && delta < 0 &&
			    (mask << -delta >> -delta) == mask) {
				cur_offset = next_offset;
				mask = (mask << -delta) | 1;
				continue;
			}
		}
		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
		first_round = false;
	}

	kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

	/*
	 * The KVM_REQ_DIRTY_RING_SOFT_FULL request will be cleared by the
	 * vCPU thread the next time it enters the guest.
	 */

	trace_kvm_dirty_ring_reset(ring);

	return count;
}

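/*
 * Publish one dirty GFN into the vCPU's ring.  The caller (typically
 * mark_page_dirty_in_slot() running in vCPU context) must guarantee that a
 * free entry remains, which the soft-full exit below is meant to ensure.
 */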
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
{
	struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
	struct kvm_dirty_gfn *entry;

	/* It should never get full */
	WARN_ON_ONCE(kvm_dirty_ring_full(ring));

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

	entry->slot = slot;
	entry->offset = offset;
	/*
	 * Make sure the data is filled in before we publish this to
	 * the userspace program.  There's no paired kernel-side reader.
	 */
	smp_wmb();
	kvm_dirty_gfn_set_dirtied(entry);
	ring->dirty_index++;
	trace_kvm_dirty_ring_push(ring, slot, offset);

	if (kvm_dirty_ring_soft_full(ring))
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
}

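/*
 * Checked before entering the guest: if the vCPU's ring has reached the
 * soft-full threshold, force an exit to userspace with
 * KVM_EXIT_DIRTY_RING_FULL so the ring can be harvested and reset.
 */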
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
{
	/*
	 * The VCPU isn't runnable when the dirty ring becomes soft full.
	 * The KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent
	 * the VCPU from running until the dirty pages are harvested and
	 * the dirty ring is reset by userspace.
	 */
	if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
		trace_kvm_dirty_ring_exit(vcpu);
		return true;
	}

	return false;
}

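/*
 * Return the vmalloc'ed page backing @offset (in pages) of the ring, so it
 * can be mapped when userspace mmaps the ring through the vCPU fd.
 */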
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
	return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}

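/* Release the ring buffer, typically when the vCPU is destroyed. */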
void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
	vfree(ring->dirty_gfns);
	ring->dirty_gfns = NULL;
}