// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM dirty ring implementation
 *
 * Copyright 2019 Red Hat, Inc.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"

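/*
 * Weak default for architectures without an extra hardware dirty-log
 * buffer.  x86 overrides this to account for the PML (Page Modification
 * Logging) buffer, whose entries must fit into the ring's reserved
 * slack when a vCPU exits.
 */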
int __weak kvm_cpu_dirty_log_size(void)
{
	return 0;
}

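/*
 * Entries kept in reserve beyond the soft-full watermark, so that a vCPU
 * already past the soft-full check can still push the dirty GFNs it has
 * in flight (e.g. a PML buffer flush) before it exits to userspace.
 */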
u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}

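/*
 * The dirty bitmap is used when the dirty ring is disabled, or when the
 * ring was enabled together with KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP so
 * that writes which cannot be attributed to a running vCPU (e.g. saving
 * arm64 vgic/ITS tables from a device ioctl) are still logged.
 */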
bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	return !kvm->dirty_ring_size || kvm->dirty_ring_with_bitmap;
}

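/*
 * By default no write may be logged without a running vCPU; architectures
 * that select CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP provide their own
 * implementation.
 */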
#ifndef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	return false;
}
#endif

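/*
 * dirty_index and reset_index are free-running u32 counters and the ring
 * size is a power of two, so their difference is the number of published
 * but not-yet-reset entries even across wrap-around: e.g. with size 4096,
 * dirty_index 4097 and reset_index 4095 give 2 entries in use.
 */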
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->size;
}

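/*
 * Re-arm dirty-page tracking for up to 64 pages at once.  'slot' packs
 * the address-space id in its high 16 bits and the memslot id in its low
 * 16 bits (the KVM_SET_USER_MEMORY_REGION encoding); bit N of 'mask'
 * selects page 'offset + N'.  The values come from the ring shared with
 * userspace, so out-of-range ones are silently ignored rather than
 * trusted.
 */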
static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
	struct kvm_memory_slot *memslot;
	int as_id, id;

	as_id = slot >> 16;
	id = (u16)slot;

	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return;

	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
		return;

	KVM_MMU_LOCK(kvm);
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
	KVM_MMU_UNLOCK(kvm);
}

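/*
 * 'size' is the ring size in bytes as configured by userspace through
 * KVM_CAP_DIRTY_LOG_RING; the caller has already checked that it is a
 * page-aligned power of two, which is what makes the cheap
 * 'index & (ring->size - 1)' wrapping below valid.
 */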
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
{
	ring->dirty_gfns = vzalloc(size);
	if (!ring->dirty_gfns)
		return -ENOMEM;

	ring->size = size / sizeof(struct kvm_dirty_gfn);
	ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
	ring->dirty_index = 0;
	ring->reset_index = 0;
	ring->index = index;

	return 0;
}

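/*
 * Each entry cycles through three states: dirtied (KVM_DIRTY_GFN_F_DIRTY,
 * set when KVM pushes a GFN), harvested (KVM_DIRTY_GFN_F_RESET, set by
 * userspace once it has collected the entry) and invalid (flags cleared
 * on KVM_RESET_DIRTY_RINGS, making the slot reusable).  The release/
 * acquire pairing below orders the flag against the slot/offset fields
 * shared with userspace.
 *
 * A userspace harvester would look roughly like this (illustrative
 * sketch only; memory-ordering details elided, and collect_dirty_page()
 * stands in for the VMM's own handler):
 *
 *	while (gfns[fetch % size].flags & KVM_DIRTY_GFN_F_DIRTY) {
 *		collect_dirty_page(&gfns[fetch % size]);
 *		gfns[fetch % size].flags |= KVM_DIRTY_GFN_F_RESET;
 *		fetch++;
 *	}
 *	ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
 */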
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
	smp_store_release(&gfn->flags, 0);
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
	return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}

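/*
 * Walk the ring from reset_index, return every harvested entry to the
 * arch dirty-tracking code and mark it invalid again.  Offsets that fall
 * in the same slot and the same 64-page window are coalesced into a
 * single mask so that kvm_arch_mmu_enable_log_dirty_pt_masked() runs
 * once per group rather than once per page: harvesting offsets 5, 6 and
 * 8 of one slot, for instance, yields cur_offset = 5 and mask = 0b1011,
 * flushed in one call.  Called with kvm->slots_lock held from the
 * KVM_RESET_DIRTY_RINGS ioctl; returns the number of entries reset.
 */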
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
	u32 cur_slot, next_slot;
	u64 cur_offset, next_offset;
	unsigned long mask;
	int count = 0;
	struct kvm_dirty_gfn *entry;
	bool first_round = true;

	/* This is only needed to make compilers happy */
	cur_slot = cur_offset = mask = 0;

	while (true) {
		entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

		if (!kvm_dirty_gfn_harvested(entry))
			break;

		next_slot = READ_ONCE(entry->slot);
		next_offset = READ_ONCE(entry->offset);

		/* Update the flags to reflect that this GFN is reset */
		kvm_dirty_gfn_set_invalid(entry);

		ring->reset_index++;
		count++;
		/*
		 * Try to coalesce the reset operations when the guest is
		 * scanning pages in the same slot.
		 */
		if (!first_round && next_slot == cur_slot) {
			s64 delta = next_offset - cur_offset;

			if (delta >= 0 && delta < BITS_PER_LONG) {
				mask |= 1ull << delta;
				continue;
			}

			/* Backwards visit, careful about overflows!  */
			if (delta > -BITS_PER_LONG && delta < 0 &&
			    (mask << -delta >> -delta) == mask) {
				cur_offset = next_offset;
				mask = (mask << -delta) | 1;
				continue;
			}
		}
		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
		first_round = false;
	}

	kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

	/*
	 * The request KVM_REQ_DIRTY_RING_SOFT_FULL will be cleared by
	 * the VCPU thread the next time it enters the guest.
	 */

	trace_kvm_dirty_ring_reset(ring);

	return count;
}

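/*
 * Publish one dirty GFN into the vCPU's ring.  Soft-full, not hard-full,
 * is the exit trigger: the reserved slack guarantees the ring cannot
 * overflow between the soft-full check and the vCPU actually exiting to
 * userspace, hence the WARN if it ever does fill up.
 */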
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
{
	struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
	struct kvm_dirty_gfn *entry;

	/* It should never get full */
	WARN_ON_ONCE(kvm_dirty_ring_full(ring));

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

	entry->slot = slot;
	entry->offset = offset;
	/*
	 * Make sure the data is filled in before we publish this to
	 * the userspace program.  There's no paired kernel-side reader.
	 */
	smp_wmb();
	kvm_dirty_gfn_set_dirtied(entry);
	ring->dirty_index++;
	trace_kvm_dirty_ring_push(ring, slot, offset);

	if (kvm_dirty_ring_soft_full(ring))
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
}

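/*
 * Checked on the vcpu_run path before entering the guest: if the ring is
 * still soft-full, re-queue the request and bounce the vCPU out with
 * KVM_EXIT_DIRTY_RING_FULL so that userspace harvests and resets the
 * rings before resuming.
 */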
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
{
	/*
	 * The VCPU isn't runnable when the dirty ring becomes soft full.
	 * The KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent
	 * the VCPU from running until the dirty pages are harvested and
	 * the dirty ring is reset by userspace.
	 */
	if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
		trace_kvm_dirty_ring_exit(vcpu);
		return true;
	}

	return false;
}

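/*
 * Back the mmap() of the per-vCPU ring: userspace maps it through the
 * vCPU fd at page offset KVM_DIRTY_LOG_PAGE_OFFSET, and vmalloc_to_page()
 * resolves each page of the vzalloc'ed buffer for the fault handler.
 */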
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
	return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}

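/*
 * Called on vCPU teardown; vfree(NULL) is a no-op, so this is safe even
 * if the ring was never allocated.
 */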
void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
	vfree(ring->dirty_gfns);
	ring->dirty_gfns = NULL;
}