// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support KVM guest page tracking
 *
 * This feature allows us to track page access in the guest. Currently, only
 * write access is tracked.
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */

#include <linux/kvm_host.h>
#include <linux/rculist.h>

#include <asm/kvm_page_track.h>

#include "mmu.h"
#include "mmu_internal.h"

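/*
 * Write tracking is enabled if external write tracking is compiled in
 * (CONFIG_KVM_EXTERNAL_WRITE_TRACKING), if TDP is disabled (i.e. the shadow
 * MMU is in use), or if shadow roots have been allocated for this VM.
 */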
bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
{
	return IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) ||
	       !tdp_enabled || kvm_shadow_root_allocated(kvm);
}

void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
		kvfree(slot->arch.gfn_track[i]);
		slot->arch.gfn_track[i] = NULL;
	}
}

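/*
 * Allocate the per-mode gfn_track arrays for a new memslot. The write-tracking
 * array is skipped if write tracking is not (yet) enabled; it can be allocated
 * later via kvm_page_track_write_tracking_alloc().
 */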
int kvm_page_track_create_memslot(struct kvm *kvm,
				  struct kvm_memory_slot *slot,
				  unsigned long npages)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
		if (i == KVM_PAGE_TRACK_WRITE &&
		    !kvm_page_track_write_tracking_enabled(kvm))
			continue;

		slot->arch.gfn_track[i] =
			kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
				 GFP_KERNEL_ACCOUNT);
		if (!slot->arch.gfn_track[i])
			goto track_free;
	}

	return 0;

track_free:
	kvm_page_track_free_memslot(slot);
	return -ENOMEM;
}

static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
{
	if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
		return false;

	return true;
}

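/*
 * Allocate the write-tracking counters for a memslot if they have not been
 * allocated yet, e.g. when write tracking is enabled after the slot was
 * created.
 */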
int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot)
{
	unsigned short *gfn_track;

	if (slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE])
		return 0;

	gfn_track = kvcalloc(slot->npages, sizeof(*gfn_track), GFP_KERNEL_ACCOUNT);
	if (gfn_track == NULL)
		return -ENOMEM;

	slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE] = gfn_track;
	return 0;
}

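/*
 * Adjust the number of trackers for @gfn in @slot by @count, warning on (and
 * ignoring) an update that would underflow or overflow the counter.
 */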
static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
			     enum kvm_page_track_mode mode, short count)
{
	int index, val;

	index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);

	val = slot->arch.gfn_track[mode][index];

	if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
		return;

	slot->arch.gfn_track[mode][index] += count;
}

/*
 * Add a guest page to the tracking pool so that corresponding access to that
 * page will be intercepted.
 *
 * It should be called under the protection of mmu_lock, together with either
 * kvm->srcu or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memslot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */
void kvm_slot_page_track_add_page(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn,
				  enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
		    !kvm_page_track_write_tracking_enabled(kvm)))
		return;

	update_gfn_track(slot, gfn, mode, 1);

	/*
	 * A new tracker stops large page mapping for the
	 * tracked page.
	 */
	kvm_mmu_gfn_disallow_lpage(slot, gfn);

	if (mode == KVM_PAGE_TRACK_WRITE)
		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
			kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
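
/*
 * Illustrative sketch (not part of this file): a caller that wants writes to
 * a gfn intercepted would typically do something like
 *
 *	write_lock(&kvm->mmu_lock);
 *	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
 *	write_unlock(&kvm->mmu_lock);
 *
 * while also holding kvm->srcu or kvm->slots_lock, per the comment above.
 */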

/*
 * Remove the guest page from the tracking pool, which stops the interception
 * of corresponding access to that page. It is the opposite operation of
 * kvm_slot_page_track_add_page().
 *
 * It should be called under the protection of mmu_lock, together with either
 * kvm->srcu or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memslot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */
void kvm_slot_page_track_remove_page(struct kvm *kvm,
				     struct kvm_memory_slot *slot, gfn_t gfn,
				     enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
		    !kvm_page_track_write_tracking_enabled(kvm)))
		return;

	update_gfn_track(slot, gfn, mode, -1);

	/*
	 * allow large page mapping for the tracked page
	 * after the tracker is gone.
	 */
	kvm_mmu_gfn_allow_lpage(slot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);

/*
 * Check whether the corresponding access to the specified guest page is
 * tracked.
 */
bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   enum kvm_page_track_mode mode)
{
	int index;

	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return false;

	if (!slot)
		return false;

	if (mode == KVM_PAGE_TRACK_WRITE &&
	    !kvm_page_track_write_tracking_enabled(vcpu->kvm))
		return false;

	index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
	return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
}

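/*
 * kvm_page_track_init() and kvm_page_track_cleanup() set up and tear down the
 * SRCU structure that protects walks of the notifier list below.
 */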
void kvm_page_track_cleanup(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	cleanup_srcu_struct(&head->track_srcu);
}

int kvm_page_track_init(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	INIT_HLIST_HEAD(&head->track_notifier_list);
	return init_srcu_struct(&head->track_srcu);
}

/*
 * Register the notifier so that events on the tracked guest pages can be
 * received.
 */
void
kvm_page_track_register_notifier(struct kvm *kvm,
				 struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	write_lock(&kvm->mmu_lock);
	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
	write_unlock(&kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
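
/*
 * Illustrative sketch (hypothetical names, not part of this file): a user
 * embeds a notifier node, fills in the callbacks it cares about and then
 * registers the node:
 *
 *	static void my_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 *				   const u8 *new, int bytes,
 *				   struct kvm_page_track_notifier_node *node)
 *	{
 *		... react to the emulated write ...
 *	}
 *
 *	static struct kvm_page_track_notifier_node my_node = {
 *		.track_write = my_track_write,
 *	};
 *
 *	kvm_page_track_register_notifier(kvm, &my_node);
 */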

/*
 * Stop receiving the event interception. It is the opposite operation of
 * kvm_page_track_register_notifier().
 */
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
				   struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	write_lock(&kvm->mmu_lock);
	hlist_del_rcu(&n->node);
	write_unlock(&kvm->mmu_lock);
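	/*
	 * Wait for all in-flight notifier callbacks to finish before the
	 * caller frees the node.
	 */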
	synchronize_srcu(&head->track_srcu);
}
EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);

/*
 * Notify the nodes that write access was intercepted and write emulation has
 * finished at this time.
 *
 * Each node must figure out by itself whether the written page is one it is
 * interested in.
 */
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			  int bytes)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &vcpu->kvm->arch.track_notifier_head;

	if (hlist_empty(&head->track_notifier_list))
		return;

	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_srcu(n, &head->track_notifier_list, node,
				srcu_read_lock_held(&head->track_srcu))
		if (n->track_write)
			n->track_write(vcpu, gpa, new, bytes, n);
	srcu_read_unlock(&head->track_srcu, idx);
}

/*
 * Notify the nodes that a memory slot is being removed or moved so that they
 * can drop write protection for the pages in the memory slot.
 *
 * Each node must figure out by itself whether it has any write-protected
 * pages in this slot.
 */
void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &kvm->arch.track_notifier_head;

	if (hlist_empty(&head->track_notifier_list))
		return;

	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_srcu(n, &head->track_notifier_list, node,
				srcu_read_lock_held(&head->track_srcu))
		if (n->track_flush_slot)
			n->track_flush_slot(kvm, slot, n);
	srcu_read_unlock(&head->track_srcu, idx);
}
302