xref: /linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 06a130e42a5bfc84795464bff023bff4c16f58c5)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include "mmu.h"
5 #include "mmu_internal.h"
6 #include "mmutrace.h"
7 #include "tdp_iter.h"
8 #include "tdp_mmu.h"
9 #include "spte.h"
10 
11 #include <asm/cmpxchg.h>
12 #include <trace/events/kvm.h>
13 
14 /* Initializes the TDP MMU for the VM, if enabled. */
15 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
16 {
17 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
18 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
19 }
20 
21 /* Arbitrarily returns true so that this may be used in if statements. */
22 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
23 							     bool shared)
24 {
25 	if (shared)
26 		lockdep_assert_held_read(&kvm->mmu_lock);
27 	else
28 		lockdep_assert_held_write(&kvm->mmu_lock);
29 
30 	return true;
31 }
32 
33 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
34 {
35 	/*
36 	 * Invalidate all roots, which, besides the obvious, schedules all roots
37 	 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
38 	 * ultimately frees all roots.
39 	 */
40 	kvm_tdp_mmu_invalidate_all_roots(kvm);
41 	kvm_tdp_mmu_zap_invalidated_roots(kvm);
42 
43 	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
44 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
45 
46 	/*
47 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
48 	 * can run before the VM is torn down.  Putting the last reference to
49 	 * zapped roots will create new callbacks.
50 	 */
51 	rcu_barrier();
52 }
53 
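/* Free a shadow page's backing page table page and its struct kvm_mmu_page. */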
54 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
55 {
56 	free_page((unsigned long)sp->spt);
57 	kmem_cache_free(mmu_page_header_cache, sp);
58 }
59 
60 /*
61  * This is called through call_rcu in order to free TDP page table memory
62  * safely with respect to other kernel threads that may be operating on
63  * the memory.
64  * By only accessing TDP MMU page table memory in an RCU read critical
65  * section, and freeing it after a grace period, lockless access to that
66  * memory won't use it after it is freed.
67  */
68 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
69 {
70 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
71 					       rcu_head);
72 
73 	tdp_mmu_free_sp(sp);
74 }
75 
76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
77 {
78 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
79 		return;
80 
81 	/*
82 	 * The TDP MMU itself holds a reference to each root until the root is
83 	 * explicitly invalidated, i.e. the final reference should never be
84 	 * put for a valid root.
85 	 */
86 	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
87 
88 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
89 	list_del_rcu(&root->link);
90 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
91 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
92 }
93 
94 /*
95  * Returns the next root after @prev_root (or the first root if @prev_root is
96  * NULL).  A reference to the returned root is acquired, and the reference to
97  * @prev_root is released (the caller obviously must hold a reference to
98  * @prev_root if it's non-NULL).
99  *
100  * If @only_valid is true, invalid roots are skipped.
101  *
102  * Returns NULL if the end of tdp_mmu_roots was reached.
103  */
104 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
105 					      struct kvm_mmu_page *prev_root,
106 					      bool only_valid)
107 {
108 	struct kvm_mmu_page *next_root;
109 
110 	/*
111 	 * While the roots themselves are RCU-protected, fields such as
112 	 * role.invalid are protected by mmu_lock.
113 	 */
114 	lockdep_assert_held(&kvm->mmu_lock);
115 
116 	rcu_read_lock();
117 
118 	if (prev_root)
119 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
120 						  &prev_root->link,
121 						  typeof(*prev_root), link);
122 	else
123 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
124 						   typeof(*next_root), link);
125 
126 	while (next_root) {
127 		if ((!only_valid || !next_root->role.invalid) &&
128 		    kvm_tdp_mmu_get_root(next_root))
129 			break;
130 
131 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
132 				&next_root->link, typeof(*next_root), link);
133 	}
134 
135 	rcu_read_unlock();
136 
137 	if (prev_root)
138 		kvm_tdp_mmu_put_root(kvm, prev_root);
139 
140 	return next_root;
141 }
142 
143 /*
144  * Note: this iterator gets and puts references to the roots it iterates over.
145  * This makes it safe to release the MMU lock and yield within the loop, but
146  * if exiting the loop early, the caller must drop the reference to the most
147  * recent root. (Unless keeping a live reference is desirable.)
148  *
149  * If shared is set, this function is operating under the MMU lock in read
150  * mode.
151  */
152 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)	\
153 	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);		\
154 	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;		\
155 	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))		\
156 		if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {	\
157 		} else
158 
159 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)	\
160 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
161 
162 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
163 	for (_root = tdp_mmu_next_root(_kvm, NULL, false);		\
164 	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
165 	     _root = tdp_mmu_next_root(_kvm, _root, false))
166 
167 /*
168  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
169  * the implication being that any flow that holds mmu_lock for read is
170  * inherently yield-friendly and should use the yield-safe variant above.
171  * Holding mmu_lock for write obviates the need for RCU protection as the list
172  * is guaranteed to be stable.
173  */
174 #define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _only_valid)		\
175 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)		\
176 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&		\
177 		    ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) ||	\
178 		     ((_only_valid) && (_root)->role.invalid))) {		\
179 		} else
180 
181 #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
182 	__for_each_tdp_mmu_root(_kvm, _root, _as_id, false)
183 
184 #define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id)		\
185 	__for_each_tdp_mmu_root(_kvm, _root, _as_id, true)
186 
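/*
 * Allocate a shadow page and its backing page table from the vCPU's MMU
 * memory caches, which are topped up before the page fault handler acquires
 * mmu_lock, so the allocations cannot fail here.
 */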
187 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
188 {
189 	struct kvm_mmu_page *sp;
190 
191 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
192 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
193 
194 	return sp;
195 }
196 
197 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
198 			    gfn_t gfn, union kvm_mmu_page_role role)
199 {
200 	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
201 
202 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
203 
204 	sp->role = role;
205 	sp->gfn = gfn;
206 	sp->ptep = sptep;
207 	sp->tdp_mmu_page = true;
208 
209 	trace_kvm_mmu_get_page(sp, true);
210 }
211 
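/*
 * Initialize a child shadow page using its parent's role, with the level
 * decremented by one, so that the child slots in directly below @iter's SPTE.
 */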
212 static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
213 				  struct tdp_iter *iter)
214 {
215 	struct kvm_mmu_page *parent_sp;
216 	union kvm_mmu_page_role role;
217 
218 	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
219 
220 	role = parent_sp->role;
221 	role.level--;
222 
223 	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
224 }
225 
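/*
 * Get a TDP MMU root for the vCPU's current MMU role: reuse an existing
 * valid root with a matching role if one exists, otherwise allocate and
 * publish a new root, and then load it into mmu->root.
 */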
226 int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu)
227 {
228 	struct kvm_mmu *mmu = vcpu->arch.mmu;
229 	union kvm_mmu_page_role role = mmu->root_role;
230 	int as_id = kvm_mmu_role_as_id(role);
231 	struct kvm *kvm = vcpu->kvm;
232 	struct kvm_mmu_page *root;
233 
234 	/*
235 	 * Check for an existing root before acquiring the pages lock to avoid
236 	 * unnecessary serialization if multiple vCPUs are loading a new root.
237 	 * E.g. when bringing up secondary vCPUs, KVM will already have created
238 	 * a valid root on behalf of the primary vCPU.
239 	 */
240 	read_lock(&kvm->mmu_lock);
241 
242 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) {
243 		if (root->role.word == role.word)
244 			goto out_read_unlock;
245 	}
246 
247 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
248 
249 	/*
250 	 * Recheck for an existing root after acquiring the pages lock, another
251 	 * vCPU may have raced ahead and created a new usable root.  Manually
252 	 * walk the list of roots as the standard macros assume that the pages
253 	 * lock is *not* held.  WARN if grabbing a reference to a usable root
254 	 * fails, as the last reference to a root can only be put *after* the
255 	 * root has been invalidated, which requires holding mmu_lock for write.
256 	 */
257 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
258 		if (root->role.word == role.word &&
259 		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
260 			goto out_spin_unlock;
261 	}
262 
263 	root = tdp_mmu_alloc_sp(vcpu);
264 	tdp_mmu_init_sp(root, NULL, 0, role);
265 
266 	/*
267 	 * TDP MMU roots are kept until they are explicitly invalidated, either
268 	 * by a memslot update or by the destruction of the VM.  Initialize the
269 	 * refcount to two; one reference for the vCPU, and one reference for
270 	 * the TDP MMU itself, which is held until the root is invalidated and
271 	 * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
272 	 */
273 	refcount_set(&root->tdp_mmu_root_count, 2);
274 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
275 
276 out_spin_unlock:
277 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
278 out_read_unlock:
279 	read_unlock(&kvm->mmu_lock);
280 	/*
281 	 * Note, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS will prevent entering the guest
282 	 * and actually consuming the root if it's invalidated after dropping
283 	 * mmu_lock, and the root can't be freed as this vCPU holds a reference.
284 	 */
285 	mmu->root.hpa = __pa(root->spt);
286 	mmu->root.pgd = 0;
287 	return 0;
288 }
289 
290 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
291 				u64 old_spte, u64 new_spte, int level,
292 				bool shared);
293 
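/*
 * (Un)account a TDP MMU shadow page: update the kernel's per-VM accounting
 * of page table pages and KVM's count of TDP MMU pages.
 */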
294 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
295 {
296 	kvm_account_pgtable_pages((void *)sp->spt, +1);
297 	atomic64_inc(&kvm->arch.tdp_mmu_pages);
298 }
299 
300 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
301 {
302 	kvm_account_pgtable_pages((void *)sp->spt, -1);
303 	atomic64_dec(&kvm->arch.tdp_mmu_pages);
304 }
305 
306 /**
307  * tdp_mmu_unlink_sp() - Unaccount a shadow page that was removed from the TDP MMU
308  *
309  * @kvm: kvm instance
310  * @sp: the page to be removed
311  */
312 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
313 {
314 	tdp_unaccount_mmu_page(kvm, sp);
315 
316 	if (!sp->nx_huge_page_disallowed)
317 		return;
318 
319 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
320 	sp->nx_huge_page_disallowed = false;
321 	untrack_possible_nx_huge_page(kvm, sp);
322 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
323 }
324 
325 /**
326  * handle_removed_pt() - handle a page table removed from the TDP structure
327  *
328  * @kvm: kvm instance
329  * @pt: the page removed from the paging structure
330  * @shared: This operation may not be running under the exclusive use
331  *	    of the MMU lock and the operation must synchronize with other
332  *	    threads that might be modifying SPTEs.
333  *
334  * Given a page table that has been removed from the TDP paging structure,
335  * iterates through the page table to clear SPTEs and free child page tables.
336  *
337  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
338  * protection. Since this thread removed it from the paging structure,
339  * this thread will be responsible for ensuring the page is freed. Hence the
340  * early rcu_dereferences in the function.
341  */
342 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
343 {
344 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
345 	int level = sp->role.level;
346 	gfn_t base_gfn = sp->gfn;
347 	int i;
348 
349 	trace_kvm_mmu_prepare_zap_page(sp);
350 
351 	tdp_mmu_unlink_sp(kvm, sp);
352 
353 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
354 		tdp_ptep_t sptep = pt + i;
355 		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
356 		u64 old_spte;
357 
358 		if (shared) {
359 			/*
360 			 * Set the SPTE to a nonpresent value that other
361 			 * threads will not overwrite. If the SPTE was
362 			 * already marked as frozen then another thread
363 			 * keep retrying the write until the value read back
364 			 * is something other than the frozen SPTE value.
365 			 * value to the frozen SPTE value.
366 			 */
367 			for (;;) {
368 				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
369 				if (!is_frozen_spte(old_spte))
370 					break;
371 				cpu_relax();
372 			}
373 		} else {
374 			/*
375 			 * If the SPTE is not MMU-present, there is no backing
376 			 * page associated with the SPTE and so no side effects
377 			 * that need to be recorded, and exclusive ownership of
378 			 * mmu_lock ensures the SPTE can't be made present.
379 			 * Note, zapping MMIO SPTEs is also unnecessary as they
380 			 * are guarded by the memslots generation, not by being
381 			 * unreachable.
382 			 */
383 			old_spte = kvm_tdp_mmu_read_spte(sptep);
384 			if (!is_shadow_present_pte(old_spte))
385 				continue;
386 
387 			/*
388 			 * Use the common helper instead of a raw WRITE_ONCE as
389 			 * the SPTE needs to be updated atomically if it can be
390 			 * modified by a different vCPU outside of mmu_lock.
391 			 * Even though the parent SPTE is !PRESENT, the TLB
392 			 * hasn't yet been flushed, and both Intel and AMD
393 			 * document that A/D assists can use upper-level PxE
394 			 * entries that are cached in the TLB, i.e. the CPU can
395 			 * still access the page and mark it dirty.
396 			 *
397 			 * No retry is needed in the atomic update path as the
398 			 * sole concern is dropping a Dirty bit, i.e. no other
399 			 * task can zap/remove the SPTE as mmu_lock is held for
400 			 * write.  Marking the SPTE as a frozen SPTE is not
401 			 * strictly necessary for the same reason, but using
402 			 * the frozen SPTE value keeps the shared/exclusive
403 			 * paths consistent and allows the handle_changed_spte()
404 			 * call below to hardcode the new value to FROZEN_SPTE.
405 			 *
406 			 * Note, even though dropping a Dirty bit is the only
407 			 * scenario where a non-atomic update could result in a
408 			 * functional bug, simply checking the Dirty bit isn't
409 			 * sufficient as a fast page fault could read the upper
410 			 * level SPTE before it is zapped, and then make this
411 			 * target SPTE writable, resume the guest, and set the
412 			 * Dirty bit between reading the SPTE above and writing
413 			 * it here.
414 			 */
415 			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
416 							  FROZEN_SPTE, level);
417 		}
418 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
419 				    old_spte, FROZEN_SPTE, level, shared);
420 	}
421 
422 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
423 }
424 
425 /**
426  * handle_changed_spte - handle bookkeeping associated with an SPTE change
427  * @kvm: kvm instance
428  * @as_id: the address space of the paging structure the SPTE was a part of
429  * @gfn: the base GFN that was mapped by the SPTE
430  * @old_spte: The value of the SPTE before the change
431  * @new_spte: The value of the SPTE after the change
432  * @level: the level of the PT the SPTE is part of in the paging structure
433  * @shared: This operation may not be running under the exclusive use of
434  *	    the MMU lock and the operation must synchronize with other
435  *	    threads that might be modifying SPTEs.
436  *
437  * Handle bookkeeping that might result from the modification of a SPTE.  Note,
438  * dirty logging updates are handled in common code, not here (see make_spte()
439  * and fast_pf_fix_direct_spte()).
440  */
441 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
442 				u64 old_spte, u64 new_spte, int level,
443 				bool shared)
444 {
445 	bool was_present = is_shadow_present_pte(old_spte);
446 	bool is_present = is_shadow_present_pte(new_spte);
447 	bool was_leaf = was_present && is_last_spte(old_spte, level);
448 	bool is_leaf = is_present && is_last_spte(new_spte, level);
449 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
450 
451 	WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
452 	WARN_ON_ONCE(level < PG_LEVEL_4K);
453 	WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
454 
455 	/*
456 	 * If this warning were to trigger it would indicate that there was a
457 	 * missing MMU notifier or a race with some notifier handler.
458 	 * A present, leaf SPTE should never be directly replaced with another
459 	 * present leaf SPTE pointing to a different PFN. A notifier handler
460 	 * should be zapping the SPTE before the main MM's page table is
461 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
462 	 * thread before replacement.
463 	 */
464 	if (was_leaf && is_leaf && pfn_changed) {
465 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
466 		       "SPTE with another present leaf SPTE mapping a\n"
467 		       "different PFN!\n"
468 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
469 		       as_id, gfn, old_spte, new_spte, level);
470 
471 		/*
472 		 * Crash the host to prevent error propagation and guest data
473 		 * corruption.
474 		 */
475 		BUG();
476 	}
477 
478 	if (old_spte == new_spte)
479 		return;
480 
481 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
482 
483 	if (is_leaf)
484 		check_spte_writable_invariants(new_spte);
485 
486 	/*
487 	 * The only time a SPTE should be changed from a non-present to a
488 	 * non-present state is when an MMIO entry is installed/modified/
489 	 * removed. In that case, there is nothing to do here.
490 	 */
491 	if (!was_present && !is_present) {
492 		/*
493 		 * If this change does not involve a MMIO SPTE or frozen SPTE,
494 		 * it is unexpected. Log the change, though it should not
495 		 * impact the guest since both the former and current SPTEs
496 		 * are nonpresent.
497 		 */
498 		if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) &&
499 				 !is_mmio_spte(kvm, new_spte) &&
500 				 !is_frozen_spte(new_spte)))
501 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
502 			       "should not be replaced with another,\n"
503 			       "different nonpresent SPTE, unless one or both\n"
504 			       "are MMIO SPTEs, or the new SPTE is\n"
505 			       "a temporary frozen SPTE.\n"
506 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
507 			       as_id, gfn, old_spte, new_spte, level);
508 		return;
509 	}
510 
511 	if (is_leaf != was_leaf)
512 		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
513 
514 	if (was_leaf && is_dirty_spte(old_spte) &&
515 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
516 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
517 
518 	/*
519 	 * Recursively handle child PTs if the change removed a subtree from
520 	 * the paging structure.  Note the WARN on the PFN changing without the
521 	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
522 	 * pages are kernel allocations and should never be migrated.
523 	 */
524 	if (was_present && !was_leaf &&
525 	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
526 		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
527 
528 	if (was_leaf && is_accessed_spte(old_spte) &&
529 	    (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
530 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
531 }
532 
533 static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
534 							 u64 new_spte)
535 {
536 	u64 *sptep = rcu_dereference(iter->sptep);
537 
538 	/*
539 	 * The caller is responsible for ensuring the old SPTE is not a FROZEN
540 	 * SPTE.  KVM should never attempt to zap or manipulate a FROZEN SPTE,
541 	 * and pre-checking before inserting a new SPTE is advantageous as it
542 	 * avoids unnecessary work.
543 	 */
544 	WARN_ON_ONCE(iter->yielded || is_frozen_spte(iter->old_spte));
545 
546 	/*
547 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
548 	 * does not hold the mmu_lock.  On failure, i.e. if a different logical
549 	 * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
550 	 * the current value, so the caller operates on fresh data, e.g. if it
551 	 * retries tdp_mmu_set_spte_atomic().
552 	 */
553 	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
554 		return -EBUSY;
555 
556 	return 0;
557 }
558 
559 /*
560  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
561  * and handle the associated bookkeeping.  Do not mark the page dirty
562  * in KVM's dirty bitmaps.
563  *
564  * If setting the SPTE fails because it has changed, iter->old_spte will be
565  * refreshed to the current value of the spte.
566  *
567  * @kvm: kvm instance
568  * @iter: a tdp_iter instance currently on the SPTE that should be set
569  * @new_spte: The value the SPTE should be set to
570  * Return:
571  * * 0      - If the SPTE was set.
572  * * -EBUSY - If the SPTE cannot be set. In this case this function will have
573  *            no side-effects other than setting iter->old_spte to the last
574  *            known value of the spte.
575  */
576 static inline int __must_check tdp_mmu_set_spte_atomic(struct kvm *kvm,
577 						       struct tdp_iter *iter,
578 						       u64 new_spte)
579 {
580 	int ret;
581 
582 	lockdep_assert_held_read(&kvm->mmu_lock);
583 
584 	ret = __tdp_mmu_set_spte_atomic(iter, new_spte);
585 	if (ret)
586 		return ret;
587 
588 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
589 			    new_spte, iter->level, true);
590 
591 	return 0;
592 }
593 
594 static inline int __must_check tdp_mmu_zap_spte_atomic(struct kvm *kvm,
595 						       struct tdp_iter *iter)
596 {
597 	int ret;
598 
599 	lockdep_assert_held_read(&kvm->mmu_lock);
600 
601 	/*
602 	 * Freeze the SPTE by setting it to a special, non-present value. This
603 	 * will stop other threads from immediately installing a present entry
604 	 * in its place before the TLBs are flushed.
605 	 *
606 	 * Delay processing of the zapped SPTE until after TLBs are flushed and
607 	 * the FROZEN_SPTE is replaced (see below).
608 	 */
609 	ret = __tdp_mmu_set_spte_atomic(iter, FROZEN_SPTE);
610 	if (ret)
611 		return ret;
612 
613 	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
614 
615 	/*
616 	 * No other thread can overwrite the frozen SPTE as they must either
617 	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
618 	 * overwrite the special frozen SPTE value. Use the raw write helper to
619 	 * avoid an unnecessary check on volatile bits.
620 	 */
621 	__kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE);
622 
623 	/*
624 	 * Process the zapped SPTE after flushing TLBs, and after replacing
625 	 * FROZEN_SPTE with 0. This minimizes the amount of time vCPUs are
626 	 * blocked by the FROZEN_SPTE and reduces contention on the child
627 	 * SPTEs.
628 	 */
629 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
630 			    SHADOW_NONPRESENT_VALUE, iter->level, true);
631 
632 	return 0;
633 }
634 
635 
636 /*
637  * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
638  * @kvm:	      KVM instance
639  * @as_id:	      Address space ID, i.e. regular vs. SMM
640  * @sptep:	      Pointer to the SPTE
641  * @old_spte:	      The current value of the SPTE
642  * @new_spte:	      The new value that will be set for the SPTE
643  * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
644  * @level:	      The level _containing_ the SPTE (its parent PT's level)
645  *
646  * Returns the old SPTE value, which _may_ be different than @old_spte if the
647  * SPTE had volatile bits.
648  */
649 static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
650 			    u64 old_spte, u64 new_spte, gfn_t gfn, int level)
651 {
652 	lockdep_assert_held_write(&kvm->mmu_lock);
653 
654 	/*
655 	 * No thread should be using this function to set SPTEs to or from the
656 	 * temporary frozen SPTE value.
657 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
658 	 * should be used. If operating under the MMU lock in write mode, the
659 	 * use of the frozen SPTE should not be necessary.
660 	 */
661 	WARN_ON_ONCE(is_frozen_spte(old_spte) || is_frozen_spte(new_spte));
662 
663 	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
664 
665 	handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
666 	return old_spte;
667 }
668 
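/*
 * Wrapper around tdp_mmu_set_spte() that pulls the SPTE's details from a
 * tdp_iter and refreshes iter->old_spte with the value that was actually
 * present, which may include volatile bits.  Requires mmu_lock held for
 * write (asserted by tdp_mmu_set_spte()).
 */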
669 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
670 					 u64 new_spte)
671 {
672 	WARN_ON_ONCE(iter->yielded);
673 	iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
674 					  iter->old_spte, new_spte,
675 					  iter->gfn, iter->level);
676 }
677 
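/*
 * Iterators over the SPTEs mapping [start, end) in a given root: all SPTEs,
 * only present leaf SPTEs, and the SPTEs reachable from the MMU's current
 * root, respectively.
 */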
678 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
679 	for_each_tdp_pte(_iter, _root, _start, _end)
680 
681 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
682 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
683 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
684 		    !is_last_spte(_iter.old_spte, _iter.level))		\
685 			continue;					\
686 		else
687 
688 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
689 	for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
690 
691 /*
692  * Yield if the MMU lock is contended or this thread needs to return control
693  * to the scheduler.
694  *
695  * If this function should yield and flush is set, it will perform a remote
696  * TLB flush before yielding.
697  *
698  * If this function yields, iter->yielded is set and the caller must skip to
699  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
700  * over the paging structures to allow the iterator to continue its traversal
701  * from the paging structure root.
702  *
703  * Returns true if this function yielded.
704  */
705 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
706 							  struct tdp_iter *iter,
707 							  bool flush, bool shared)
708 {
709 	WARN_ON_ONCE(iter->yielded);
710 
711 	/* Ensure forward progress has been made before yielding. */
712 	if (iter->next_last_level_gfn == iter->yielded_gfn)
713 		return false;
714 
715 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
716 		if (flush)
717 			kvm_flush_remote_tlbs(kvm);
718 
719 		rcu_read_unlock();
720 
721 		if (shared)
722 			cond_resched_rwlock_read(&kvm->mmu_lock);
723 		else
724 			cond_resched_rwlock_write(&kvm->mmu_lock);
725 
726 		rcu_read_lock();
727 
728 		WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);
729 
730 		iter->yielded = true;
731 	}
732 
733 	return iter->yielded;
734 }
735 
736 static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
737 {
738 	/*
739 	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
740 	 * a gpa range that would exceed the max gfn, and KVM does not create
741 	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
742 	 * the slow emulation path every time.
743 	 */
744 	return kvm_mmu_max_gfn() + 1;
745 }
746 
747 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
748 			       bool shared, int zap_level)
749 {
750 	struct tdp_iter iter;
751 
752 	gfn_t end = tdp_mmu_max_gfn_exclusive();
753 	gfn_t start = 0;
754 
755 	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
756 retry:
757 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
758 			continue;
759 
760 		if (!is_shadow_present_pte(iter.old_spte))
761 			continue;
762 
763 		if (iter.level > zap_level)
764 			continue;
765 
766 		if (!shared)
767 			tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
768 		else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
769 			goto retry;
770 	}
771 }
772 
773 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
774 			     bool shared)
775 {
776 
777 	/*
778 	 * The root must have an elevated refcount so that it's reachable via
779 	 * mmu_notifier callbacks, which allows this path to yield and drop
780 	 * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
781 	 * must drop all references to relevant pages prior to completing the
782 	 * callback.  Dropping mmu_lock with an unreachable root would result
783 	 * in zapping SPTEs after a relevant mmu_notifier callback completes
784 	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
785 	 * dirty accessed bits to the SPTE's associated struct page.
786 	 */
787 	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
788 
789 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
790 
791 	rcu_read_lock();
792 
793 	/*
794 	 * Zap roots in multiple passes of decreasing granularity, i.e. zap at
795 	 * 4KiB=>2MiB=>1GiB=>root, in order to better honor need_resched() (all
796 	 * preempt models) or mmu_lock contention (full or real-time models).
797 	 * Zapping at finer granularity marginally increases the total time of
798 	 * the zap, but in most cases the zap itself isn't latency sensitive.
799 	 *
800 	 * If KVM is configured to prove the MMU, skip the 4KiB and 2MiB zaps
801 	 * in order to mimic the page fault path, which can replace a 1GiB page
802 	 * table with an equivalent 1GiB hugepage, i.e. can get saddled with
803 	 * zapping a 1GiB region that's fully populated with 4KiB SPTEs.  This
804 	 * allows verifying that KVM can safely zap 1GiB regions, e.g. without
805 	 * inducing RCU stalls, without relying on a relatively rare event
806 	 * (zapping roots is orders of magnitude more common).  Note, because
807 	 * zapping a SP recurses on its children, stepping down to PG_LEVEL_4K
808 	 * in the iterator itself is unnecessary.
809 	 */
810 	if (!IS_ENABLED(CONFIG_KVM_PROVE_MMU)) {
811 		__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K);
812 		__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M);
813 	}
814 	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
815 	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);
816 
817 	rcu_read_unlock();
818 }
819 
820 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
821 {
822 	u64 old_spte;
823 
824 	/*
825 	 * This helper intentionally doesn't allow zapping a root shadow page,
826 	 * which doesn't have a parent page table and thus no associated entry.
827 	 */
828 	if (WARN_ON_ONCE(!sp->ptep))
829 		return false;
830 
831 	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
832 	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
833 		return false;
834 
835 	tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte,
836 			 SHADOW_NONPRESENT_VALUE, sp->gfn, sp->role.level + 1);
837 
838 	return true;
839 }
840 
841 /*
842  * If can_yield is true, will release the MMU lock and reschedule if the
843  * scheduler needs the CPU or there is contention on the MMU lock. If this
844  * function cannot yield, it will not release the MMU lock or reschedule and
845  * the caller must ensure it does not supply too large a GFN range, or the
846  * operation can cause a soft lockup.
847  */
848 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
849 			      gfn_t start, gfn_t end, bool can_yield, bool flush)
850 {
851 	struct tdp_iter iter;
852 
853 	end = min(end, tdp_mmu_max_gfn_exclusive());
854 
855 	lockdep_assert_held_write(&kvm->mmu_lock);
856 
857 	rcu_read_lock();
858 
859 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
860 		if (can_yield &&
861 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
862 			flush = false;
863 			continue;
864 		}
865 
866 		if (!is_shadow_present_pte(iter.old_spte) ||
867 		    !is_last_spte(iter.old_spte, iter.level))
868 			continue;
869 
870 		tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
871 
872 		/*
873 		 * Zapping SPTEs in invalid roots doesn't require a TLB flush,
874 		 * see kvm_tdp_mmu_zap_invalidated_roots() for details.
875 		 */
876 		if (!root->role.invalid)
877 			flush = true;
878 	}
879 
880 	rcu_read_unlock();
881 
882 	/*
883 	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
884 	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
885 	 */
886 	return flush;
887 }
888 
889 /*
890  * Zap leaf SPTEs for the range of gfns, [start, end), for all *valid* roots.
891  * Returns true if a TLB flush is needed before releasing the MMU lock, i.e. if
892  * one or more SPTEs were zapped since the MMU lock was last acquired.
893  */
894 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
895 {
896 	struct kvm_mmu_page *root;
897 
898 	lockdep_assert_held_write(&kvm->mmu_lock);
899 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1)
900 		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
901 
902 	return flush;
903 }
904 
905 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
906 {
907 	struct kvm_mmu_page *root;
908 
909 	/*
910 	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
911 	 * before returning to the caller.  Zap directly even if the root is
912 	 * also being zapped by a worker.  Walking zapped top-level SPTEs isn't
913 	 * all that expensive and mmu_lock is already held, which means the
914 	 * worker has yielded, i.e. flushing the work instead of zapping here
915 	 * isn't guaranteed to be any faster.
916 	 *
917 	 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
918 	 * is being destroyed or the userspace VMM has exited.  In both cases,
919 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
920 	 */
921 	lockdep_assert_held_write(&kvm->mmu_lock);
922 	for_each_tdp_mmu_root_yield_safe(kvm, root)
923 		tdp_mmu_zap_root(kvm, root, false);
924 }
925 
926 /*
927  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
928  * zap" completes.
929  */
930 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
931 {
932 	struct kvm_mmu_page *root;
933 
934 	read_lock(&kvm->mmu_lock);
935 
936 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
937 		if (!root->tdp_mmu_scheduled_root_to_zap)
938 			continue;
939 
940 		root->tdp_mmu_scheduled_root_to_zap = false;
941 		KVM_BUG_ON(!root->role.invalid, kvm);
942 
943 		/*
944 		 * A TLB flush is not necessary as KVM performs a local TLB
945 		 * flush when allocating a new root (see kvm_mmu_load()), and
946 		 * when migrating a vCPU to a different pCPU.  Note, the local
947 		 * TLB flush on reuse also invalidates paging-structure-cache
948 		 * entries, i.e. TLB entries for intermediate paging structures,
949 		 * that may be zapped, as such entries are associated with the
950 		 * ASID on both VMX and SVM.
951 		 */
952 		tdp_mmu_zap_root(kvm, root, true);
953 
954 		/*
955 		 * The reference needs to be put *after* zapping the root, as
956 		 * the root must be reachable by mmu_notifiers while it's being
957 		 * zapped.
958 		 */
959 		kvm_tdp_mmu_put_root(kvm, root);
960 	}
961 
962 	read_unlock(&kvm->mmu_lock);
963 }
964 
965 /*
966  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
967  * is about to be zapped, e.g. in response to a memslots update.  The actual
968  * zapping is done separately so that it happens with mmu_lock held for read,
969  * whereas invalidating roots must be done with mmu_lock held for write (unless
970  * the VM is being destroyed).
971  *
972  * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
973  * See kvm_tdp_mmu_alloc_root().
974  */
975 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
976 {
977 	struct kvm_mmu_page *root;
978 
979 	/*
980 	 * mmu_lock must be held for write to ensure that a root doesn't become
981 	 * invalid while there are active readers (invalidating a root while
982 	 * there are active readers may or may not be problematic in practice,
983 	 * but it's uncharted territory and not supported).
984 	 *
985 	 * Waive the assertion if there are no users of @kvm, i.e. the VM is
986 	 * being destroyed after all references have been put, or if no vCPUs
987 	 * have been created (which means there are no roots), i.e. the VM is
988 	 * being destroyed in an error path of KVM_CREATE_VM.
989 	 */
990 	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
991 	    refcount_read(&kvm->users_count) && kvm->created_vcpus)
992 		lockdep_assert_held_write(&kvm->mmu_lock);
993 
994 	/*
995 	 * As above, mmu_lock isn't held when destroying the VM!  There can't
996 	 * be other references to @kvm, i.e. nothing else can invalidate roots
997 	 * or get/put references to roots.
998 	 */
999 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
1000 		/*
1001 		 * Note, invalid roots can outlive a memslot update!  Invalid
1002 		 * roots must be *zapped* before the memslot update completes,
1003 		 * but a different task can acquire a reference and keep the
1004 		 * root alive after its been zapped.
1005 		 */
1006 		if (!root->role.invalid) {
1007 			root->tdp_mmu_scheduled_root_to_zap = true;
1008 			root->role.invalid = true;
1009 		}
1010 	}
1011 }
1012 
1013 /*
1014  * Installs a last-level SPTE to handle a TDP page fault.
1015  * (NPT/EPT violation/misconfiguration)
1016  */
1017 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
1018 					  struct kvm_page_fault *fault,
1019 					  struct tdp_iter *iter)
1020 {
1021 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
1022 	u64 new_spte;
1023 	int ret = RET_PF_FIXED;
1024 	bool wrprot = false;
1025 
1026 	if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
1027 		return RET_PF_RETRY;
1028 
1029 	if (unlikely(!fault->slot))
1030 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
1031 	else
1032 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
1033 					 fault->pfn, iter->old_spte, fault->prefetch, true,
1034 					 fault->map_writable, &new_spte);
1035 
1036 	if (new_spte == iter->old_spte)
1037 		ret = RET_PF_SPURIOUS;
1038 	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
1039 		return RET_PF_RETRY;
1040 	else if (is_shadow_present_pte(iter->old_spte) &&
1041 		 !is_last_spte(iter->old_spte, iter->level))
1042 		kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
1043 
1044 	/*
1045 	 * If the page fault was caused by a write but the page is write
1046 	 * protected, emulation is needed. If the emulation was skipped,
1047 	 * the vCPU would have the same fault again.
1048 	 */
1049 	if (wrprot && fault->write)
1050 		ret = RET_PF_WRITE_PROTECTED;
1051 
1052 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
1053 	if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {
1054 		vcpu->stat.pf_mmio_spte_created++;
1055 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
1056 				     new_spte);
1057 		ret = RET_PF_EMULATE;
1058 	} else {
1059 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
1060 				       rcu_dereference(iter->sptep));
1061 	}
1062 
1063 	return ret;
1064 }
1065 
1066 /*
1067  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1068  * provided page table.
1069  *
1070  * @kvm: kvm instance
1071  * @iter: a tdp_iter instance currently on the SPTE that should be set
1072  * @sp: The new TDP page table to install.
1073  * @shared: This operation is running under the MMU lock in read mode.
1074  *
1075  * Returns: 0 if the new page table was installed. Non-0 if the page table
1076  *          could not be installed (e.g. the atomic compare-exchange failed).
1077  */
1078 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1079 			   struct kvm_mmu_page *sp, bool shared)
1080 {
1081 	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
1082 	int ret = 0;
1083 
1084 	if (shared) {
1085 		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
1086 		if (ret)
1087 			return ret;
1088 	} else {
1089 		tdp_mmu_iter_set_spte(kvm, iter, spte);
1090 	}
1091 
1092 	tdp_account_mmu_page(kvm, sp);
1093 
1094 	return 0;
1095 }
1096 
1097 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1098 				   struct kvm_mmu_page *sp, bool shared);
1099 
1100 /*
1101  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1102  * page tables and SPTEs to translate the faulting guest physical address.
1103  */
1104 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1105 {
1106 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1107 	struct kvm *kvm = vcpu->kvm;
1108 	struct tdp_iter iter;
1109 	struct kvm_mmu_page *sp;
1110 	int ret = RET_PF_RETRY;
1111 
1112 	kvm_mmu_hugepage_adjust(vcpu, fault);
1113 
1114 	trace_kvm_mmu_spte_requested(fault);
1115 
1116 	rcu_read_lock();
1117 
1118 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1119 		int r;
1120 
1121 		if (fault->nx_huge_page_workaround_enabled)
1122 			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1123 
1124 		/*
1125 		 * If SPTE has been frozen by another thread, just give up and
1126 		 * retry, avoiding unnecessary page table allocation and free.
1127 		 */
1128 		if (is_frozen_spte(iter.old_spte))
1129 			goto retry;
1130 
1131 		if (iter.level == fault->goal_level)
1132 			goto map_target_level;
1133 
1134 		/* Step down into the lower level page table if it exists. */
1135 		if (is_shadow_present_pte(iter.old_spte) &&
1136 		    !is_large_pte(iter.old_spte))
1137 			continue;
1138 
1139 		/*
1140 		 * The SPTE is either non-present or points to a huge page that
1141 		 * needs to be split.
1142 		 */
1143 		sp = tdp_mmu_alloc_sp(vcpu);
1144 		tdp_mmu_init_child_sp(sp, &iter);
1145 
1146 		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
1147 
1148 		if (is_shadow_present_pte(iter.old_spte))
1149 			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1150 		else
1151 			r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1152 
1153 		/*
1154 		 * Force the guest to retry if installing an upper level SPTE
1155 		 * failed, e.g. because a different task modified the SPTE.
1156 		 */
1157 		if (r) {
1158 			tdp_mmu_free_sp(sp);
1159 			goto retry;
1160 		}
1161 
1162 		if (fault->huge_page_disallowed &&
1163 		    fault->req_level >= iter.level) {
1164 			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1165 			if (sp->nx_huge_page_disallowed)
1166 				track_possible_nx_huge_page(kvm, sp);
1167 			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1168 		}
1169 	}
1170 
1171 	/*
1172 	 * The walk aborted before reaching the target level, e.g. because the
1173 	 * iterator detected an upper level SPTE was frozen during traversal.
1174 	 */
1175 	WARN_ON_ONCE(iter.level == fault->goal_level);
1176 	goto retry;
1177 
1178 map_target_level:
1179 	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1180 
1181 retry:
1182 	rcu_read_unlock();
1183 	return ret;
1184 }
1185 
1186 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1187 				 bool flush)
1188 {
1189 	struct kvm_mmu_page *root;
1190 
1191 	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
1192 		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
1193 					  range->may_block, flush);
1194 
1195 	return flush;
1196 }
1197 
1198 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1199 			      struct kvm_gfn_range *range);
1200 
1201 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1202 						   struct kvm_gfn_range *range,
1203 						   tdp_handler_t handler)
1204 {
1205 	struct kvm_mmu_page *root;
1206 	struct tdp_iter iter;
1207 	bool ret = false;
1208 
1209 	/*
1210 	 * Don't support rescheduling, none of the MMU notifiers that funnel
1211 	 * Don't support rescheduling; none of the MMU notifiers that funnel
1212 	 */
1213 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1214 		rcu_read_lock();
1215 
1216 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1217 			ret |= handler(kvm, &iter, range);
1218 
1219 		rcu_read_unlock();
1220 	}
1221 
1222 	return ret;
1223 }
1224 
1225 /*
1226  * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
1227  * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return true
1228  *
1229  * No need to mark the corresponding PFN as accessed as this call is coming
1230  * from the clear_young() or clear_flush_young() notifier, which uses the
1231  * return value to determine if the page has been accessed.
1232  */
1233 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1234 			  struct kvm_gfn_range *range)
1235 {
1236 	u64 new_spte;
1237 
1238 	/* If we have a non-accessed entry we don't need to change the pte. */
1239 	if (!is_accessed_spte(iter->old_spte))
1240 		return false;
1241 
1242 	if (spte_ad_enabled(iter->old_spte)) {
1243 		iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
1244 							 iter->old_spte,
1245 							 shadow_accessed_mask,
1246 							 iter->level);
1247 		new_spte = iter->old_spte & ~shadow_accessed_mask;
1248 	} else {
1249 		/*
1250 		 * Capture the dirty status of the page, so that it doesn't get
1251 		 * lost when the SPTE is marked for access tracking.
1252 		 */
1253 		if (is_writable_pte(iter->old_spte))
1254 			kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
1255 
1256 		new_spte = mark_spte_for_access_track(iter->old_spte);
1257 		iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
1258 							iter->old_spte, new_spte,
1259 							iter->level);
1260 	}
1261 
1262 	trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
1263 				       iter->old_spte, new_spte);
1264 	return true;
1265 }
1266 
1267 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1268 {
1269 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1270 }
1271 
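/* Check whether the SPTE is marked accessed, without clearing the accessed state. */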
1272 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1273 			 struct kvm_gfn_range *range)
1274 {
1275 	return is_accessed_spte(iter->old_spte);
1276 }
1277 
1278 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1279 {
1280 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1281 }
1282 
1283 /*
1284  * Remove write access from all SPTEs at or above min_level that map GFNs
1285  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1286  * be flushed.
1287  */
1288 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1289 			     gfn_t start, gfn_t end, int min_level)
1290 {
1291 	struct tdp_iter iter;
1292 	u64 new_spte;
1293 	bool spte_set = false;
1294 
1295 	rcu_read_lock();
1296 
1297 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1298 
1299 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
1300 retry:
1301 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1302 			continue;
1303 
1304 		if (!is_shadow_present_pte(iter.old_spte) ||
1305 		    !is_last_spte(iter.old_spte, iter.level) ||
1306 		    !(iter.old_spte & PT_WRITABLE_MASK))
1307 			continue;
1308 
1309 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1310 
1311 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1312 			goto retry;
1313 
1314 		spte_set = true;
1315 	}
1316 
1317 	rcu_read_unlock();
1318 	return spte_set;
1319 }
1320 
1321 /*
1322  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1323  * only affect leaf SPTEs down to min_level.
1324  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1325  */
1326 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1327 			     const struct kvm_memory_slot *slot, int min_level)
1328 {
1329 	struct kvm_mmu_page *root;
1330 	bool spte_set = false;
1331 
1332 	lockdep_assert_held_read(&kvm->mmu_lock);
1333 
1334 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1335 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1336 			     slot->base_gfn + slot->npages, min_level);
1337 
1338 	return spte_set;
1339 }
1340 
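/*
 * Allocate a shadow page for eager huge page splitting.  The per-vCPU MMU
 * caches can't be used here (there is no vCPU), so allocate directly; the
 * caller drops mmu_lock around the allocation so that a sleeping
 * GFP_KERNEL_ACCOUNT allocation is safe.
 */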
1341 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(void)
1342 {
1343 	struct kvm_mmu_page *sp;
1344 
1345 	sp = kmem_cache_zalloc(mmu_page_header_cache, GFP_KERNEL_ACCOUNT);
1346 	if (!sp)
1347 		return NULL;
1348 
1349 	sp->spt = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
1350 	if (!sp->spt) {
1351 		kmem_cache_free(mmu_page_header_cache, sp);
1352 		return NULL;
1353 	}
1354 
1355 	return sp;
1356 }
1357 
1358 /* Note, the caller is responsible for initializing @sp. */
1359 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1360 				   struct kvm_mmu_page *sp, bool shared)
1361 {
1362 	const u64 huge_spte = iter->old_spte;
1363 	const int level = iter->level;
1364 	int ret, i;
1365 
1366 	/*
1367 	 * No need for atomics when writing to sp->spt since the page table has
1368 	 * not been linked in yet and thus is not reachable from any other CPU.
1369 	 */
1370 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
1371 		sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1372 
1373 	/*
1374 	 * Replace the huge spte with a pointer to the populated lower level
1375 	 * page table. Since we are making this change without a TLB flush vCPUs
1376 	 * page table. Since we are making this change without a TLB flush, vCPUs
1377 	 * depending on what's currently in their TLB. This is fine from a
1378 	 * correctness standpoint since the translation will be the same either
1379 	 * way.
1380 	 */
1381 	ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1382 	if (ret)
1383 		goto out;
1384 
1385 	/*
1386 	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1387 	 * are overwriting from the page stats. But we have to manually update
1388 	 * the page stats with the new present child pages.
1389 	 */
1390 	kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
1391 
1392 out:
1393 	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1394 	return ret;
1395 }
1396 
1397 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1398 					 struct kvm_mmu_page *root,
1399 					 gfn_t start, gfn_t end,
1400 					 int target_level, bool shared)
1401 {
1402 	struct kvm_mmu_page *sp = NULL;
1403 	struct tdp_iter iter;
1404 
1405 	rcu_read_lock();
1406 
1407 	/*
1408 	 * Traverse the page table splitting all huge pages above the target
1409 	 * level into one lower level. For example, if we encounter a 1GB page
1410 	 * we split it into 512 2MB pages.
1411 	 *
1412 	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1413 	 * to visit an SPTE before ever visiting its children, which means we
1414 	 * will correctly recursively split huge pages that are more than one
1415 	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
1416 	 * and then splitting each of those to 512 4KB pages).
1417 	 */
1418 	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1419 retry:
1420 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1421 			continue;
1422 
1423 		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1424 			continue;
1425 
1426 		if (!sp) {
1427 			rcu_read_unlock();
1428 
1429 			if (shared)
1430 				read_unlock(&kvm->mmu_lock);
1431 			else
1432 				write_unlock(&kvm->mmu_lock);
1433 
1434 			sp = tdp_mmu_alloc_sp_for_split();
1435 
1436 			if (shared)
1437 				read_lock(&kvm->mmu_lock);
1438 			else
1439 				write_lock(&kvm->mmu_lock);
1440 
1441 			if (!sp) {
1442 				trace_kvm_mmu_split_huge_page(iter.gfn,
1443 							      iter.old_spte,
1444 							      iter.level, -ENOMEM);
1445 				return -ENOMEM;
1446 			}
1447 
1448 			rcu_read_lock();
1449 
1450 			iter.yielded = true;
1451 			continue;
1452 		}
1453 
1454 		tdp_mmu_init_child_sp(sp, &iter);
1455 
1456 		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1457 			goto retry;
1458 
1459 		sp = NULL;
1460 	}
1461 
1462 	rcu_read_unlock();
1463 
1464 	/*
1465 	 * It's possible to exit the loop having never used the last sp if, for
1466 	 * example, a vCPU doing HugePage NX splitting wins the race and
1467 	 * installs its own sp in place of the last sp we tried to split.
1468 	 */
1469 	if (sp)
1470 		tdp_mmu_free_sp(sp);
1471 
1472 	return 0;
1473 }
1474 
1475 
1476 /*
1477  * Try to split all huge pages mapped by the TDP MMU down to the target level.
1478  */
1479 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1480 				      const struct kvm_memory_slot *slot,
1481 				      gfn_t start, gfn_t end,
1482 				      int target_level, bool shared)
1483 {
1484 	struct kvm_mmu_page *root;
1485 	int r = 0;
1486 
1487 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1488 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
1489 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1490 		if (r) {
1491 			kvm_tdp_mmu_put_root(kvm, root);
1492 			break;
1493 		}
1494 	}
1495 }
1496 
1497 static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
1498 {
1499 	/*
1500 	 * All TDP MMU shadow pages share the same role as their root, aside
1501 	 * from level, so it is valid to key off any shadow page to determine if
1502 	 * write protection is needed for an entire tree.
1503 	 */
1504 	return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
1505 }
1506 
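/*
 * Clear the dirty-tracking bit, i.e. the D-bit or the W-bit when write
 * protection is used for dirty logging, from all present leaf SPTEs mapping
 * GFNs in [start, end).  Runs with mmu_lock held for read, so SPTEs are
 * updated atomically.  Returns true if any SPTE was modified.
 */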
1507 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1508 			   gfn_t start, gfn_t end)
1509 {
1510 	const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
1511 							    shadow_dirty_mask;
1512 	struct tdp_iter iter;
1513 	bool spte_set = false;
1514 
1515 	rcu_read_lock();
1516 
1517 	tdp_root_for_each_pte(iter, root, start, end) {
1518 retry:
1519 		if (!is_shadow_present_pte(iter.old_spte) ||
1520 		    !is_last_spte(iter.old_spte, iter.level))
1521 			continue;
1522 
1523 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1524 			continue;
1525 
1526 		KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
1527 				spte_ad_need_write_protect(iter.old_spte));
1528 
1529 		if (!(iter.old_spte & dbit))
1530 			continue;
1531 
1532 		if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
1533 			goto retry;
1534 
1535 		spte_set = true;
1536 	}
1537 
1538 	rcu_read_unlock();
1539 	return spte_set;
1540 }
1541 
1542 /*
1543  * Clear the dirty status (D-bit or W-bit) of all the SPTEs mapping GFNs in the
1544  * memslot. Returns true if an SPTE has been changed and the TLBs need to be
1545  * flushed.
1546  */
1547 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1548 				  const struct kvm_memory_slot *slot)
1549 {
1550 	struct kvm_mmu_page *root;
1551 	bool spte_set = false;
1552 
1553 	lockdep_assert_held_read(&kvm->mmu_lock);
1554 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1555 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1556 				slot->base_gfn + slot->npages);
1557 
1558 	return spte_set;
1559 }
1560 
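/*
 * Clear the dirty-tracking bit from the 4KiB SPTEs mapping the GFNs set in
 * @mask, starting at @gfn.  Unlike clear_dirty_gfn_range(), this runs with
 * mmu_lock held for write and so can use non-atomic updates of the SPTEs.
 */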
1561 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1562 				  gfn_t gfn, unsigned long mask, bool wrprot)
1563 {
1564 	const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
1565 									shadow_dirty_mask;
1566 	struct tdp_iter iter;
1567 
1568 	lockdep_assert_held_write(&kvm->mmu_lock);
1569 
1570 	rcu_read_lock();
1571 
1572 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1573 				    gfn + BITS_PER_LONG) {
1574 		if (!mask)
1575 			break;
1576 
1577 		KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
1578 				spte_ad_need_write_protect(iter.old_spte));
1579 
1580 		if (iter.level > PG_LEVEL_4K ||
1581 		    !(mask & (1UL << (iter.gfn - gfn))))
1582 			continue;
1583 
1584 		mask &= ~(1UL << (iter.gfn - gfn));
1585 
1586 		if (!(iter.old_spte & dbit))
1587 			continue;
1588 
1589 		iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
1590 							iter.old_spte, dbit,
1591 							iter.level);
1592 
1593 		trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
1594 					       iter.old_spte,
1595 					       iter.old_spte & ~dbit);
1596 		kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
1597 	}
1598 
1599 	rcu_read_unlock();
1600 }
1601 
1602 /*
1603  * Clear the dirty status (D-bit or W-bit) of all the 4k SPTEs mapping GFNs for
1604  * which a bit is set in mask, starting at gfn. The given memslot is expected to
1605  * contain all the GFNs represented by set bits in the mask.
1606  */
1607 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1608 				       struct kvm_memory_slot *slot,
1609 				       gfn_t gfn, unsigned long mask,
1610 				       bool wrprot)
1611 {
1612 	struct kvm_mmu_page *root;
1613 
1614 	for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
1615 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1616 }
1617 
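/*
 * Zap non-leaf SPTEs in the slot that could instead be mapped by a huge page,
 * so that the next fault on the region can reinstall a huge mapping.  Runs
 * with mmu_lock held for read.
 */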
1618 static void zap_collapsible_spte_range(struct kvm *kvm,
1619 				       struct kvm_mmu_page *root,
1620 				       const struct kvm_memory_slot *slot)
1621 {
1622 	gfn_t start = slot->base_gfn;
1623 	gfn_t end = start + slot->npages;
1624 	struct tdp_iter iter;
1625 	int max_mapping_level;
1626 
1627 	rcu_read_lock();
1628 
1629 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
1630 retry:
1631 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1632 			continue;
1633 
1634 		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
1635 		    !is_shadow_present_pte(iter.old_spte))
1636 			continue;
1637 
1638 		/*
1639 		 * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
1640 		 * a large page size, then its parent would have been zapped
1641 		 * instead of stepping down.
1642 		 */
1643 		if (is_last_spte(iter.old_spte, iter.level))
1644 			continue;
1645 
1646 		/*
1647 		 * If iter.gfn resides outside of the slot, i.e. the page for
1648 		 * the current level overlaps but is not contained by the slot,
1649 		 * then the SPTE can't be made huge.  More importantly, trying
1650 		 * to query that info from slot->arch.lpage_info will cause an
1651 		 * out-of-bounds access.
1652 		 */
1653 		if (iter.gfn < start || iter.gfn >= end)
1654 			continue;
1655 
1656 		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1657 							      iter.gfn, PG_LEVEL_NUM);
1658 		if (max_mapping_level < iter.level)
1659 			continue;
1660 
1661 		/* Note, a successful atomic zap also does a remote TLB flush. */
1662 		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
1663 			goto retry;
1664 	}
1665 
1666 	rcu_read_unlock();
1667 }
1668 
1669 /*
1670  * Zap non-leaf SPTEs (and free their associated page tables) which could
1671  * be replaced by huge pages, for GFNs within the slot.
1672  */
1673 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1674 				       const struct kvm_memory_slot *slot)
1675 {
1676 	struct kvm_mmu_page *root;
1677 
1678 	lockdep_assert_held_read(&kvm->mmu_lock);
1679 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1680 		zap_collapsible_spte_range(kvm, root, slot);
1681 }
1682 
1683 /*
1684  * Removes write access on the last level SPTE mapping this GFN and unsets the
1685  * MMU-writable bit to ensure future writes continue to be intercepted.
1686  * Returns true if an SPTE was set and a TLB flush is needed.
1687  */
1688 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1689 			      gfn_t gfn, int min_level)
1690 {
1691 	struct tdp_iter iter;
1692 	u64 new_spte;
1693 	bool spte_set = false;
1694 
1695 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1696 
1697 	rcu_read_lock();
1698 
1699 	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1700 		if (!is_shadow_present_pte(iter.old_spte) ||
1701 		    !is_last_spte(iter.old_spte, iter.level))
1702 			continue;
1703 
1704 		new_spte = iter.old_spte &
1705 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1706 
1707 		if (new_spte == iter.old_spte)
1708 			break;
1709 
1710 		tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
1711 		spte_set = true;
1712 	}
1713 
1714 	rcu_read_unlock();
1715 
1716 	return spte_set;
1717 }
1718 
1719 /*
1720  * Removes write access on the last level SPTE mapping this GFN and unsets the
1721  * MMU-writable bit to ensure future writes continue to be intercepted.
1722  * Returns true if an SPTE was set and a TLB flush is needed.
1723  */
1724 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1725 				   struct kvm_memory_slot *slot, gfn_t gfn,
1726 				   int min_level)
1727 {
1728 	struct kvm_mmu_page *root;
1729 	bool spte_set = false;
1730 
1731 	lockdep_assert_held_write(&kvm->mmu_lock);
1732 	for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
1733 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1734 
1735 	return spte_set;
1736 }
1737 
1738 /*
1739  * Return the level of the lowest level SPTE added to sptes.
1740  * That SPTE may be non-present.
1741  *
1742  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1743  */
1744 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1745 			 int *root_level)
1746 {
1747 	struct tdp_iter iter;
1748 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1749 	gfn_t gfn = addr >> PAGE_SHIFT;
1750 	int leaf = -1;
1751 
1752 	*root_level = vcpu->arch.mmu->root_role.level;
1753 
1754 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1755 		leaf = iter.level;
1756 		sptes[leaf] = iter.old_spte;
1757 	}
1758 
1759 	return leaf;
1760 }
1761 
1762 /*
1763  * Returns the last level spte pointer of the shadow page walk for the given
1764  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
1765  * walk could be performed, returns NULL and *spte does not contain valid data.
1766  *
1767  * Contract:
1768  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1769  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
1770  *
1771  * WARNING: This function is only intended to be called during fast_page_fault.
1772  */
1773 u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
1774 					u64 *spte)
1775 {
1776 	struct tdp_iter iter;
1777 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1778 	tdp_ptep_t sptep = NULL;
1779 
1780 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1781 		*spte = iter.old_spte;
1782 		sptep = iter.sptep;
1783 	}
1784 
1785 	/*
1786 	 * Perform the rcu_dereference to get the raw spte pointer value since
1787 	 * we are passing it up to fast_page_fault, which is shared with the
1788 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1789 	 * annotation.
1790 	 *
1791 	 * This is safe since fast_page_fault obeys the contracts of this
1792 	 * function as well as all TDP MMU contracts around modifying SPTEs
1793 	 * outside of mmu_lock.
1794 	 */
1795 	return rcu_dereference(sptep);
1796 }
1797