xref: /linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 90d32e92011eaae8e70a9169b4e7acf4ca8f9d3a)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include "mmu.h"
5 #include "mmu_internal.h"
6 #include "mmutrace.h"
7 #include "tdp_iter.h"
8 #include "tdp_mmu.h"
9 #include "spte.h"
10 
11 #include <asm/cmpxchg.h>
12 #include <trace/events/kvm.h>
13 
14 /* Initializes the TDP MMU for the VM, if enabled. */
15 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
16 {
17 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
18 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
19 }
20 
21 /* Arbitrarily returns true so that this may be used in if statements. */
22 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
23 							     bool shared)
24 {
25 	if (shared)
26 		lockdep_assert_held_read(&kvm->mmu_lock);
27 	else
28 		lockdep_assert_held_write(&kvm->mmu_lock);
29 
30 	return true;
31 }
32 
33 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
34 {
35 	/*
36 	 * Invalidate all roots, which besides the obvious, schedules all roots
37 	 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
38 	 * ultimately frees all roots.
39 	 */
40 	kvm_tdp_mmu_invalidate_all_roots(kvm);
41 	kvm_tdp_mmu_zap_invalidated_roots(kvm);
42 
43 	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
44 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
45 
46 	/*
47 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
48 	 * can run before the VM is torn down.  Putting the last reference to
49 	 * zapped roots will create new callbacks.
50 	 */
51 	rcu_barrier();
52 }
53 
54 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
55 {
56 	free_page((unsigned long)sp->spt);
57 	kmem_cache_free(mmu_page_header_cache, sp);
58 }
59 
60 /*
61  * This is called through call_rcu in order to free TDP page table memory
62  * safely with respect to other kernel threads that may be operating on
63  * the memory.
64  * By only accessing TDP MMU page table memory in an RCU read-side critical
65  * section, and freeing it only after a grace period, lockless walkers are
66  * guaranteed not to use the memory after it has been freed.
67  */
68 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
69 {
70 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
71 					       rcu_head);
72 
73 	tdp_mmu_free_sp(sp);
74 }
75 
76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
77 {
78 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
79 		return;
80 
81 	/*
82 	 * The TDP MMU itself holds a reference to each root until the root is
83 	 * explicitly invalidated, i.e. the final reference should never be
84 	 * put for a valid root.
85 	 */
86 	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
87 
88 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
89 	list_del_rcu(&root->link);
90 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
91 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
92 }
93 
94 /*
95  * Returns the next root after @prev_root (or the first root if @prev_root is
96  * NULL).  A reference to the returned root is acquired, and the reference to
97  * @prev_root is released (the caller obviously must hold a reference to
98  * @prev_root if it's non-NULL).
99  *
100  * If @only_valid is true, invalid roots are skipped.
101  *
102  * Returns NULL if the end of tdp_mmu_roots was reached.
103  */
104 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
105 					      struct kvm_mmu_page *prev_root,
106 					      bool only_valid)
107 {
108 	struct kvm_mmu_page *next_root;
109 
110 	/*
111 	 * While the roots themselves are RCU-protected, fields such as
112 	 * role.invalid are protected by mmu_lock.
113 	 */
114 	lockdep_assert_held(&kvm->mmu_lock);
115 
116 	rcu_read_lock();
117 
118 	if (prev_root)
119 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
120 						  &prev_root->link,
121 						  typeof(*prev_root), link);
122 	else
123 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
124 						   typeof(*next_root), link);
125 
126 	while (next_root) {
127 		if ((!only_valid || !next_root->role.invalid) &&
128 		    kvm_tdp_mmu_get_root(next_root))
129 			break;
130 
131 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
132 				&next_root->link, typeof(*next_root), link);
133 	}
134 
135 	rcu_read_unlock();
136 
137 	if (prev_root)
138 		kvm_tdp_mmu_put_root(kvm, prev_root);
139 
140 	return next_root;
141 }
142 
143 /*
144  * Note: this iterator gets and puts references to the roots it iterates over.
145  * This makes it safe to release the MMU lock and yield within the loop, but
146  * if exiting the loop early, the caller must drop the reference to the most
147  * recent root. (Unless keeping a live reference is desirable.)
148  *
149  * If shared is set, this function is operating under the MMU lock in read
150  * mode.
151  */
152 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)	\
153 	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);		\
154 	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;		\
155 	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))		\
156 		if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {	\
157 		} else
158 
159 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)	\
160 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
161 
162 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
163 	for (_root = tdp_mmu_next_root(_kvm, NULL, false);		\
164 	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
165 	     _root = tdp_mmu_next_root(_kvm, _root, false))
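
/*
 * Illustrative caller pattern (a sketch, not kernel code; do_work() is a
 * stand-in for the per-root operation): a caller that exits the loop early
 * must put the reference it holds on the current root, e.g.
 *
 *	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) {
 *		if (do_work(kvm, root)) {
 *			kvm_tdp_mmu_put_root(kvm, root);
 *			break;
 *		}
 *	}
 */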
166 
167 /*
168  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
169  * the implication being that any flow that holds mmu_lock for read is
170  * inherently yield-friendly and should use the yield-safe variant above.
171  * Holding mmu_lock for write obviates the need for RCU protection as the list
172  * is guaranteed to be stable.
173  */
174 #define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _only_valid)		\
175 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)		\
176 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&		\
177 		    ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) ||	\
178 		     ((_only_valid) && (_root)->role.invalid))) {		\
179 		} else
180 
181 #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
182 	__for_each_tdp_mmu_root(_kvm, _root, _as_id, false)
183 
184 #define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id)		\
185 	__for_each_tdp_mmu_root(_kvm, _root, _as_id, true)
186 
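/*
 * Allocate a shadow page for the TDP MMU.  The page header and the page table
 * page come from the vCPU's memory caches, which are topped up before
 * mmu_lock is acquired, i.e. this doesn't sleep.
 */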
187 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
188 {
189 	struct kvm_mmu_page *sp;
190 
191 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
192 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
193 
194 	return sp;
195 }
196 
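/*
 * Initialize a freshly allocated shadow page: stash a back-pointer to the
 * header in the page table page's page_private, and record the role, base gfn
 * and parent SPTE pointer.
 */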
197 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
198 			    gfn_t gfn, union kvm_mmu_page_role role)
199 {
200 	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
201 
202 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
203 
204 	sp->role = role;
205 	sp->gfn = gfn;
206 	sp->ptep = sptep;
207 	sp->tdp_mmu_page = true;
208 
209 	trace_kvm_mmu_get_page(sp, true);
210 }
211 
212 static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
213 				  struct tdp_iter *iter)
214 {
215 	struct kvm_mmu_page *parent_sp;
216 	union kvm_mmu_page_role role;
217 
218 	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
219 
220 	role = parent_sp->role;
221 	role.level--;
222 
223 	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
224 }
225 
226 int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu)
227 {
228 	struct kvm_mmu *mmu = vcpu->arch.mmu;
229 	union kvm_mmu_page_role role = mmu->root_role;
230 	int as_id = kvm_mmu_role_as_id(role);
231 	struct kvm *kvm = vcpu->kvm;
232 	struct kvm_mmu_page *root;
233 
234 	/*
235 	 * Check for an existing root before acquiring the pages lock to avoid
236 	 * unnecessary serialization if multiple vCPUs are loading a new root.
237 	 * E.g. when bringing up secondary vCPUs, KVM will already have created
238 	 * a valid root on behalf of the primary vCPU.
239 	 */
240 	read_lock(&kvm->mmu_lock);
241 
242 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) {
243 		if (root->role.word == role.word)
244 			goto out_read_unlock;
245 	}
246 
247 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
248 
249 	/*
250 	 * Recheck for an existing root after acquiring the pages lock, another
251 	 * vCPU may have raced ahead and created a new usable root.  Manually
252 	 * walk the list of roots as the standard macros assume that the pages
253 	 * lock is *not* held.  WARN if grabbing a reference to a usable root
254 	 * fails, as the last reference to a root can only be put *after* the
255 	 * root has been invalidated, which requires holding mmu_lock for write.
256 	 */
257 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
258 		if (root->role.word == role.word &&
259 		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
260 			goto out_spin_unlock;
261 	}
262 
263 	root = tdp_mmu_alloc_sp(vcpu);
264 	tdp_mmu_init_sp(root, NULL, 0, role);
265 
266 	/*
267 	 * TDP MMU roots are kept until they are explicitly invalidated, either
268 	 * by a memslot update or by the destruction of the VM.  Initialize the
269 	 * refcount to two; one reference for the vCPU, and one reference for
270 	 * the TDP MMU itself, which is held until the root is invalidated and
271 	 * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
272 	 */
273 	refcount_set(&root->tdp_mmu_root_count, 2);
274 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
275 
276 out_spin_unlock:
277 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
278 out_read_unlock:
279 	read_unlock(&kvm->mmu_lock);
280 	/*
281 	 * Note, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS will prevent entering the guest
282 	 * and actually consuming the root if it's invalidated after dropping
283 	 * mmu_lock, and the root can't be freed as this vCPU holds a reference.
284 	 */
285 	mmu->root.hpa = __pa(root->spt);
286 	mmu->root.pgd = 0;
287 	return 0;
288 }
289 
290 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
291 				u64 old_spte, u64 new_spte, int level,
292 				bool shared);
293 
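/*
 * (Un)account a TDP MMU page table page in the kernel's secondary-pagetable
 * memory stats and in KVM's count of TDP MMU pages (kvm->arch.tdp_mmu_pages).
 */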
294 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
295 {
296 	kvm_account_pgtable_pages((void *)sp->spt, +1);
297 	atomic64_inc(&kvm->arch.tdp_mmu_pages);
298 }
299 
300 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
301 {
302 	kvm_account_pgtable_pages((void *)sp->spt, -1);
303 	atomic64_dec(&kvm->arch.tdp_mmu_pages);
304 }
305 
306 /**
307  * tdp_mmu_unlink_sp() - Unaccount a shadow page removed from the TDP MMU
308  *
309  * @kvm: kvm instance
310  * @sp: the page to be removed
311  */
312 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
313 {
314 	tdp_unaccount_mmu_page(kvm, sp);
315 
316 	if (!sp->nx_huge_page_disallowed)
317 		return;
318 
319 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
320 	sp->nx_huge_page_disallowed = false;
321 	untrack_possible_nx_huge_page(kvm, sp);
322 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
323 }
324 
325 /**
326  * handle_removed_pt() - handle a page table removed from the TDP structure
327  *
328  * @kvm: kvm instance
329  * @pt: the page removed from the paging structure
330  * @shared: This operation may not be running under the exclusive use
331  *	    of the MMU lock and the operation must synchronize with other
332  *	    threads that might be modifying SPTEs.
333  *
334  * Given a page table that has been removed from the TDP paging structure,
335  * iterates through the page table to clear SPTEs and free child page tables.
336  *
337  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
338  * protection. Since this thread removed it from the paging structure,
339  * this thread will be responsible for ensuring the page is freed. Hence the
340  * early rcu_dereferences in the function.
341  */
342 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
343 {
344 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
345 	int level = sp->role.level;
346 	gfn_t base_gfn = sp->gfn;
347 	int i;
348 
349 	trace_kvm_mmu_prepare_zap_page(sp);
350 
351 	tdp_mmu_unlink_sp(kvm, sp);
352 
353 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
354 		tdp_ptep_t sptep = pt + i;
355 		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
356 		u64 old_spte;
357 
358 		if (shared) {
359 			/*
360 			 * Set the SPTE to a nonpresent value that other
361 			 * threads will not overwrite. If the SPTE was
362 			 * already marked as removed, then another thread
363 			 * handling a page fault could overwrite it, so keep
364 			 * setting the SPTE until this thread's write changes it
365 			 * from some other value to the removed SPTE value.
366 			 */
367 			for (;;) {
368 				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
369 				if (!is_removed_spte(old_spte))
370 					break;
371 				cpu_relax();
372 			}
373 		} else {
374 			/*
375 			 * If the SPTE is not MMU-present, there is no backing
376 			 * page associated with the SPTE and so no side effects
377 			 * that need to be recorded, and exclusive ownership of
378 			 * mmu_lock ensures the SPTE can't be made present.
379 			 * Note, zapping MMIO SPTEs is also unnecessary as they
380 			 * are guarded by the memslots generation, not by being
381 			 * unreachable.
382 			 */
383 			old_spte = kvm_tdp_mmu_read_spte(sptep);
384 			if (!is_shadow_present_pte(old_spte))
385 				continue;
386 
387 			/*
388 			 * Use the common helper instead of a raw WRITE_ONCE as
389 			 * the SPTE needs to be updated atomically if it can be
390 			 * modified by a different vCPU outside of mmu_lock.
391 			 * Even though the parent SPTE is !PRESENT, the TLB
392 			 * hasn't yet been flushed, and both Intel and AMD
393 			 * document that A/D assists can use upper-level PxE
394 			 * entries that are cached in the TLB, i.e. the CPU can
395 			 * still access the page and mark it dirty.
396 			 *
397 			 * No retry is needed in the atomic update path as the
398 			 * sole concern is dropping a Dirty bit, i.e. no other
399 			 * task can zap/remove the SPTE as mmu_lock is held for
400 			 * write.  Marking the SPTE as a removed SPTE is not
401 			 * strictly necessary for the same reason, but using
402 			 * the removed SPTE value keeps the shared/exclusive
403 			 * paths consistent and allows the handle_changed_spte()
404 			 * call below to hardcode the new value to REMOVED_SPTE.
405 			 *
406 			 * Note, even though dropping a Dirty bit is the only
407 			 * scenario where a non-atomic update could result in a
408 			 * functional bug, simply checking the Dirty bit isn't
409 			 * sufficient as a fast page fault could read the upper
410 			 * level SPTE before it is zapped, and then make this
411 			 * target SPTE writable, resume the guest, and set the
412 			 * Dirty bit between reading the SPTE above and writing
413 			 * it here.
414 			 */
415 			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
416 							  REMOVED_SPTE, level);
417 		}
418 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
419 				    old_spte, REMOVED_SPTE, level, shared);
420 	}
421 
422 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
423 }
424 
425 /**
426  * handle_changed_spte - handle bookkeeping associated with an SPTE change
427  * @kvm: kvm instance
428  * @as_id: the address space of the paging structure the SPTE was a part of
429  * @gfn: the base GFN that was mapped by the SPTE
430  * @old_spte: The value of the SPTE before the change
431  * @new_spte: The value of the SPTE after the change
432  * @level: the level of the PT the SPTE is part of in the paging structure
433  * @shared: This operation may not be running under the exclusive use of
434  *	    the MMU lock and the operation must synchronize with other
435  *	    threads that might be modifying SPTEs.
436  *
437  * Handle bookkeeping that might result from the modification of a SPTE.  Note,
438  * dirty logging updates are handled in common code, not here (see make_spte()
439  * and fast_pf_fix_direct_spte()).
440  */
441 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
442 				u64 old_spte, u64 new_spte, int level,
443 				bool shared)
444 {
445 	bool was_present = is_shadow_present_pte(old_spte);
446 	bool is_present = is_shadow_present_pte(new_spte);
447 	bool was_leaf = was_present && is_last_spte(old_spte, level);
448 	bool is_leaf = is_present && is_last_spte(new_spte, level);
449 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
450 
451 	WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
452 	WARN_ON_ONCE(level < PG_LEVEL_4K);
453 	WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
454 
455 	/*
456 	 * If this warning were to trigger it would indicate that there was a
457 	 * missing MMU notifier or a race with some notifier handler.
458 	 * A present, leaf SPTE should never be directly replaced with another
459 	 * present leaf SPTE pointing to a different PFN. A notifier handler
460 	 * should be zapping the SPTE before the main MM's page table is
461 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
462 	 * thread before replacement.
463 	 */
464 	if (was_leaf && is_leaf && pfn_changed) {
465 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
466 		       "SPTE with another present leaf SPTE mapping a\n"
467 		       "different PFN!\n"
468 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
469 		       as_id, gfn, old_spte, new_spte, level);
470 
471 		/*
472 		 * Crash the host to prevent error propagation and guest data
473 		 * corruption.
474 		 */
475 		BUG();
476 	}
477 
478 	if (old_spte == new_spte)
479 		return;
480 
481 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
482 
483 	if (is_leaf)
484 		check_spte_writable_invariants(new_spte);
485 
486 	/*
487 	 * The only time a SPTE should be changed from one non-present state
488 	 * to another non-present state is when an MMIO entry is installed/
489 	 * modified/removed. In that case, there is nothing to do here.
490 	 */
491 	if (!was_present && !is_present) {
492 		/*
493 		 * If this change does not involve an MMIO SPTE or a removed SPTE,
494 		 * it is unexpected. Log the change, though it should not
495 		 * impact the guest since both the former and current SPTEs
496 		 * are nonpresent.
497 		 */
498 		if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) &&
499 				 !is_mmio_spte(kvm, new_spte) &&
500 				 !is_removed_spte(new_spte)))
501 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
502 			       "should not be replaced with another,\n"
503 			       "different nonpresent SPTE, unless one or both\n"
504 			       "are MMIO SPTEs, or the new SPTE is\n"
505 			       "a temporary removed SPTE.\n"
506 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
507 			       as_id, gfn, old_spte, new_spte, level);
508 		return;
509 	}
510 
511 	if (is_leaf != was_leaf)
512 		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
513 
514 	if (was_leaf && is_dirty_spte(old_spte) &&
515 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
516 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
517 
518 	/*
519 	 * Recursively handle child PTs if the change removed a subtree from
520 	 * the paging structure.  Note the WARN on the PFN changing without the
521 	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
522 	 * pages are kernel allocations and should never be migrated.
523 	 */
524 	if (was_present && !was_leaf &&
525 	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
526 		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
527 
528 	if (was_leaf && is_accessed_spte(old_spte) &&
529 	    (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
530 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
531 }
532 
533 static inline int __tdp_mmu_set_spte_atomic(struct tdp_iter *iter, u64 new_spte)
534 {
535 	u64 *sptep = rcu_dereference(iter->sptep);
536 
537 	/*
538 	 * The caller is responsible for ensuring the old SPTE is not a REMOVED
539 	 * SPTE.  KVM should never attempt to zap or manipulate a REMOVED SPTE,
540 	 * and pre-checking before inserting a new SPTE is advantageous as it
541 	 * avoids unnecessary work.
542 	 */
543 	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
544 
545 	/*
546 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
547 	 * does not hold the mmu_lock.  On failure, i.e. if a different logical
548 	 * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
549 	 * the current value, so the caller operates on fresh data, e.g. if it
550 	 * retries tdp_mmu_set_spte_atomic().
551 	 */
552 	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
553 		return -EBUSY;
554 
555 	return 0;
556 }
557 
558 /*
559  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
560  * and handle the associated bookkeeping.  Do not mark the page dirty
561  * in KVM's dirty bitmaps.
562  *
563  * If setting the SPTE fails because it has changed, iter->old_spte will be
564  * refreshed to the current value of the spte.
565  *
566  * @kvm: kvm instance
567  * @iter: a tdp_iter instance currently on the SPTE that should be set
568  * @new_spte: The value the SPTE should be set to
569  * Return:
570  * * 0      - If the SPTE was set.
571  * * -EBUSY - If the SPTE cannot be set. In this case this function will have
572  *            no side-effects other than setting iter->old_spte to the last
573  *            known value of the spte.
574  */
575 static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
576 					  struct tdp_iter *iter,
577 					  u64 new_spte)
578 {
579 	int ret;
580 
581 	lockdep_assert_held_read(&kvm->mmu_lock);
582 
583 	ret = __tdp_mmu_set_spte_atomic(iter, new_spte);
584 	if (ret)
585 		return ret;
586 
587 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
588 			    new_spte, iter->level, true);
589 
590 	return 0;
591 }
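
/*
 * Illustrative retry pattern for tdp_mmu_set_spte_atomic() (a sketch mirroring
 * wrprot_gfn_range() and friends below): on -EBUSY, iter->old_spte has been
 * refreshed, so retrying with a recomputed new_spte is sufficient.
 *
 * retry:
 *	...
 *	new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
 *	if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *		goto retry;
 */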
592 
593 static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
594 					  struct tdp_iter *iter)
595 {
596 	int ret;
597 
598 	lockdep_assert_held_read(&kvm->mmu_lock);
599 
600 	/*
601 	 * Freeze the SPTE by setting it to a special, non-present value. This
602 	 * will stop other threads from immediately installing a present entry
603 	 * in its place before the TLBs are flushed.
604 	 *
605 	 * Delay processing of the zapped SPTE until after TLBs are flushed and
606 	 * the REMOVED_SPTE is replaced (see below).
607 	 */
608 	ret = __tdp_mmu_set_spte_atomic(iter, REMOVED_SPTE);
609 	if (ret)
610 		return ret;
611 
612 	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
613 
614 	/*
615 	 * No other thread can overwrite the removed SPTE as they must either
616 	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
617 	 * overwrite the special removed SPTE value. Use the raw write helper to
618 	 * avoid an unnecessary check on volatile bits.
619 	 */
620 	__kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE);
621 
622 	/*
623 	 * Process the zapped SPTE after flushing TLBs, and after replacing
624 	 * REMOVED_SPTE with 0. This minimizes the amount of time vCPUs are
625 	 * blocked by the REMOVED_SPTE and reduces contention on the child
626 	 * SPTEs.
627 	 */
628 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
629 			    0, iter->level, true);
630 
631 	return 0;
632 }
633 
634 
635 /*
636  * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
637  * @kvm:	      KVM instance
638  * @as_id:	      Address space ID, i.e. regular vs. SMM
639  * @sptep:	      Pointer to the SPTE
640  * @old_spte:	      The current value of the SPTE
641  * @new_spte:	      The new value that will be set for the SPTE
642  * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
643  * @level:	      The level _containing_ the SPTE (its parent PT's level)
644  *
645  * Returns the old SPTE value, which _may_ be different than @old_spte if the
646  * SPTE had volatile bits.
647  */
648 static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
649 			    u64 old_spte, u64 new_spte, gfn_t gfn, int level)
650 {
651 	lockdep_assert_held_write(&kvm->mmu_lock);
652 
653 	/*
654 	 * No thread should be using this function to set SPTEs to or from the
655 	 * temporary removed SPTE value.
656 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
657 	 * should be used. If operating under the MMU lock in write mode, the
658 	 * use of the removed SPTE should not be necessary.
659 	 */
660 	WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte));
661 
662 	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
663 
664 	handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
665 	return old_spte;
666 }
667 
668 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
669 					 u64 new_spte)
670 {
671 	WARN_ON_ONCE(iter->yielded);
672 	iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
673 					  iter->old_spte, new_spte,
674 					  iter->gfn, iter->level);
675 }
676 
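/*
 * Iterators over the SPTEs of a single root: tdp_root_for_each_pte() visits
 * every SPTE in the gfn range [_start, _end); the _leaf_ variant additionally
 * skips SPTEs that are not present or are not last-level (leaf) entries.
 * tdp_mmu_for_each_pte() walks the root currently loaded in @_mmu.
 */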
677 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
678 	for_each_tdp_pte(_iter, _root, _start, _end)
679 
680 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
681 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
682 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
683 		    !is_last_spte(_iter.old_spte, _iter.level))		\
684 			continue;					\
685 		else
686 
687 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
688 	for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
689 
690 /*
691  * Yield if the MMU lock is contended or this thread needs to return control
692  * to the scheduler.
693  *
694  * If this function should yield and flush is set, it will perform a remote
695  * TLB flush before yielding.
696  *
697  * If this function yields, iter->yielded is set and the caller must skip to
698  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
699  * over the paging structures to allow the iterator to continue its traversal
700  * from the paging structure root.
701  *
702  * Returns true if this function yielded.
703  */
704 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
705 							  struct tdp_iter *iter,
706 							  bool flush, bool shared)
707 {
708 	WARN_ON_ONCE(iter->yielded);
709 
710 	/* Ensure forward progress has been made before yielding. */
711 	if (iter->next_last_level_gfn == iter->yielded_gfn)
712 		return false;
713 
714 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
715 		if (flush)
716 			kvm_flush_remote_tlbs(kvm);
717 
718 		rcu_read_unlock();
719 
720 		if (shared)
721 			cond_resched_rwlock_read(&kvm->mmu_lock);
722 		else
723 			cond_resched_rwlock_write(&kvm->mmu_lock);
724 
725 		rcu_read_lock();
726 
727 		WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);
728 
729 		iter->yielded = true;
730 	}
731 
732 	return iter->yielded;
733 }
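
/*
 * Illustrative usage (a sketch mirroring __tdp_mmu_zap_root() and the other
 * walks in this file): the "continue" is what honors iter->yielded, letting
 * tdp_iter_next() restart the walk on the next iteration.
 *
 *	for_each_tdp_pte_min_level(iter, root, level, start, end) {
 *		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
 *			continue;
 *		...
 *	}
 */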
734 
735 static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
736 {
737 	/*
738 	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
739 	 * a gpa range that would exceed the max gfn, and KVM does not create
740 	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
741 	 * the slow emulation path every time.
742 	 */
743 	return kvm_mmu_max_gfn() + 1;
744 }
745 
746 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
747 			       bool shared, int zap_level)
748 {
749 	struct tdp_iter iter;
750 
751 	gfn_t end = tdp_mmu_max_gfn_exclusive();
752 	gfn_t start = 0;
753 
754 	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
755 retry:
756 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
757 			continue;
758 
759 		if (!is_shadow_present_pte(iter.old_spte))
760 			continue;
761 
762 		if (iter.level > zap_level)
763 			continue;
764 
765 		if (!shared)
766 			tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
767 		else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
768 			goto retry;
769 	}
770 }
771 
772 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
773 			     bool shared)
774 {
775 
776 	/*
777 	 * The root must have an elevated refcount so that it's reachable via
778 	 * mmu_notifier callbacks, which allows this path to yield and drop
779 	 * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
780 	 * must drop all references to relevant pages prior to completing the
781 	 * callback.  Dropping mmu_lock with an unreachable root would result
782 	 * in zapping SPTEs after a relevant mmu_notifier callback completes
783 	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
784 	 * dirty accessed bits to the SPTE's associated struct page.
785 	 */
786 	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
787 
788 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
789 
790 	rcu_read_lock();
791 
792 	/*
793 	 * Zap roots in multiple passes of decreasing granularity, i.e. zap at
794 	 * 4KiB=>2MiB=>1GiB=>root, in order to better honor need_resched() (all
795 	 * preempt models) or mmu_lock contention (full or real-time models).
796 	 * Zapping at finer granularity marginally increases the total time of
797 	 * the zap, but in most cases the zap itself isn't latency sensitive.
798 	 *
799 	 * If KVM is configured to prove the MMU, skip the 4KiB and 2MiB zaps
800 	 * in order to mimic the page fault path, which can replace a 1GiB page
801 	 * table with an equivalent 1GiB hugepage, i.e. can get saddled with
802 	 * zapping a 1GiB region that's fully populated with 4KiB SPTEs.  This
803 	 * allows verifying that KVM can safely zap 1GiB regions, e.g. without
804 	 * inducing RCU stalls, without relying on a relatively rare event
805 	 * (zapping roots is orders of magnitude more common).  Note, because
806 	 * zapping a SP recurses on its children, stepping down to PG_LEVEL_4K
807 	 * in the iterator itself is unnecessary.
808 	 */
809 	if (!IS_ENABLED(CONFIG_KVM_PROVE_MMU)) {
810 		__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K);
811 		__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M);
812 	}
813 	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
814 	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);
815 
816 	rcu_read_unlock();
817 }
818 
819 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
820 {
821 	u64 old_spte;
822 
823 	/*
824 	 * This helper intentionally doesn't allow zapping a root shadow page,
825 	 * which doesn't have a parent page table and thus no associated entry.
826 	 */
827 	if (WARN_ON_ONCE(!sp->ptep))
828 		return false;
829 
830 	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
831 	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
832 		return false;
833 
834 	tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte,
835 			 SHADOW_NONPRESENT_VALUE, sp->gfn, sp->role.level + 1);
836 
837 	return true;
838 }
839 
840 /*
841  * If can_yield is true, will release the MMU lock and reschedule if the
842  * scheduler needs the CPU or there is contention on the MMU lock. If this
843  * function cannot yield, it will not release the MMU lock or reschedule and
844  * the caller must ensure it does not supply too large a GFN range, or the
845  * operation can cause a soft lockup.
846  */
847 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
848 			      gfn_t start, gfn_t end, bool can_yield, bool flush)
849 {
850 	struct tdp_iter iter;
851 
852 	end = min(end, tdp_mmu_max_gfn_exclusive());
853 
854 	lockdep_assert_held_write(&kvm->mmu_lock);
855 
856 	rcu_read_lock();
857 
858 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
859 		if (can_yield &&
860 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
861 			flush = false;
862 			continue;
863 		}
864 
865 		if (!is_shadow_present_pte(iter.old_spte) ||
866 		    !is_last_spte(iter.old_spte, iter.level))
867 			continue;
868 
869 		tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
870 
871 		/*
872 		 * Zapping SPTEs in invalid roots doesn't require a TLB flush,
873 		 * see kvm_tdp_mmu_zap_invalidated_roots() for details.
874 		 */
875 		if (!root->role.invalid)
876 			flush = true;
877 	}
878 
879 	rcu_read_unlock();
880 
881 	/*
882 	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
883 	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
884 	 */
885 	return flush;
886 }
887 
888 /*
889  * Zap leaf SPTEs for the range of gfns, [start, end), for all *VALID* roots.
890  * Returns true if a TLB flush is needed before releasing the MMU lock, i.e. if
891  * one or more SPTEs were zapped since the MMU lock was last acquired.
892  */
893 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
894 {
895 	struct kvm_mmu_page *root;
896 
897 	lockdep_assert_held_write(&kvm->mmu_lock);
898 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1)
899 		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
900 
901 	return flush;
902 }
903 
904 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
905 {
906 	struct kvm_mmu_page *root;
907 
908 	/*
909 	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
910 	 * before returning to the caller.  Zap directly even if the root is
911 	 * also being zapped by a worker.  Walking zapped top-level SPTEs isn't
912 	 * all that expensive and mmu_lock is already held, which means the
913 	 * worker has yielded, i.e. flushing the work instead of zapping here
914 	 * isn't guaranteed to be any faster.
915 	 *
916 	 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
917 	 * is being destroyed or the userspace VMM has exited.  In both cases,
918 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
919 	 */
920 	lockdep_assert_held_write(&kvm->mmu_lock);
921 	for_each_tdp_mmu_root_yield_safe(kvm, root)
922 		tdp_mmu_zap_root(kvm, root, false);
923 }
924 
925 /*
926  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
927  * zap" completes.
928  */
929 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
930 {
931 	struct kvm_mmu_page *root;
932 
933 	read_lock(&kvm->mmu_lock);
934 
935 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
936 		if (!root->tdp_mmu_scheduled_root_to_zap)
937 			continue;
938 
939 		root->tdp_mmu_scheduled_root_to_zap = false;
940 		KVM_BUG_ON(!root->role.invalid, kvm);
941 
942 		/*
943 		 * A TLB flush is not necessary as KVM performs a local TLB
944 		 * flush when allocating a new root (see kvm_mmu_load()), and
945 		 * when migrating a vCPU to a different pCPU.  Note, the local
946 		 * TLB flush on reuse also invalidates paging-structure-cache
947 		 * entries, i.e. TLB entries for intermediate paging structures,
948 		 * that may be zapped, as such entries are associated with the
949 		 * ASID on both VMX and SVM.
950 		 */
951 		tdp_mmu_zap_root(kvm, root, true);
952 
953 		/*
954 		 * The reference needs to be put *after* zapping the root, as
955 		 * the root must be reachable by mmu_notifiers while it's being
956 		 * zapped.
957 		 */
958 		kvm_tdp_mmu_put_root(kvm, root);
959 	}
960 
961 	read_unlock(&kvm->mmu_lock);
962 }
963 
964 /*
965  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
966  * is about to be zapped, e.g. in response to a memslots update.  The actual
967  * zapping is done separately so that it happens with mmu_lock held for read,
968  * whereas invalidating roots must be done with mmu_lock held for write (unless
969  * the VM is being destroyed).
970  *
971  * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
972  * See kvm_tdp_mmu_alloc_root().
973  */
974 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
975 {
976 	struct kvm_mmu_page *root;
977 
978 	/*
979 	 * mmu_lock must be held for write to ensure that a root doesn't become
980 	 * invalid while there are active readers (invalidating a root while
981 	 * there are active readers may or may not be problematic in practice,
982 	 * but it's uncharted territory and not supported).
983 	 *
984 	 * Waive the assertion if there are no users of @kvm, i.e. the VM is
985 	 * being destroyed after all references have been put, or if no vCPUs
986 	 * have been created (which means there are no roots), i.e. the VM is
987 	 * being destroyed in an error path of KVM_CREATE_VM.
988 	 */
989 	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
990 	    refcount_read(&kvm->users_count) && kvm->created_vcpus)
991 		lockdep_assert_held_write(&kvm->mmu_lock);
992 
993 	/*
994 	 * As above, mmu_lock isn't held when destroying the VM!  There can't
995 	 * be other references to @kvm, i.e. nothing else can invalidate roots
996 	 * or get/put references to roots.
997 	 */
998 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
999 		/*
1000 		 * Note, invalid roots can outlive a memslot update!  Invalid
1001 		 * roots must be *zapped* before the memslot update completes,
1002 		 * but a different task can acquire a reference and keep the
1003 		 * root alive after it's been zapped.
1004 		 */
1005 		if (!root->role.invalid) {
1006 			root->tdp_mmu_scheduled_root_to_zap = true;
1007 			root->role.invalid = true;
1008 		}
1009 	}
1010 }
1011 
1012 /*
1013  * Installs a last-level SPTE to handle a TDP page fault.
1014  * (NPT/EPT violation/misconfiguration)
1015  */
1016 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
1017 					  struct kvm_page_fault *fault,
1018 					  struct tdp_iter *iter)
1019 {
1020 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
1021 	u64 new_spte;
1022 	int ret = RET_PF_FIXED;
1023 	bool wrprot = false;
1024 
1025 	if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
1026 		return RET_PF_RETRY;
1027 
1028 	if (unlikely(!fault->slot))
1029 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
1030 	else
1031 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
1032 					 fault->pfn, iter->old_spte, fault->prefetch, true,
1033 					 fault->map_writable, &new_spte);
1034 
1035 	if (new_spte == iter->old_spte)
1036 		ret = RET_PF_SPURIOUS;
1037 	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
1038 		return RET_PF_RETRY;
1039 	else if (is_shadow_present_pte(iter->old_spte) &&
1040 		 !is_last_spte(iter->old_spte, iter->level))
1041 		kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
1042 
1043 	/*
1044 	 * If the page fault was caused by a write but the page is write
1045 	 * protected, emulation is needed. If the emulation was skipped,
1046 	 * the vCPU would have the same fault again.
1047 	 */
1048 	if (wrprot) {
1049 		if (fault->write)
1050 			ret = RET_PF_EMULATE;
1051 	}
1052 
1053 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
1054 	if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {
1055 		vcpu->stat.pf_mmio_spte_created++;
1056 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
1057 				     new_spte);
1058 		ret = RET_PF_EMULATE;
1059 	} else {
1060 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
1061 				       rcu_dereference(iter->sptep));
1062 	}
1063 
1064 	return ret;
1065 }
1066 
1067 /*
1068  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1069  * provided page table.
1070  *
1071  * @kvm: kvm instance
1072  * @iter: a tdp_iter instance currently on the SPTE that should be set
1073  * @sp: The new TDP page table to install.
1074  * @shared: This operation is running under the MMU lock in read mode.
1075  *
1076  * Returns: 0 if the new page table was installed. Non-0 if the page table
1077  *          could not be installed (e.g. the atomic compare-exchange failed).
1078  */
1079 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1080 			   struct kvm_mmu_page *sp, bool shared)
1081 {
1082 	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
1083 	int ret = 0;
1084 
1085 	if (shared) {
1086 		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
1087 		if (ret)
1088 			return ret;
1089 	} else {
1090 		tdp_mmu_iter_set_spte(kvm, iter, spte);
1091 	}
1092 
1093 	tdp_account_mmu_page(kvm, sp);
1094 
1095 	return 0;
1096 }
1097 
1098 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1099 				   struct kvm_mmu_page *sp, bool shared);
1100 
1101 /*
1102  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1103  * page tables and SPTEs to translate the faulting guest physical address.
1104  */
1105 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1106 {
1107 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1108 	struct kvm *kvm = vcpu->kvm;
1109 	struct tdp_iter iter;
1110 	struct kvm_mmu_page *sp;
1111 	int ret = RET_PF_RETRY;
1112 
1113 	kvm_mmu_hugepage_adjust(vcpu, fault);
1114 
1115 	trace_kvm_mmu_spte_requested(fault);
1116 
1117 	rcu_read_lock();
1118 
1119 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1120 		int r;
1121 
1122 		if (fault->nx_huge_page_workaround_enabled)
1123 			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1124 
1125 		/*
1126 		 * If SPTE has been frozen by another thread, just give up and
1127 		 * retry, avoiding unnecessary page table allocation and free.
1128 		 */
1129 		if (is_removed_spte(iter.old_spte))
1130 			goto retry;
1131 
1132 		if (iter.level == fault->goal_level)
1133 			goto map_target_level;
1134 
1135 		/* Step down into the lower level page table if it exists. */
1136 		if (is_shadow_present_pte(iter.old_spte) &&
1137 		    !is_large_pte(iter.old_spte))
1138 			continue;
1139 
1140 		/*
1141 		 * The SPTE is either non-present or points to a huge page that
1142 		 * needs to be split.
1143 		 */
1144 		sp = tdp_mmu_alloc_sp(vcpu);
1145 		tdp_mmu_init_child_sp(sp, &iter);
1146 
1147 		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
1148 
1149 		if (is_shadow_present_pte(iter.old_spte))
1150 			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1151 		else
1152 			r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1153 
1154 		/*
1155 		 * Force the guest to retry if installing an upper level SPTE
1156 		 * failed, e.g. because a different task modified the SPTE.
1157 		 */
1158 		if (r) {
1159 			tdp_mmu_free_sp(sp);
1160 			goto retry;
1161 		}
1162 
1163 		if (fault->huge_page_disallowed &&
1164 		    fault->req_level >= iter.level) {
1165 			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1166 			if (sp->nx_huge_page_disallowed)
1167 				track_possible_nx_huge_page(kvm, sp);
1168 			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1169 		}
1170 	}
1171 
1172 	/*
1173 	 * The walk aborted before reaching the target level, e.g. because the
1174 	 * iterator detected an upper level SPTE was frozen during traversal.
1175 	 */
1176 	WARN_ON_ONCE(iter.level == fault->goal_level);
1177 	goto retry;
1178 
1179 map_target_level:
1180 	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1181 
1182 retry:
1183 	rcu_read_unlock();
1184 	return ret;
1185 }
1186 
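/*
 * Zap leaf SPTEs in the notifier range for every root, including invalid
 * roots, as invalid roots may still have present SPTEs referencing the pages
 * being unmapped.  Returns true if a TLB flush is needed.
 */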
1187 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1188 				 bool flush)
1189 {
1190 	struct kvm_mmu_page *root;
1191 
1192 	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
1193 		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
1194 					  range->may_block, flush);
1195 
1196 	return flush;
1197 }
1198 
1199 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1200 			      struct kvm_gfn_range *range);
1201 
1202 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1203 						   struct kvm_gfn_range *range,
1204 						   tdp_handler_t handler)
1205 {
1206 	struct kvm_mmu_page *root;
1207 	struct tdp_iter iter;
1208 	bool ret = false;
1209 
1210 	/*
1211 	 * Don't support rescheduling, none of the MMU notifiers that funnel
1212 	 * into this helper allow blocking; it'd be dead, wasteful code.
1213 	 */
1214 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1215 		rcu_read_lock();
1216 
1217 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1218 			ret |= handler(kvm, &iter, range);
1219 
1220 		rcu_read_unlock();
1221 	}
1222 
1223 	return ret;
1224 }
1225 
1226 /*
1227  * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return non-zero
1228  * if any of the GFNs in the range have been accessed.
1229  *
1230  * No need to mark the corresponding PFN as accessed as this call is coming
1231  * from the clear_young() or clear_flush_young() notifier, which uses the
1232  * return value to determine if the page has been accessed.
1233  */
1234 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1235 			  struct kvm_gfn_range *range)
1236 {
1237 	u64 new_spte;
1238 
1239 	/* If we have a non-accessed entry we don't need to change the pte. */
1240 	if (!is_accessed_spte(iter->old_spte))
1241 		return false;
1242 
1243 	if (spte_ad_enabled(iter->old_spte)) {
1244 		iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
1245 							 iter->old_spte,
1246 							 shadow_accessed_mask,
1247 							 iter->level);
1248 		new_spte = iter->old_spte & ~shadow_accessed_mask;
1249 	} else {
1250 		/*
1251 		 * Capture the dirty status of the page, so that it doesn't get
1252 		 * lost when the SPTE is marked for access tracking.
1253 		 */
1254 		if (is_writable_pte(iter->old_spte))
1255 			kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
1256 
1257 		new_spte = mark_spte_for_access_track(iter->old_spte);
1258 		iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
1259 							iter->old_spte, new_spte,
1260 							iter->level);
1261 	}
1262 
1263 	trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
1264 				       iter->old_spte, new_spte);
1265 	return true;
1266 }
1267 
1268 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1269 {
1270 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1271 }
1272 
1273 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1274 			 struct kvm_gfn_range *range)
1275 {
1276 	return is_accessed_spte(iter->old_spte);
1277 }
1278 
1279 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1280 {
1281 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1282 }
1283 
1284 /*
1285  * Remove write access from all SPTEs at or above min_level that map GFNs
1286  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1287  * be flushed.
1288  */
1289 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1290 			     gfn_t start, gfn_t end, int min_level)
1291 {
1292 	struct tdp_iter iter;
1293 	u64 new_spte;
1294 	bool spte_set = false;
1295 
1296 	rcu_read_lock();
1297 
1298 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1299 
1300 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
1301 retry:
1302 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1303 			continue;
1304 
1305 		if (!is_shadow_present_pte(iter.old_spte) ||
1306 		    !is_last_spte(iter.old_spte, iter.level) ||
1307 		    !(iter.old_spte & PT_WRITABLE_MASK))
1308 			continue;
1309 
1310 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1311 
1312 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1313 			goto retry;
1314 
1315 		spte_set = true;
1316 	}
1317 
1318 	rcu_read_unlock();
1319 	return spte_set;
1320 }
1321 
1322 /*
1323  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1324  * only affect leaf SPTEs down to min_level.
1325  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1326  */
1327 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1328 			     const struct kvm_memory_slot *slot, int min_level)
1329 {
1330 	struct kvm_mmu_page *root;
1331 	bool spte_set = false;
1332 
1333 	lockdep_assert_held_read(&kvm->mmu_lock);
1334 
1335 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1336 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1337 			     slot->base_gfn + slot->npages, min_level);
1338 
1339 	return spte_set;
1340 }
1341 
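/*
 * Allocate a shadow page for eager huge page splitting.  Unlike
 * tdp_mmu_alloc_sp(), this doesn't use the vCPU memory caches, as splitting
 * can be triggered from ioctl context without a vCPU; @gfp dictates whether
 * the allocation may sleep and reclaim.
 */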
1342 static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
1343 {
1344 	struct kvm_mmu_page *sp;
1345 
1346 	gfp |= __GFP_ZERO;
1347 
1348 	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
1349 	if (!sp)
1350 		return NULL;
1351 
1352 	sp->spt = (void *)__get_free_page(gfp);
1353 	if (!sp->spt) {
1354 		kmem_cache_free(mmu_page_header_cache, sp);
1355 		return NULL;
1356 	}
1357 
1358 	return sp;
1359 }
1360 
1361 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1362 						       struct tdp_iter *iter,
1363 						       bool shared)
1364 {
1365 	struct kvm_mmu_page *sp;
1366 
1367 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1368 
1369 	/*
1370 	 * Since we are allocating while under the MMU lock we have to be
1371 	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
1372 	 * reclaim and to avoid making any filesystem callbacks (which can end
1373 	 * up invoking KVM MMU notifiers, resulting in a deadlock).
1374 	 *
1375 	 * If this allocation fails we drop the lock and retry with reclaim
1376 	 * allowed.
1377 	 */
1378 	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1379 	if (sp)
1380 		return sp;
1381 
1382 	rcu_read_unlock();
1383 
1384 	if (shared)
1385 		read_unlock(&kvm->mmu_lock);
1386 	else
1387 		write_unlock(&kvm->mmu_lock);
1388 
1389 	iter->yielded = true;
1390 	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1391 
1392 	if (shared)
1393 		read_lock(&kvm->mmu_lock);
1394 	else
1395 		write_lock(&kvm->mmu_lock);
1396 
1397 	rcu_read_lock();
1398 
1399 	return sp;
1400 }
1401 
1402 /* Note, the caller is responsible for initializing @sp. */
1403 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1404 				   struct kvm_mmu_page *sp, bool shared)
1405 {
1406 	const u64 huge_spte = iter->old_spte;
1407 	const int level = iter->level;
1408 	int ret, i;
1409 
1410 	/*
1411 	 * No need for atomics when writing to sp->spt since the page table has
1412 	 * not been linked in yet and thus is not reachable from any other CPU.
1413 	 */
1414 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
1415 		sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1416 
1417 	/*
1418 	 * Replace the huge spte with a pointer to the populated lower level
1419 	 * page table. Since we are making this change without a TLB flush, vCPUs
1420 	 * will see a mix of the split mappings and the original huge mapping,
1421 	 * depending on what's currently in their TLB. This is fine from a
1422 	 * correctness standpoint since the translation will be the same either
1423 	 * way.
1424 	 */
1425 	ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1426 	if (ret)
1427 		goto out;
1428 
1429 	/*
1430 	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1431 	 * are overwriting from the page stats. But we have to manually update
1432 	 * the page stats with the new present child pages.
1433 	 */
1434 	kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
1435 
1436 out:
1437 	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1438 	return ret;
1439 }
1440 
1441 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1442 					 struct kvm_mmu_page *root,
1443 					 gfn_t start, gfn_t end,
1444 					 int target_level, bool shared)
1445 {
1446 	struct kvm_mmu_page *sp = NULL;
1447 	struct tdp_iter iter;
1448 	int ret = 0;
1449 
1450 	rcu_read_lock();
1451 
1452 	/*
1453 	 * Traverse the page table splitting all huge pages above the target
1454 	 * level into one lower level. For example, if we encounter a 1GB page
1455 	 * we split it into 512 2MB pages.
1456 	 *
1457 	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1458 	 * to visit an SPTE before ever visiting its children, which means we
1459 	 * will correctly recursively split huge pages that are more than one
1460 	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
1461 	 * and then splitting each of those to 512 4KB pages).
1462 	 */
1463 	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1464 retry:
1465 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1466 			continue;
1467 
1468 		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1469 			continue;
1470 
1471 		if (!sp) {
1472 			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1473 			if (!sp) {
1474 				ret = -ENOMEM;
1475 				trace_kvm_mmu_split_huge_page(iter.gfn,
1476 							      iter.old_spte,
1477 							      iter.level, ret);
1478 				break;
1479 			}
1480 
1481 			if (iter.yielded)
1482 				continue;
1483 		}
1484 
1485 		tdp_mmu_init_child_sp(sp, &iter);
1486 
1487 		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1488 			goto retry;
1489 
1490 		sp = NULL;
1491 	}
1492 
1493 	rcu_read_unlock();
1494 
1495 	/*
1496 	 * It's possible to exit the loop having never used the last sp if, for
1497 	 * example, a vCPU doing HugePage NX splitting wins the race and
1498 	 * installs its own sp in place of the last sp we tried to split.
1499 	 */
1500 	if (sp)
1501 		tdp_mmu_free_sp(sp);
1502 
1503 	return ret;
1504 }
1505 
1506 
1507 /*
1508  * Try to split all huge pages mapped by the TDP MMU down to the target level.
1509  */
1510 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1511 				      const struct kvm_memory_slot *slot,
1512 				      gfn_t start, gfn_t end,
1513 				      int target_level, bool shared)
1514 {
1515 	struct kvm_mmu_page *root;
1516 	int r = 0;
1517 
1518 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1519 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
1520 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1521 		if (r) {
1522 			kvm_tdp_mmu_put_root(kvm, root);
1523 			break;
1524 		}
1525 	}
1526 }
1527 
1528 static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
1529 {
1530 	/*
1531 	 * All TDP MMU shadow pages share the same role as their root, aside
1532 	 * from level, so it is valid to key off any shadow page to determine if
1533 	 * write protection is needed for an entire tree.
1534 	 */
1535 	return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
1536 }
1537 
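/*
 * Clear the dirty-tracking bit, either the D-bit or, when dirty logging is
 * based on write-protection, the W-bit, from leaf SPTEs in [start, end) of
 * @root.  Returns true if any SPTE was modified.
 */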
1538 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1539 			   gfn_t start, gfn_t end)
1540 {
1541 	const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
1542 							    shadow_dirty_mask;
1543 	struct tdp_iter iter;
1544 	bool spte_set = false;
1545 
1546 	rcu_read_lock();
1547 
1548 	tdp_root_for_each_pte(iter, root, start, end) {
1549 retry:
1550 		if (!is_shadow_present_pte(iter.old_spte) ||
1551 		    !is_last_spte(iter.old_spte, iter.level))
1552 			continue;
1553 
1554 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1555 			continue;
1556 
1557 		KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
1558 				spte_ad_need_write_protect(iter.old_spte));
1559 
1560 		if (!(iter.old_spte & dbit))
1561 			continue;
1562 
1563 		if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
1564 			goto retry;
1565 
1566 		spte_set = true;
1567 	}
1568 
1569 	rcu_read_unlock();
1570 	return spte_set;
1571 }
1572 
1573 /*
1574  * Clear the dirty status (D-bit or W-bit) of all the SPTEs mapping GFNs in the
1575  * memslot. Returns true if an SPTE has been changed and the TLBs need to be
1576  * flushed.
1577  */
1578 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1579 				  const struct kvm_memory_slot *slot)
1580 {
1581 	struct kvm_mmu_page *root;
1582 	bool spte_set = false;
1583 
1584 	lockdep_assert_held_read(&kvm->mmu_lock);
1585 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1586 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1587 				slot->base_gfn + slot->npages);
1588 
1589 	return spte_set;
1590 }
1591 
1592 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1593 				  gfn_t gfn, unsigned long mask, bool wrprot)
1594 {
1595 	const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
1596 									shadow_dirty_mask;
1597 	struct tdp_iter iter;
1598 
1599 	lockdep_assert_held_write(&kvm->mmu_lock);
1600 
1601 	rcu_read_lock();
1602 
1603 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1604 				    gfn + BITS_PER_LONG) {
1605 		if (!mask)
1606 			break;
1607 
1608 		KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
1609 				spte_ad_need_write_protect(iter.old_spte));
1610 
1611 		if (iter.level > PG_LEVEL_4K ||
1612 		    !(mask & (1UL << (iter.gfn - gfn))))
1613 			continue;
1614 
1615 		mask &= ~(1UL << (iter.gfn - gfn));
1616 
1617 		if (!(iter.old_spte & dbit))
1618 			continue;
1619 
1620 		iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
1621 							iter.old_spte, dbit,
1622 							iter.level);
1623 
1624 		trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
1625 					       iter.old_spte,
1626 					       iter.old_spte & ~dbit);
1627 		kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
1628 	}
1629 
1630 	rcu_read_unlock();
1631 }
1632 
1633 /*
1634  * Clear the dirty status (D-bit or W-bit) of all the 4k SPTEs mapping GFNs for
1635  * which a bit is set in mask, starting at gfn. The given memslot is expected to
1636  * contain all the GFNs represented by set bits in the mask.
1637  */
1638 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1639 				       struct kvm_memory_slot *slot,
1640 				       gfn_t gfn, unsigned long mask,
1641 				       bool wrprot)
1642 {
1643 	struct kvm_mmu_page *root;
1644 
1645 	for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
1646 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1647 }
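
/*
 * Illustrative note (not part of the upstream file): bit N of @mask
 * corresponds to gfn + N, so the walk spans at most BITS_PER_LONG GFNs.  For
 * example, with gfn == 0x1000 and mask == 0x5, the 4K SPTEs for GFNs 0x1000
 * and 0x1002 have their dirty state cleared (or are write-protected when
 * @wrprot is true); the walk starts at gfn + __ffs(mask) == 0x1000 and ends
 * early once every set bit in the mask has been consumed.
 */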
1648 
1649 static void zap_collapsible_spte_range(struct kvm *kvm,
1650 				       struct kvm_mmu_page *root,
1651 				       const struct kvm_memory_slot *slot)
1652 {
1653 	gfn_t start = slot->base_gfn;
1654 	gfn_t end = start + slot->npages;
1655 	struct tdp_iter iter;
1656 	int max_mapping_level;
1657 
1658 	rcu_read_lock();
1659 
1660 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
1661 retry:
1662 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1663 			continue;
1664 
1665 		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
1666 		    !is_shadow_present_pte(iter.old_spte))
1667 			continue;
1668 
1669 		/*
1670 		 * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
1671 		 * a large page size, then its parent would have been zapped
1672 		 * instead of stepping down.
1673 		 */
1674 		if (is_last_spte(iter.old_spte, iter.level))
1675 			continue;
1676 
1677 		/*
1678 		 * If iter.gfn resides outside of the slot, i.e. the page for
1679 		 * the current level overlaps but is not contained by the slot,
1680 		 * then the SPTE can't be made huge.  More importantly, trying
1681 		 * to query that info from slot->arch.lpage_info will cause an
1682 		 * out-of-bounds access.
1683 		 */
1684 		if (iter.gfn < start || iter.gfn >= end)
1685 			continue;
1686 
1687 		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1688 							      iter.gfn, PG_LEVEL_NUM);
1689 		if (max_mapping_level < iter.level)
1690 			continue;
1691 
1692 		/* Note, a successful atomic zap also does a remote TLB flush. */
1693 		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
1694 			goto retry;
1695 	}
1696 
1697 	rcu_read_unlock();
1698 }
1699 
1700 /*
1701  * Zap non-leaf SPTEs (and free their associated page tables) which could
1702  * be replaced by huge pages, for GFNs within the slot.
1703  */
1704 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1705 				       const struct kvm_memory_slot *slot)
1706 {
1707 	struct kvm_mmu_page *root;
1708 
1709 	lockdep_assert_held_read(&kvm->mmu_lock);
1710 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1711 		zap_collapsible_spte_range(kvm, root, slot);
1712 }
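
/*
 * Illustrative sketch (not part of the upstream file): recovering huge pages
 * after dirty logging is turned off for a slot.  The wrapper below is an
 * assumption for the example.
 *
 *	static void example_recover_hugepages(struct kvm *kvm,
 *					      const struct kvm_memory_slot *slot)
 *	{
 *		read_lock(&kvm->mmu_lock);
 *		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
 *		read_unlock(&kvm->mmu_lock);
 *	}
 *
 * No explicit TLB flush is needed afterwards because a successful
 * tdp_mmu_zap_spte_atomic() already performs a remote flush for each zapped
 * SPTE.
 */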
1713 
1714 /*
1715  * Removes write access on the last level SPTE mapping this GFN and unsets the
1716  * MMU-writable bit to ensure future writes continue to be intercepted.
1717  * Returns true if an SPTE was set and a TLB flush is needed.
1718  */
1719 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1720 			      gfn_t gfn, int min_level)
1721 {
1722 	struct tdp_iter iter;
1723 	u64 new_spte;
1724 	bool spte_set = false;
1725 
1726 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1727 
1728 	rcu_read_lock();
1729 
1730 	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1731 		if (!is_shadow_present_pte(iter.old_spte) ||
1732 		    !is_last_spte(iter.old_spte, iter.level))
1733 			continue;
1734 
1735 		new_spte = iter.old_spte &
1736 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1737 
1738 		if (new_spte == iter.old_spte)
1739 			break;
1740 
1741 		tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
1742 		spte_set = true;
1743 	}
1744 
1745 	rcu_read_unlock();
1746 
1747 	return spte_set;
1748 }
1749 
1750 /*
1751  * Removes write access on the last level SPTE mapping this GFN and unsets the
1752  * MMU-writable bit to ensure future writes continue to be intercepted.
1753  * Returns true if an SPTE was set and a TLB flush is needed.
1754  */
1755 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1756 				   struct kvm_memory_slot *slot, gfn_t gfn,
1757 				   int min_level)
1758 {
1759 	struct kvm_mmu_page *root;
1760 	bool spte_set = false;
1761 
1762 	lockdep_assert_held_write(&kvm->mmu_lock);
1763 	for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
1764 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1765 
1766 	return spte_set;
1767 }
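
/*
 * Illustrative sketch (not part of the upstream file): write-protecting a
 * single GFN, e.g. on behalf of a guest page-table page that must be
 * intercepted.  The wrapper is an assumption for the example.
 *
 *	static bool example_write_protect(struct kvm *kvm,
 *					  struct kvm_memory_slot *slot,
 *					  gfn_t gfn)
 *	{
 *		bool flush;
 *
 *		write_lock(&kvm->mmu_lock);
 *		flush = kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn,
 *						      PG_LEVEL_4K);
 *		write_unlock(&kvm->mmu_lock);
 *
 *		return flush;
 *	}
 */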
1768 
1769 /*
1770  * Return the level of the lowest level SPTE added to sptes.
1771  * That SPTE may be non-present.
1772  *
1773  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1774  */
1775 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1776 			 int *root_level)
1777 {
1778 	struct tdp_iter iter;
1779 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1780 	gfn_t gfn = addr >> PAGE_SHIFT;
1781 	int leaf = -1;
1782 
1783 	*root_level = vcpu->arch.mmu->root_role.level;
1784 
1785 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1786 		leaf = iter.level;
1787 		sptes[leaf] = iter.old_spte;
1788 	}
1789 
1790 	return leaf;
1791 }
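
/*
 * Illustrative sketch (not part of the upstream file): consuming the walk.
 * sptes[] is indexed by level, so entries from the returned leaf level up to
 * *root_level are valid; anything below the leaf is left untouched.  The
 * helper below is an assumption for the example.
 *
 *	static void example_dump_walk(struct kvm_vcpu *vcpu, u64 addr)
 *	{
 *		u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *		int root_level, leaf, level;
 *
 *		kvm_tdp_mmu_walk_lockless_begin();
 *		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *		kvm_tdp_mmu_walk_lockless_end();
 *
 *		if (leaf < 0)
 *			return;
 *
 *		for (level = root_level; level >= leaf; level--)
 *			pr_info("level %d: spte = 0x%llx\n", level, sptes[level]);
 *	}
 */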
1792 
1793 /*
1794  * Returns the last level spte pointer of the shadow page walk for the given
1795  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
1796  * walk could be performed, returns NULL and *spte does not contain valid data.
1797  *
1798  * Contract:
1799  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1800  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
1801  *
1802  * WARNING: This function is only intended to be called during fast_page_fault.
1803  */
1804 u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
1805 					u64 *spte)
1806 {
1807 	struct tdp_iter iter;
1808 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1809 	gfn_t gfn = addr >> PAGE_SHIFT;
1810 	tdp_ptep_t sptep = NULL;
1811 
1812 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1813 		*spte = iter.old_spte;
1814 		sptep = iter.sptep;
1815 	}
1816 
1817 	/*
1818 	 * Perform the rcu_dereference to get the raw spte pointer value since
1819 	 * we are passing it up to fast_page_fault, which is shared with the
1820 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1821 	 * annotation.
1822 	 *
1823 	 * This is safe since fast_page_fault obeys the contracts of this
1824 	 * function as well as all TDP MMU contracts around modifying SPTEs
1825 	 * outside of mmu_lock.
1826 	 */
1827 	return rcu_dereference(sptep);
1828 }
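
/*
 * Illustrative sketch (not part of the upstream file): the lockless-read
 * pattern the contract above describes.  The helper is an assumption for the
 * example; the real consumer is fast_page_fault().
 *
 *	static bool example_peek_spte(struct kvm_vcpu *vcpu, u64 addr)
 *	{
 *		u64 spte;
 *		u64 *sptep;
 *		bool present = false;
 *
 *		kvm_tdp_mmu_walk_lockless_begin();
 *		sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, addr, &spte);
 *		if (sptep)
 *			present = is_shadow_present_pte(spte);
 *		kvm_tdp_mmu_walk_lockless_end();
 *		// per the contract, sptep must not be dereferenced past here
 *
 *		return present;
 *	}
 */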
1829