1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * MMU support
9  *
10  * Copyright (C) 2006 Qumranet, Inc.
11  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12  *
13  * Authors:
14  *   Yaniv Kamay  <yaniv@qumranet.com>
15  *   Avi Kivity   <avi@qumranet.com>
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include "irq.h"
20 #include "ioapic.h"
21 #include "mmu.h"
22 #include "mmu_internal.h"
23 #include "tdp_mmu.h"
24 #include "x86.h"
25 #include "kvm_cache_regs.h"
26 #include "smm.h"
27 #include "kvm_emulate.h"
28 #include "page_track.h"
29 #include "cpuid.h"
30 #include "spte.h"
31 
32 #include <linux/kvm_host.h>
33 #include <linux/types.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/highmem.h>
37 #include <linux/moduleparam.h>
38 #include <linux/export.h>
39 #include <linux/swap.h>
40 #include <linux/hugetlb.h>
41 #include <linux/compiler.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/sched/signal.h>
45 #include <linux/uaccess.h>
46 #include <linux/hash.h>
47 #include <linux/kern_levels.h>
48 #include <linux/kstrtox.h>
49 #include <linux/kthread.h>
50 #include <linux/wordpart.h>
51 
52 #include <asm/page.h>
53 #include <asm/memtype.h>
54 #include <asm/cmpxchg.h>
55 #include <asm/io.h>
56 #include <asm/set_memory.h>
57 #include <asm/spec-ctrl.h>
58 #include <asm/vmx.h>
59 
60 #include "trace.h"
61 
62 static bool nx_hugepage_mitigation_hard_disabled;
63 
64 int __read_mostly nx_huge_pages = -1;
65 static uint __read_mostly nx_huge_pages_recovery_period_ms;
66 #ifdef CONFIG_PREEMPT_RT
67 /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
68 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
69 #else
70 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
71 #endif
72 
73 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
74 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
75 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
76 
77 static const struct kernel_param_ops nx_huge_pages_ops = {
78 	.set = set_nx_huge_pages,
79 	.get = get_nx_huge_pages,
80 };
81 
82 static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
83 	.set = set_nx_huge_pages_recovery_param,
84 	.get = param_get_uint,
85 };
86 
87 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
88 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
89 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops,
90 		&nx_huge_pages_recovery_ratio, 0644);
91 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
92 module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops,
93 		&nx_huge_pages_recovery_period_ms, 0644);
94 __MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint");
95 
96 static bool __read_mostly force_flush_and_sync_on_reuse;
97 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
98 
99 /*
100  * When set to true, this variable enables Two-Dimensional Paging, where
101  * the hardware walks two page tables:
102  * 1. the guest-virtual to guest-physical translation
103  * 2. while doing 1., the guest-physical to host-physical translation
104  * If the hardware supports TDP, KVM doesn't need to do shadow paging.
105  */
106 bool tdp_enabled = false;
107 
108 static bool __ro_after_init tdp_mmu_allowed;
109 
110 #ifdef CONFIG_X86_64
111 bool __read_mostly tdp_mmu_enabled = true;
112 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
113 EXPORT_SYMBOL_FOR_KVM_INTERNAL(tdp_mmu_enabled);
114 #endif
115 
116 static int max_huge_page_level __read_mostly;
117 static int tdp_root_level __read_mostly;
118 static int max_tdp_level __read_mostly;
119 
120 #define PTE_PREFETCH_NUM		8
121 
122 #include <trace/events/kvm.h>
123 
124 /* make pte_list_desc fit well in cache lines */
125 #define PTE_LIST_EXT 14
126 
127 /*
128  * struct pte_list_desc is the core data structure used to implement a custom
129  * list for tracking a set of related SPTEs, e.g. all the SPTEs that map a
130  * given GFN when used in the context of rmaps.  Using a custom list allows KVM
131  * to optimize for the common case where many GFNs will have at most a handful
132  * of SPTEs pointing at them, i.e. allows packing multiple SPTEs into a small
133  * memory footprint, which in turn improves runtime performance by exploiting
134  * cache locality.
135  *
136  * A list is composed of one or more pte_list_desc objects (descriptors).
137  * Each individual descriptor stores up to PTE_LIST_EXT SPTEs.  If a descriptor
138  * is full and a new SPTE needs to be added, a new descriptor is allocated and
139  * becomes the head of the list.  This means that, by definition, all tail
140  * descriptors are full.
141  *
142  * Note, the metadata fields are deliberately placed at the start of the
143  * structure to optimize the cacheline layout; accessing the descriptor will
144  * touch only a single cacheline so long as @spte_count <= 6 (or if only the
145  * descriptor's metadata is accessed).
146  */
147 struct pte_list_desc {
148 	struct pte_list_desc *more;
149 	/* The number of PTEs stored in _this_ descriptor. */
150 	u32 spte_count;
151 	/* The number of PTEs stored in all tails of this descriptor. */
152 	u32 tail_count;
153 	u64 *sptes[PTE_LIST_EXT];
154 };
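
/*
 * Illustrative sketch (not part of the file proper): with PTE_LIST_EXT == 14,
 * an rmap tracking 30 SPTEs is laid out as a partially-filled head descriptor
 * followed by full tails:
 *
 *   head(spte_count=2, tail_count=28) -> tail(14, 14) -> tail(14, 0)
 *
 * so the total number of SPTEs in a chain is always the head's
 * spte_count + tail_count, as computed by pte_list_count() below.
 */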
155 
156 struct kvm_shadow_walk_iterator {
157 	u64 addr;
158 	hpa_t shadow_addr;
159 	u64 *sptep;
160 	int level;
161 	unsigned index;
162 };
163 
164 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
165 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
166 					 (_root), (_addr));                \
167 	     shadow_walk_okay(&(_walker));			           \
168 	     shadow_walk_next(&(_walker)))
169 
170 #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
171 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
172 	     shadow_walk_okay(&(_walker));			\
173 	     shadow_walk_next(&(_walker)))
174 
175 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
176 	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
177 	     shadow_walk_okay(&(_walker)) &&				\
178 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
179 	     __shadow_walk_next(&(_walker), spte))
180 
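/*
 * Usage sketch of the walker macros above, for illustration only (the
 * shadow_walk_init/okay/next helpers are defined later in this file):
 *
 *   struct kvm_shadow_walk_iterator it;
 *
 *   for_each_shadow_entry(vcpu, addr, it)
 *           <inspect it.sptep / it.level at each paging level>;
 */
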
181 static struct kmem_cache *pte_list_desc_cache;
182 struct kmem_cache *mmu_page_header_cache;
183 
184 static void mmu_spte_set(u64 *sptep, u64 spte);
185 
186 struct kvm_mmu_role_regs {
187 	const unsigned long cr0;
188 	const unsigned long cr4;
189 	const u64 efer;
190 };
191 
192 #define CREATE_TRACE_POINTS
193 #include "mmutrace.h"
194 
195 /*
196  * Yes, lots of underscores.  They're a hint that you probably shouldn't be
197  * reading from the role_regs.  Once the root_role is constructed, it becomes
198  * the single source of truth for the MMU's state.
199  */
200 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
201 static inline bool __maybe_unused					\
202 ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs)		\
203 {									\
204 	return !!(regs->reg & flag);					\
205 }
206 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
207 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
208 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
209 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
210 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
211 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
212 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
213 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
214 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
215 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
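
/*
 * For illustration, what the macro generates; e.g.
 * BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG) above expands to:
 *
 *   static inline bool __maybe_unused
 *   ____is_cr0_pg(const struct kvm_mmu_role_regs *regs)
 *   {
 *           return !!(regs->cr0 & X86_CR0_PG);
 *   }
 */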
216 
217 /*
218  * The MMU itself (with a valid role) is the single source of truth for the
219  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
220  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
221  * and the vCPU may be incorrect/irrelevant.
222  */
223 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
224 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
225 {								\
226 	return !!(mmu->cpu_role. base_or_ext . reg##_##name);	\
227 }
228 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
229 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
230 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
231 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
232 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
233 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
234 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
235 BUILD_MMU_ROLE_ACCESSOR(ext,  efer, lma);
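
/*
 * Likewise for illustration: BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp) above
 * expands to:
 *
 *   static inline bool __maybe_unused is_cr0_wp(struct kvm_mmu *mmu)
 *   {
 *           return !!(mmu->cpu_role.base.cr0_wp);
 *   }
 */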
236 
237 static inline bool is_cr0_pg(struct kvm_mmu *mmu)
238 {
239 	return mmu->cpu_role.base.level > 0;
240 }
241 
242 static inline bool is_cr4_pae(struct kvm_mmu *mmu)
243 {
244 	return !mmu->cpu_role.base.has_4_byte_gpte;
245 }
246 
247 static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
248 {
249 	struct kvm_mmu_role_regs regs = {
250 		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
251 		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
252 		.efer = vcpu->arch.efer,
253 	};
254 
255 	return regs;
256 }
257 
258 static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
259 {
260 	return kvm_read_cr3(vcpu);
261 }
262 
263 static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
264 						  struct kvm_mmu *mmu)
265 {
266 	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
267 		return kvm_read_cr3(vcpu);
268 
269 	return mmu->get_guest_pgd(vcpu);
270 }
271 
272 static inline bool kvm_available_flush_remote_tlbs_range(void)
273 {
274 #if IS_ENABLED(CONFIG_HYPERV)
275 	return kvm_x86_ops.flush_remote_tlbs_range;
276 #else
277 	return false;
278 #endif
279 }
280 
281 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
282 
283 /* Flush the range of guest memory mapped by the given SPTE. */
284 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
285 {
286 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
287 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
288 
289 	kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
290 }
291 
292 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
293 			   unsigned int access)
294 {
295 	u64 spte = make_mmio_spte(vcpu, gfn, access);
296 
297 	trace_mark_mmio_spte(sptep, gfn, spte);
298 	mmu_spte_set(sptep, spte);
299 }
300 
301 static gfn_t get_mmio_spte_gfn(u64 spte)
302 {
303 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
304 
305 	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
306 	       & shadow_nonpresent_or_rsvd_mask;
307 
308 	return gpa >> PAGE_SHIFT;
309 }
310 
311 static unsigned get_mmio_spte_access(u64 spte)
312 {
313 	return spte & shadow_mmio_access_mask;
314 }
315 
316 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
317 {
318 	u64 kvm_gen, spte_gen, gen;
319 
320 	gen = kvm_vcpu_memslots(vcpu)->generation;
321 	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
322 		return false;
323 
324 	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
325 	spte_gen = get_mmio_spte_generation(spte);
326 
327 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
328 	return likely(kvm_gen == spte_gen);
329 }
330 
331 static int is_cpuid_PSE36(void)
332 {
333 	return 1;
334 }
335 
336 #ifdef CONFIG_X86_64
337 static void __set_spte(u64 *sptep, u64 spte)
338 {
339 	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
340 	WRITE_ONCE(*sptep, spte);
341 }
342 
343 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
344 {
345 	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
346 	WRITE_ONCE(*sptep, spte);
347 }
348 
349 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
350 {
351 	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
352 	return xchg(sptep, spte);
353 }
354 
355 static u64 __get_spte_lockless(u64 *sptep)
356 {
357 	return READ_ONCE(*sptep);
358 }
359 #else
360 union split_spte {
361 	struct {
362 		u32 spte_low;
363 		u32 spte_high;
364 	};
365 	u64 spte;
366 };
367 
368 static void count_spte_clear(u64 *sptep, u64 spte)
369 {
370 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
371 
372 	if (is_shadow_present_pte(spte))
373 		return;
374 
375 	/* Ensure the spte is completely set before we increase the count */
376 	smp_wmb();
377 	sp->clear_spte_count++;
378 }
379 
380 static void __set_spte(u64 *sptep, u64 spte)
381 {
382 	union split_spte *ssptep, sspte;
383 
384 	ssptep = (union split_spte *)sptep;
385 	sspte = (union split_spte)spte;
386 
387 	ssptep->spte_high = sspte.spte_high;
388 
389 	/*
390 	 * If we map the spte from nonpresent to present, we should store
391 	 * the high bits first, then set the present bit, so the CPU cannot
392 	 * fetch this spte while we are setting it.
393 	 */
394 	smp_wmb();
395 
396 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
397 }
398 
399 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
400 {
401 	union split_spte *ssptep, sspte;
402 
403 	ssptep = (union split_spte *)sptep;
404 	sspte = (union split_spte)spte;
405 
406 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
407 
408 	/*
409 	 * If we map the spte from present to nonpresent, we should clear
410 	 * the present bit first to prevent a vCPU from fetching the old high bits.
411 	 */
412 	smp_wmb();
413 
414 	ssptep->spte_high = sspte.spte_high;
415 	count_spte_clear(sptep, spte);
416 }
417 
418 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
419 {
420 	union split_spte *ssptep, sspte, orig;
421 
422 	ssptep = (union split_spte *)sptep;
423 	sspte = (union split_spte)spte;
424 
425 	/* xchg acts as a barrier before the setting of the high bits */
426 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
427 	orig.spte_high = ssptep->spte_high;
428 	ssptep->spte_high = sspte.spte_high;
429 	count_spte_clear(sptep, spte);
430 
431 	return orig.spte;
432 }
433 
434 /*
435  * The idea of using a lightweight way to get the spte on x86_32 comes
436  * from gup_get_pte (mm/gup.c).
437  *
438  * An spte tlb flush may be pending, because flushes are coalesced and
439  * we are running outside of the MMU lock.  Therefore
440  * we need to protect against in-progress updates of the spte.
441  *
442  * Reading the spte while an update is in progress may get the old value
443  * for the high part of the spte.  The race is fine for a present->non-present
444  * change (because the high part of the spte is ignored for non-present spte),
445  * but for a present->present change we must reread the spte.
446  *
447  * All such changes are done in two steps (present->non-present and
448  * non-present->present), hence it is enough to count the number of
449  * present->non-present updates: if it changed while reading the spte,
450  * we might have hit the race.  This is done using clear_spte_count.
451  */
452 static u64 __get_spte_lockless(u64 *sptep)
453 {
454 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
455 	union split_spte spte, *orig = (union split_spte *)sptep;
456 	int count;
457 
458 retry:
459 	count = sp->clear_spte_count;
460 	smp_rmb();
461 
462 	spte.spte_low = orig->spte_low;
463 	smp_rmb();
464 
465 	spte.spte_high = orig->spte_high;
466 	smp_rmb();
467 
468 	if (unlikely(spte.spte_low != orig->spte_low ||
469 	      count != sp->clear_spte_count))
470 		goto retry;
471 
472 	return spte.spte;
473 }
474 #endif
475 
476 /* Rules for using mmu_spte_set:
477  * Set the sptep from nonpresent to present.
478  * Note: the sptep being assigned *must* be either not present
479  * or in a state where the hardware will not attempt to update
480  * the spte.
481  */
482 static void mmu_spte_set(u64 *sptep, u64 new_spte)
483 {
484 	WARN_ON_ONCE(is_shadow_present_pte(*sptep));
485 	__set_spte(sptep, new_spte);
486 }
487 
488 /* Rules for using mmu_spte_update:
489  * Update the state bits; the mapped pfn is not changed.
490  *
491  * Returns true if the TLB needs to be flushed
492  */
493 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
494 {
495 	u64 old_spte = *sptep;
496 
497 	WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
498 	check_spte_writable_invariants(new_spte);
499 
500 	if (!is_shadow_present_pte(old_spte)) {
501 		mmu_spte_set(sptep, new_spte);
502 		return false;
503 	}
504 
505 	if (!spte_needs_atomic_update(old_spte))
506 		__update_clear_spte_fast(sptep, new_spte);
507 	else
508 		old_spte = __update_clear_spte_slow(sptep, new_spte);
509 
510 	WARN_ON_ONCE(!is_shadow_present_pte(old_spte) ||
511 		     spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
512 
513 	return leaf_spte_change_needs_tlb_flush(old_spte, new_spte);
514 }
515 
516 /*
517  * Rules for using mmu_spte_clear_track_bits:
518  * It sets the sptep from present to nonpresent, and tracks the
519  * state bits; it is used to clear a last-level sptep.
520  * Returns the old PTE.
521  */
522 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
523 {
524 	u64 old_spte = *sptep;
525 	int level = sptep_to_sp(sptep)->role.level;
526 
527 	if (!is_shadow_present_pte(old_spte) ||
528 	    !spte_needs_atomic_update(old_spte))
529 		__update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
530 	else
531 		old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
532 
533 	if (!is_shadow_present_pte(old_spte))
534 		return old_spte;
535 
536 	kvm_update_page_stats(kvm, level, -1);
537 	return old_spte;
538 }
539 
540 /*
541  * Rules for using mmu_spte_clear_no_track:
542  * Directly clear the spte without tracking its state bits;
543  * it is used to clear an upper-level spte.
544  */
545 static void mmu_spte_clear_no_track(u64 *sptep)
546 {
547 	__update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
548 }
549 
550 static u64 mmu_spte_get_lockless(u64 *sptep)
551 {
552 	return __get_spte_lockless(sptep);
553 }
554 
555 static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
556 {
557 	return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
558 }
559 
560 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
561 {
562 	if (is_tdp_mmu_active(vcpu)) {
563 		kvm_tdp_mmu_walk_lockless_begin();
564 	} else {
565 		/*
566 		 * Prevent page table teardown by making any freer wait for the
567 		 * kvm_flush_remote_tlbs() IPI to all active vCPUs.
568 		 */
569 		local_irq_disable();
570 
571 		/*
572 		 * Make sure a following spte read is not reordered ahead of the write
573 		 * to vcpu->mode.
574 		 */
575 		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
576 	}
577 }
578 
579 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
580 {
581 	if (is_tdp_mmu_active(vcpu)) {
582 		kvm_tdp_mmu_walk_lockless_end();
583 	} else {
584 		/*
585 		 * Make sure the write to vcpu->mode is not reordered in front of
586 		 * reads of sptes.  If it is, kvm_mmu_commit_zap_page() can see us
587 		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
588 		 */
589 		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
590 		local_irq_enable();
591 	}
592 }
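
/*
 * Illustrative pairing of the begin/end helpers above (not a complete
 * walker; e.g. the fast page fault and get_mmio_spte() paths later in this
 * file show real usage):
 *
 *   walk_shadow_page_lockless_begin(vcpu);
 *   for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 *           <examine spte without modifying the page tables>;
 *   walk_shadow_page_lockless_end(vcpu);
 */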
593 
594 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
595 {
596 	int r;
597 
598 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
599 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
600 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
601 	if (r)
602 		return r;
603 	if (kvm_has_mirrored_tdp(vcpu->kvm)) {
604 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_external_spt_cache,
605 					       PT64_ROOT_MAX_LEVEL);
606 		if (r)
607 			return r;
608 	}
609 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
610 				       PT64_ROOT_MAX_LEVEL);
611 	if (r)
612 		return r;
613 	if (maybe_indirect) {
614 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
615 					       PT64_ROOT_MAX_LEVEL);
616 		if (r)
617 			return r;
618 	}
619 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
620 					  PT64_ROOT_MAX_LEVEL);
621 }
622 
623 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
624 {
625 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
626 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
627 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
628 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_external_spt_cache);
629 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
630 }
631 
632 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
633 {
634 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
635 }
636 
637 static bool sp_has_gptes(struct kvm_mmu_page *sp);
638 
639 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
640 {
641 	if (sp->role.passthrough)
642 		return sp->gfn;
643 
644 	if (sp->shadowed_translation)
645 		return sp->shadowed_translation[index] >> PAGE_SHIFT;
646 
647 	return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
648 }
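
/*
 * Worked example for the direct-SP case in kvm_mmu_page_get_gfn() above
 * (illustrative numbers): for a shadow page at level 2 (2MiB mappings) with
 * sp->gfn == 0x400, index 3 yields gfn 0x400 + (3 << 9) = 0xa00, i.e. each
 * index spans 512 4KiB pages.
 */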
649 
650 /*
651  * For leaf SPTEs, fetch the *guest* access permissions being shadowed. Note
652  * that the SPTE itself may have more constrained access permissions than
653  * what the guest enforces. For example, a guest may create an executable
654  * huge PTE but KVM may disallow execution to mitigate iTLB multihit.
655  */
656 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
657 {
658 	if (sp->shadowed_translation)
659 		return sp->shadowed_translation[index] & ACC_ALL;
660 
661 	/*
662 	 * For direct MMUs (e.g. TDP or non-paging guests) or passthrough SPs,
663 	 * KVM is not shadowing any guest page tables, so the "guest access
664 	 * permissions" are just ACC_ALL.
665 	 *
666 	 * For direct SPs in indirect MMUs (shadow paging), i.e. when KVM
667 	 * is shadowing a guest huge page with small pages, the guest access
668 	 * permissions being shadowed are the access permissions of the huge
669 	 * page.
670 	 *
671 	 * In both cases, sp->role.access contains the correct access bits.
672 	 */
673 	return sp->role.access;
674 }
675 
676 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
677 					 gfn_t gfn, unsigned int access)
678 {
679 	if (sp->shadowed_translation) {
680 		sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
681 		return;
682 	}
683 
684 	WARN_ONCE(access != kvm_mmu_page_get_access(sp, index),
685 	          "access mismatch under %s page %llx (expected %u, got %u)\n",
686 	          sp->role.passthrough ? "passthrough" : "direct",
687 	          sp->gfn, kvm_mmu_page_get_access(sp, index), access);
688 
689 	WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index),
690 	          "gfn mismatch under %s page %llx (expected %llx, got %llx)\n",
691 	          sp->role.passthrough ? "passthrough" : "direct",
692 	          sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);
693 }
694 
695 static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
696 				    unsigned int access)
697 {
698 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, index);
699 
700 	kvm_mmu_page_set_translation(sp, index, gfn, access);
701 }
702 
703 /*
704  * Return the pointer to the large page information for a given gfn,
705  * handling slots that are not large page aligned.
706  */
707 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
708 		const struct kvm_memory_slot *slot, int level)
709 {
710 	unsigned long idx;
711 
712 	idx = gfn_to_index(gfn, slot->base_gfn, level);
713 	return &slot->arch.lpage_info[level - 2][idx];
714 }
715 
716 /*
717  * The most significant bit in disallow_lpage tracks whether or not memory
718  * attributes are mixed, i.e. not identical for all gfns at the current level.
719  * The lower order bits are used to refcount other cases where a hugepage is
720  * disallowed, e.g. if KVM is shadowing a page table at the gfn.
721  */
722 #define KVM_LPAGE_MIXED_FLAG	BIT(31)
723 
724 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
725 					    gfn_t gfn, int count)
726 {
727 	struct kvm_lpage_info *linfo;
728 	int old, i;
729 
730 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
731 		linfo = lpage_info_slot(gfn, slot, i);
732 
733 		old = linfo->disallow_lpage;
734 		linfo->disallow_lpage += count;
735 		WARN_ON_ONCE((old ^ linfo->disallow_lpage) & KVM_LPAGE_MIXED_FLAG);
736 	}
737 }
738 
739 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
740 {
741 	update_gfn_disallow_lpage_count(slot, gfn, 1);
742 }
743 
744 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
745 {
746 	update_gfn_disallow_lpage_count(slot, gfn, -1);
747 }
748 
749 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
750 {
751 	struct kvm_memslots *slots;
752 	struct kvm_memory_slot *slot;
753 	gfn_t gfn;
754 
755 	kvm->arch.indirect_shadow_pages++;
756 	/*
757 	 * Ensure indirect_shadow_pages is elevated prior to re-reading guest
758 	 * child PTEs in FNAME(gpte_changed), i.e. guarantee either in-flight
759 	 * emulated writes are visible before re-reading guest PTEs, or that
760 	 * an emulated write will see the elevated count and acquire mmu_lock
761 	 * to update SPTEs.  Pairs with the smp_mb() in kvm_mmu_track_write().
762 	 */
763 	smp_mb();
764 
765 	gfn = sp->gfn;
766 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
767 	slot = __gfn_to_memslot(slots, gfn);
768 
769 	/* Non-leaf shadow pages are kept read-only. */
770 	if (sp->role.level > PG_LEVEL_4K)
771 		return __kvm_write_track_add_gfn(kvm, slot, gfn);
772 
773 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
774 
775 	if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
776 		kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);
777 }
778 
779 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
780 				 enum kvm_mmu_type mmu_type)
781 {
782 	/*
783 	 * If it's possible to replace the shadow page with an NX huge page,
784 	 * i.e. if the shadow page is the only thing currently preventing KVM
785 	 * from using a huge page, add the shadow page to the list of "to be
786 	 * zapped for NX recovery" pages.  Note, the shadow page can already be
787 	 * on the list if KVM is reusing an existing shadow page, i.e. if KVM
788 	 * links a shadow page at multiple points.
789 	 */
790 	if (!list_empty(&sp->possible_nx_huge_page_link))
791 		return;
792 
793 	++kvm->stat.nx_lpage_splits;
794 	++kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages;
795 	list_add_tail(&sp->possible_nx_huge_page_link,
796 		      &kvm->arch.possible_nx_huge_pages[mmu_type].pages);
797 }
798 
799 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
800 				 bool nx_huge_page_possible)
801 {
802 	sp->nx_huge_page_disallowed = true;
803 
804 	if (nx_huge_page_possible)
805 		track_possible_nx_huge_page(kvm, sp, KVM_SHADOW_MMU);
806 }
807 
808 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
809 {
810 	struct kvm_memslots *slots;
811 	struct kvm_memory_slot *slot;
812 	gfn_t gfn;
813 
814 	kvm->arch.indirect_shadow_pages--;
815 	gfn = sp->gfn;
816 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
817 	slot = __gfn_to_memslot(slots, gfn);
818 	if (sp->role.level > PG_LEVEL_4K)
819 		return __kvm_write_track_remove_gfn(kvm, slot, gfn);
820 
821 	kvm_mmu_gfn_allow_lpage(slot, gfn);
822 }
823 
824 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
825 				   enum kvm_mmu_type mmu_type)
826 {
827 	if (list_empty(&sp->possible_nx_huge_page_link))
828 		return;
829 
830 	--kvm->stat.nx_lpage_splits;
831 	--kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages;
832 	list_del_init(&sp->possible_nx_huge_page_link);
833 }
834 
835 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
836 {
837 	sp->nx_huge_page_disallowed = false;
838 
839 	untrack_possible_nx_huge_page(kvm, sp, KVM_SHADOW_MMU);
840 }
841 
842 static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
843 							   gfn_t gfn,
844 							   bool no_dirty_log)
845 {
846 	struct kvm_memory_slot *slot;
847 
848 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
849 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
850 		return NULL;
851 	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
852 		return NULL;
853 
854 	return slot;
855 }
856 
857 /*
858  * About rmap_head encoding:
859  *
860  * If bit zero of rmap_head->val is clear, then it points to the only spte
861  * in this rmap chain. Otherwise, (rmap_head->val & ~3) points to a struct
862  * pte_list_desc containing more mappings.
863  */
864 #define KVM_RMAP_MANY	BIT(0)
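
/*
 * Decode sketch for the encoding above, for illustration only (see
 * rmap_get_first() and kvm_rmap_get() below for the real logic):
 *
 *   val = kvm_rmap_get(rmap_head);
 *   if (!val)                          -> the rmap is empty
 *   else if (!(val & KVM_RMAP_MANY))   -> val is the lone sptep (u64 *)
 *   else                               -> (struct pte_list_desc *)(val & ~KVM_RMAP_MANY)
 *                                         is the head descriptor
 */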
865 
866 /*
867  * rmaps and PTE lists are mostly protected by mmu_lock (the shadow MMU always
868  * operates with mmu_lock held for write), but rmaps can be walked without
869  * holding mmu_lock so long as the caller can tolerate SPTEs in the rmap chain
870  * being zapped/dropped _while the rmap is locked_.
871  *
872  * Other than the KVM_RMAP_LOCKED flag, modifications to rmap entries must be
873  * done while holding mmu_lock for write.  This allows a task walking rmaps
874  * without holding mmu_lock to concurrently walk the same entries as a task
875  * that is holding mmu_lock but _not_ the rmap lock.  Neither task will modify
876  * the rmaps, thus the walks are stable.
877  *
878  * As alluded to above, SPTEs in rmaps are _not_ protected by KVM_RMAP_LOCKED,
879  * only the rmap chains themselves are protected.  E.g. holding an rmap's lock
880  * ensures all "struct pte_list_desc" fields are stable.
881  */
882 #define KVM_RMAP_LOCKED	BIT(1)
883 
884 static unsigned long __kvm_rmap_lock(struct kvm_rmap_head *rmap_head)
885 {
886 	unsigned long old_val, new_val;
887 
888 	lockdep_assert_preemption_disabled();
889 
890 	/*
891 	 * Elide the lock if the rmap is empty, as lockless walkers (read-only
892 	 * mode) don't need to (and can't) walk an empty rmap, nor can they add
893 	 * entries to the rmap.  I.e. the only paths that process empty rmaps
894 	 * do so while holding mmu_lock for write, and are mutually exclusive.
895 	 */
896 	old_val = atomic_long_read(&rmap_head->val);
897 	if (!old_val)
898 		return 0;
899 
900 	do {
901 		/*
902 		 * If the rmap is locked, wait for it to be unlocked before
903 		 * trying to acquire the lock, e.g. to avoid bouncing the cache
904 		 * line.
905 		 */
906 		while (old_val & KVM_RMAP_LOCKED) {
907 			cpu_relax();
908 			old_val = atomic_long_read(&rmap_head->val);
909 		}
910 
911 		/*
912 		 * Recheck for an empty rmap, it may have been purged by the
913 		 * task that held the lock.
914 		 */
915 		if (!old_val)
916 			return 0;
917 
918 		new_val = old_val | KVM_RMAP_LOCKED;
919 	/*
920 	 * Use try_cmpxchg_acquire() to prevent reads and writes to the rmap
921 	 * from being reordered outside of the critical section created by
922 	 * __kvm_rmap_lock().
923 	 *
924 	 * Pairs with the atomic_long_set_release() in kvm_rmap_unlock().
925 	 *
926 	 * For the !old_val case, no ordering is needed, as there is no rmap
927 	 * to walk.
928 	 */
929 	} while (!atomic_long_try_cmpxchg_acquire(&rmap_head->val, &old_val, new_val));
930 
931 	/*
932 	 * Return the old value, i.e. _without_ the LOCKED bit set.  It's
933 	 * impossible for the return value to be 0 (see above), i.e. the read-
934 	 * only unlock flow can't get a false positive and fail to unlock.
935 	 */
936 	return old_val;
937 }
938 
939 static unsigned long kvm_rmap_lock(struct kvm *kvm,
940 				   struct kvm_rmap_head *rmap_head)
941 {
942 	lockdep_assert_held_write(&kvm->mmu_lock);
943 
944 	return __kvm_rmap_lock(rmap_head);
945 }
946 
947 static void __kvm_rmap_unlock(struct kvm_rmap_head *rmap_head,
948 			      unsigned long val)
949 {
950 	KVM_MMU_WARN_ON(val & KVM_RMAP_LOCKED);
951 	/*
952 	 * Ensure that all accesses to the rmap have completed before unlocking
953 	 * the rmap.
954 	 *
955 	 * Pairs with the atomic_long_try_cmpxchg_acquire() in __kvm_rmap_lock().
956 	 */
957 	atomic_long_set_release(&rmap_head->val, val);
958 }
959 
960 static void kvm_rmap_unlock(struct kvm *kvm,
961 			    struct kvm_rmap_head *rmap_head,
962 			    unsigned long new_val)
963 {
964 	lockdep_assert_held_write(&kvm->mmu_lock);
965 
966 	__kvm_rmap_unlock(rmap_head, new_val);
967 }
968 
969 static unsigned long kvm_rmap_get(struct kvm_rmap_head *rmap_head)
970 {
971 	return atomic_long_read(&rmap_head->val) & ~KVM_RMAP_LOCKED;
972 }
973 
974 /*
975  * If mmu_lock isn't held, rmaps can only be locked in read-only mode.  The
976  * actual locking is the same, but the caller is disallowed from modifying the
977  * rmap, and so the unlock flow is a nop if the rmap is/was empty.
978  */
979 static unsigned long kvm_rmap_lock_readonly(struct kvm_rmap_head *rmap_head)
980 {
981 	unsigned long rmap_val;
982 
983 	preempt_disable();
984 	rmap_val = __kvm_rmap_lock(rmap_head);
985 
986 	if (!rmap_val)
987 		preempt_enable();
988 
989 	return rmap_val;
990 }
991 
992 static void kvm_rmap_unlock_readonly(struct kvm_rmap_head *rmap_head,
993 				     unsigned long old_val)
994 {
995 	if (!old_val)
996 		return;
997 
998 	KVM_MMU_WARN_ON(old_val != kvm_rmap_get(rmap_head));
999 
1000 	__kvm_rmap_unlock(rmap_head, old_val);
1001 	preempt_enable();
1002 }
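
/*
 * Typical read-only walk, for illustration (kvm_rmap_age_gfn_range() below
 * is a real example):
 *
 *   rmap_val = kvm_rmap_lock_readonly(rmap_head);
 *   for_each_rmap_spte_lockless(rmap_head, &iter, sptep, spte)
 *           <read spte; the rmap chain itself must not be modified>;
 *   kvm_rmap_unlock_readonly(rmap_head, rmap_val);
 */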
1003 
1004 /*
1005  * Returns the number of pointers in the rmap chain, not counting the new one.
1006  */
1007 static int pte_list_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1008 			u64 *spte, struct kvm_rmap_head *rmap_head)
1009 {
1010 	unsigned long old_val, new_val;
1011 	struct pte_list_desc *desc;
1012 	int count = 0;
1013 
1014 	old_val = kvm_rmap_lock(kvm, rmap_head);
1015 
1016 	if (!old_val) {
1017 		new_val = (unsigned long)spte;
1018 	} else if (!(old_val & KVM_RMAP_MANY)) {
1019 		desc = kvm_mmu_memory_cache_alloc(cache);
1020 		desc->sptes[0] = (u64 *)old_val;
1021 		desc->sptes[1] = spte;
1022 		desc->spte_count = 2;
1023 		desc->tail_count = 0;
1024 		new_val = (unsigned long)desc | KVM_RMAP_MANY;
1025 		++count;
1026 	} else {
1027 		desc = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY);
1028 		count = desc->tail_count + desc->spte_count;
1029 
1030 		/*
1031 		 * If the previous head is full, allocate a new head descriptor
1032 		 * as tail descriptors are always kept full.
1033 		 */
1034 		if (desc->spte_count == PTE_LIST_EXT) {
1035 			desc = kvm_mmu_memory_cache_alloc(cache);
1036 			desc->more = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY);
1037 			desc->spte_count = 0;
1038 			desc->tail_count = count;
1039 			new_val = (unsigned long)desc | KVM_RMAP_MANY;
1040 		} else {
1041 			new_val = old_val;
1042 		}
1043 		desc->sptes[desc->spte_count++] = spte;
1044 	}
1045 
1046 	kvm_rmap_unlock(kvm, rmap_head, new_val);
1047 
1048 	return count;
1049 }
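
/*
 * For illustration: adding to an empty rmap returns 0, adding a second SPTE
 * returns 1, and in general pte_list_add() returns the pre-existing chain
 * length, i.e. desc->tail_count + desc->spte_count before the new entry.
 */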
1050 
1051 static void pte_list_desc_remove_entry(struct kvm *kvm, unsigned long *rmap_val,
1052 				       struct pte_list_desc *desc, int i)
1053 {
1054 	struct pte_list_desc *head_desc = (struct pte_list_desc *)(*rmap_val & ~KVM_RMAP_MANY);
1055 	int j = head_desc->spte_count - 1;
1056 
1057 	/*
1058 	 * The head descriptor should never be empty.  A new head is added only
1059 	 * when adding an entry and the previous head is full, and heads are
1060 	 * removed (this flow) when they become empty.
1061 	 */
1062 	KVM_BUG_ON_DATA_CORRUPTION(j < 0, kvm);
1063 
1064 	/*
1065 	 * Replace the to-be-freed SPTE with the last valid entry from the head
1066 	 * descriptor to ensure that tail descriptors are full at all times.
1067 	 * Note, this also means that tail_count is stable for each descriptor.
1068 	 */
1069 	desc->sptes[i] = head_desc->sptes[j];
1070 	head_desc->sptes[j] = NULL;
1071 	head_desc->spte_count--;
1072 	if (head_desc->spte_count)
1073 		return;
1074 
1075 	/*
1076 	 * The head descriptor is empty.  If there are no tail descriptors,
1077 	 * nullify the rmap head to mark the list as empty, else point the rmap
1078 	 * head at the next descriptor, i.e. the new head.
1079 	 */
1080 	if (!head_desc->more)
1081 		*rmap_val = 0;
1082 	else
1083 		*rmap_val = (unsigned long)head_desc->more | KVM_RMAP_MANY;
1084 	mmu_free_pte_list_desc(head_desc);
1085 }
1086 
1087 static void pte_list_remove(struct kvm *kvm, u64 *spte,
1088 			    struct kvm_rmap_head *rmap_head)
1089 {
1090 	struct pte_list_desc *desc;
1091 	unsigned long rmap_val;
1092 	int i;
1093 
1094 	rmap_val = kvm_rmap_lock(kvm, rmap_head);
1095 	if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_val, kvm))
1096 		goto out;
1097 
1098 	if (!(rmap_val & KVM_RMAP_MANY)) {
1099 		if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_val != spte, kvm))
1100 			goto out;
1101 
1102 		rmap_val = 0;
1103 	} else {
1104 		desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1105 		while (desc) {
1106 			for (i = 0; i < desc->spte_count; ++i) {
1107 				if (desc->sptes[i] == spte) {
1108 					pte_list_desc_remove_entry(kvm, &rmap_val,
1109 								   desc, i);
1110 					goto out;
1111 				}
1112 			}
1113 			desc = desc->more;
1114 		}
1115 
1116 		KVM_BUG_ON_DATA_CORRUPTION(true, kvm);
1117 	}
1118 
1119 out:
1120 	kvm_rmap_unlock(kvm, rmap_head, rmap_val);
1121 }
1122 
1123 static void kvm_zap_one_rmap_spte(struct kvm *kvm,
1124 				  struct kvm_rmap_head *rmap_head, u64 *sptep)
1125 {
1126 	mmu_spte_clear_track_bits(kvm, sptep);
1127 	pte_list_remove(kvm, sptep, rmap_head);
1128 }
1129 
1130 /* Return true if at least one SPTE was zapped, false otherwise */
1131 static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
1132 				   struct kvm_rmap_head *rmap_head)
1133 {
1134 	struct pte_list_desc *desc, *next;
1135 	unsigned long rmap_val;
1136 	int i;
1137 
1138 	rmap_val = kvm_rmap_lock(kvm, rmap_head);
1139 	if (!rmap_val)
1140 		return false;
1141 
1142 	if (!(rmap_val & KVM_RMAP_MANY)) {
1143 		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_val);
1144 		goto out;
1145 	}
1146 
1147 	desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1148 
1149 	for (; desc; desc = next) {
1150 		for (i = 0; i < desc->spte_count; i++)
1151 			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
1152 		next = desc->more;
1153 		mmu_free_pte_list_desc(desc);
1154 	}
1155 out:
1156 	/* The rmap chain is now empty; unlocking with 0 resets rmap_head. */
1157 	kvm_rmap_unlock(kvm, rmap_head, 0);
1158 	return true;
1159 }
1160 
1161 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
1162 {
1163 	unsigned long rmap_val = kvm_rmap_get(rmap_head);
1164 	struct pte_list_desc *desc;
1165 
1166 	if (!rmap_val)
1167 		return 0;
1168 	else if (!(rmap_val & KVM_RMAP_MANY))
1169 		return 1;
1170 
1171 	desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1172 	return desc->tail_count + desc->spte_count;
1173 }
1174 
1175 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1176 					 const struct kvm_memory_slot *slot)
1177 {
1178 	unsigned long idx;
1179 
1180 	idx = gfn_to_index(gfn, slot->base_gfn, level);
1181 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1182 }
1183 
1184 static void rmap_remove(struct kvm *kvm, u64 *spte)
1185 {
1186 	struct kvm_memslots *slots;
1187 	struct kvm_memory_slot *slot;
1188 	struct kvm_mmu_page *sp;
1189 	gfn_t gfn;
1190 	struct kvm_rmap_head *rmap_head;
1191 
1192 	sp = sptep_to_sp(spte);
1193 	gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
1194 
1195 	/*
1196 	 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
1197 	 * so we have to determine which memslots to use based on context
1198 	 * information in sp->role.
1199 	 */
1200 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
1201 
1202 	slot = __gfn_to_memslot(slots, gfn);
1203 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1204 
1205 	pte_list_remove(kvm, spte, rmap_head);
1206 }
1207 
1208 /*
1209  * Used by the following functions to iterate through the sptes linked by a
1210  * rmap.  All fields are private and not meant to be used outside.
1211  */
1212 struct rmap_iterator {
1213 	/* private fields */
1214 	struct kvm_rmap_head *head;
1215 	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
1216 	int pos;			/* index of the sptep */
1217 };
1218 
1219 /*
1220  * Iteration must be started by this function.  This should also be used after
1221  * removing/dropping sptes from the rmap link because in such cases the
1222  * information in the iterator may not be valid.
1223  *
1224  * Returns sptep if found, NULL otherwise.
1225  */
1226 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1227 			   struct rmap_iterator *iter)
1228 {
1229 	unsigned long rmap_val = kvm_rmap_get(rmap_head);
1230 
1231 	if (!rmap_val)
1232 		return NULL;
1233 
1234 	if (!(rmap_val & KVM_RMAP_MANY)) {
1235 		iter->desc = NULL;
1236 		return (u64 *)rmap_val;
1237 	}
1238 
1239 	iter->desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1240 	iter->pos = 0;
1241 	return iter->desc->sptes[iter->pos];
1242 }
1243 
1244 /*
1245  * Must be used with a valid iterator: e.g. after rmap_get_first().
1246  *
1247  * Returns sptep if found, NULL otherwise.
1248  */
1249 static u64 *rmap_get_next(struct rmap_iterator *iter)
1250 {
1251 	if (iter->desc) {
1252 		if (iter->pos < PTE_LIST_EXT - 1) {
1253 			++iter->pos;
1254 			if (iter->desc->sptes[iter->pos])
1255 				return iter->desc->sptes[iter->pos];
1256 		}
1257 
1258 		iter->desc = iter->desc->more;
1259 
1260 		if (iter->desc) {
1261 			iter->pos = 0;
1262 			/* desc->sptes[0] cannot be NULL */
1263 			return iter->desc->sptes[iter->pos];
1264 		}
1265 	}
1266 
1267 	return NULL;
1268 }
1269 
1270 #define __for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)	\
1271 	for (_sptep_ = rmap_get_first(_rmap_head_, _iter_);	\
1272 	     _sptep_; _sptep_ = rmap_get_next(_iter_))
1273 
1274 #define for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)			\
1275 	__for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)			\
1276 		if (!WARN_ON_ONCE(!is_shadow_present_pte(*(_sptep_))))	\
1277 
1278 #define for_each_rmap_spte_lockless(_rmap_head_, _iter_, _sptep_, _spte_)	\
1279 	__for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)			\
1280 		if (is_shadow_present_pte(_spte_ = mmu_spte_get_lockless(_sptep_)))
1281 
1282 static void drop_spte(struct kvm *kvm, u64 *sptep)
1283 {
1284 	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1285 
1286 	if (is_shadow_present_pte(old_spte))
1287 		rmap_remove(kvm, sptep);
1288 }
1289 
1290 static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
1291 {
1292 	struct kvm_mmu_page *sp;
1293 
1294 	sp = sptep_to_sp(sptep);
1295 	WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);
1296 
1297 	drop_spte(kvm, sptep);
1298 
1299 	if (flush)
1300 		kvm_flush_remote_tlbs_sptep(kvm, sptep);
1301 }
1302 
1303 /*
1304  * Write-protect the specified @sptep.  @pt_protect indicates whether the
1305  * spte write-protection is caused by protecting a shadow page table.
1306  *
1307  * Note: write protection differs between dirty logging and spte
1308  * protection:
1309  * - for dirty logging, the spte can be made writable at any time if
1310  *   its dirty bitmap is properly set.
1311  * - for spte protection, the spte can be made writable only after
1312  *   unsync-ing the shadow page.
1313  *
1314  * Return true if the TLB needs to be flushed.
1315  */
1316 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1317 {
1318 	u64 spte = *sptep;
1319 
1320 	if (!is_writable_pte(spte) &&
1321 	    !(pt_protect && is_mmu_writable_spte(spte)))
1322 		return false;
1323 
1324 	if (pt_protect)
1325 		spte &= ~shadow_mmu_writable_mask;
1326 	spte = spte & ~PT_WRITABLE_MASK;
1327 
1328 	return mmu_spte_update(sptep, spte);
1329 }
1330 
1331 static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
1332 			       bool pt_protect)
1333 {
1334 	u64 *sptep;
1335 	struct rmap_iterator iter;
1336 	bool flush = false;
1337 
1338 	for_each_rmap_spte(rmap_head, &iter, sptep)
1339 		flush |= spte_write_protect(sptep, pt_protect);
1340 
1341 	return flush;
1342 }
1343 
1344 static bool spte_clear_dirty(u64 *sptep)
1345 {
1346 	u64 spte = *sptep;
1347 
1348 	KVM_MMU_WARN_ON(!spte_ad_enabled(spte));
1349 	spte &= ~shadow_dirty_mask;
1350 	return mmu_spte_update(sptep, spte);
1351 }
1352 
1353 /*
1354  * Gets the GFN ready for another round of dirty logging by clearing the
1355  *	- D bit on ad-enabled SPTEs, and
1356  *	- W bit on ad-disabled SPTEs.
1357  * Returns true iff any D or W bits were cleared.
1358  */
1359 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1360 			       const struct kvm_memory_slot *slot)
1361 {
1362 	u64 *sptep;
1363 	struct rmap_iterator iter;
1364 	bool flush = false;
1365 
1366 	for_each_rmap_spte(rmap_head, &iter, sptep) {
1367 		if (spte_ad_need_write_protect(*sptep))
1368 			flush |= test_and_clear_bit(PT_WRITABLE_SHIFT,
1369 						    (unsigned long *)sptep);
1370 		else
1371 			flush |= spte_clear_dirty(sptep);
1372 	}
1373 
1374 	return flush;
1375 }
1376 
1377 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1378 				     struct kvm_memory_slot *slot,
1379 				     gfn_t gfn_offset, unsigned long mask)
1380 {
1381 	struct kvm_rmap_head *rmap_head;
1382 
1383 	if (tdp_mmu_enabled)
1384 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1385 				slot->base_gfn + gfn_offset, mask, true);
1386 
1387 	if (!kvm_memslots_have_rmaps(kvm))
1388 		return;
1389 
1390 	while (mask) {
1391 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1392 					PG_LEVEL_4K, slot);
1393 		rmap_write_protect(rmap_head, false);
1394 
1395 		/* clear the first set bit */
1396 		mask &= mask - 1;
1397 	}
1398 }
1399 
1400 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1401 					 struct kvm_memory_slot *slot,
1402 					 gfn_t gfn_offset, unsigned long mask)
1403 {
1404 	struct kvm_rmap_head *rmap_head;
1405 
1406 	if (tdp_mmu_enabled)
1407 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1408 				slot->base_gfn + gfn_offset, mask, false);
1409 
1410 	if (!kvm_memslots_have_rmaps(kvm))
1411 		return;
1412 
1413 	while (mask) {
1414 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1415 					PG_LEVEL_4K, slot);
1416 		__rmap_clear_dirty(kvm, rmap_head, slot);
1417 
1418 		/* clear the first set bit */
1419 		mask &= mask - 1;
1420 	}
1421 }
1422 
1423 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1424 				struct kvm_memory_slot *slot,
1425 				gfn_t gfn_offset, unsigned long mask)
1426 {
1427 	/*
1428 	 * If the slot was assumed to be "initially all dirty", write-protect
1429 	 * huge pages to ensure they are split to 4KiB on the first write (KVM
1430 	 * dirty logs at 4KiB granularity). If eager page splitting is enabled,
1431 	 * immediately try to split huge pages, e.g. so that vCPUs don't get
1432 	 * saddled with the cost of splitting.
1433 	 *
1434 	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
1435 	 * of the memslot has no such restriction, so the range can cross two large
1436 	 * pages.
1437 	 */
1438 	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1439 		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1440 		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1441 
1442 		if (READ_ONCE(eager_page_split))
1443 			kvm_mmu_try_split_huge_pages(kvm, slot, start, end + 1, PG_LEVEL_4K);
1444 
1445 		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1446 
1447 		/* Cross two large pages? */
1448 		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1449 		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1450 			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1451 						       PG_LEVEL_2M);
1452 	}
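
	/*
	 * Worked example for the cross-page check above (illustrative
	 * numbers): with slot->base_gfn = 0x1f0, gfn_offset = 0 and
	 * mask = BIT(0) | BIT(63), start = 0x1f0 and end = 0x22f; the
	 * addresses 0x1f0000 and 0x22f000 round up to different 2MiB
	 * boundaries (0x200000 vs. 0x400000), so both large pages are
	 * write-protected.
	 */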
1453 
1454 	/*
1455 	 * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in
1456 	 * mask.  If PML is enabled and the GFN doesn't need to be write-
1457 	 * protected for other reasons, e.g. shadow paging, clear the Dirty bit.
1458 	 * Otherwise clear the Writable bit.
1459 	 *
1460 	 * Note that kvm_mmu_clear_dirty_pt_masked() is called whenever PML is
1461 	 * enabled, but it chooses between clearing the Dirty bit and the Writable
1462 	 * bit based on the context.
1463 	 */
1464 	if (kvm->arch.cpu_dirty_log_size)
1465 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1466 	else
1467 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1468 }
1469 
1470 int kvm_cpu_dirty_log_size(struct kvm *kvm)
1471 {
1472 	return kvm->arch.cpu_dirty_log_size;
1473 }
1474 
1475 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1476 				    struct kvm_memory_slot *slot, u64 gfn,
1477 				    int min_level)
1478 {
1479 	struct kvm_rmap_head *rmap_head;
1480 	int i;
1481 	bool write_protected = false;
1482 
1483 	if (kvm_memslots_have_rmaps(kvm)) {
1484 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1485 			rmap_head = gfn_to_rmap(gfn, i, slot);
1486 			write_protected |= rmap_write_protect(rmap_head, true);
1487 		}
1488 	}
1489 
1490 	if (tdp_mmu_enabled)
1491 		write_protected |=
1492 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1493 
1494 	return write_protected;
1495 }
1496 
1497 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
1498 {
1499 	struct kvm_memory_slot *slot;
1500 
1501 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1502 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1503 }
1504 
1505 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1506 			 const struct kvm_memory_slot *slot)
1507 {
1508 	return kvm_zap_all_rmap_sptes(kvm, rmap_head);
1509 }
1510 
1511 struct slot_rmap_walk_iterator {
1512 	/* input fields. */
1513 	const struct kvm_memory_slot *slot;
1514 	gfn_t start_gfn;
1515 	gfn_t end_gfn;
1516 	int start_level;
1517 	int end_level;
1518 
1519 	/* output fields. */
1520 	gfn_t gfn;
1521 	struct kvm_rmap_head *rmap;
1522 	int level;
1523 
1524 	/* private field. */
1525 	struct kvm_rmap_head *end_rmap;
1526 };
1527 
1528 static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
1529 				 int level)
1530 {
1531 	iterator->level = level;
1532 	iterator->gfn = iterator->start_gfn;
1533 	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1534 	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1535 }
1536 
1537 static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1538 				const struct kvm_memory_slot *slot,
1539 				int start_level, int end_level,
1540 				gfn_t start_gfn, gfn_t end_gfn)
1541 {
1542 	iterator->slot = slot;
1543 	iterator->start_level = start_level;
1544 	iterator->end_level = end_level;
1545 	iterator->start_gfn = start_gfn;
1546 	iterator->end_gfn = end_gfn;
1547 
1548 	rmap_walk_init_level(iterator, iterator->start_level);
1549 }
1550 
1551 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1552 {
1553 	return !!iterator->rmap;
1554 }
1555 
1556 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1557 {
1558 	while (++iterator->rmap <= iterator->end_rmap) {
1559 		iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level);
1560 
1561 		if (atomic_long_read(&iterator->rmap->val))
1562 			return;
1563 	}
1564 
1565 	if (++iterator->level > iterator->end_level) {
1566 		iterator->rmap = NULL;
1567 		return;
1568 	}
1569 
1570 	rmap_walk_init_level(iterator, iterator->level);
1571 }
1572 
1573 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
1574 	   _start_gfn, _end_gfn, _iter_)				\
1575 	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
1576 				 _end_level_, _start_gfn, _end_gfn);	\
1577 	     slot_rmap_walk_okay(_iter_);				\
1578 	     slot_rmap_walk_next(_iter_))
1579 
1580 /* The return value indicates whether a TLB flush on all vCPUs is needed. */
1581 typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
1582 				    struct kvm_rmap_head *rmap_head,
1583 				    const struct kvm_memory_slot *slot);
1584 
1585 static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
1586 					      const struct kvm_memory_slot *slot,
1587 					      slot_rmaps_handler fn,
1588 					      int start_level, int end_level,
1589 					      gfn_t start_gfn, gfn_t end_gfn,
1590 					      bool can_yield, bool flush_on_yield,
1591 					      bool flush)
1592 {
1593 	struct slot_rmap_walk_iterator iterator;
1594 
1595 	lockdep_assert_held_write(&kvm->mmu_lock);
1596 
1597 	for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
1598 			end_gfn, &iterator) {
1599 		if (iterator.rmap)
1600 			flush |= fn(kvm, iterator.rmap, slot);
1601 
1602 		if (!can_yield)
1603 			continue;
1604 
1605 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
1606 			if (flush && flush_on_yield) {
1607 				kvm_flush_remote_tlbs_range(kvm, start_gfn,
1608 							    iterator.gfn - start_gfn + 1);
1609 				flush = false;
1610 			}
1611 			cond_resched_rwlock_write(&kvm->mmu_lock);
1612 		}
1613 	}
1614 
1615 	return flush;
1616 }
1617 
1618 static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
1619 					    const struct kvm_memory_slot *slot,
1620 					    slot_rmaps_handler fn,
1621 					    int start_level, int end_level,
1622 					    bool flush_on_yield)
1623 {
1624 	return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
1625 				 slot->base_gfn, slot->base_gfn + slot->npages - 1,
1626 				 true, flush_on_yield, false);
1627 }
1628 
1629 static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
1630 					       const struct kvm_memory_slot *slot,
1631 					       slot_rmaps_handler fn,
1632 					       bool flush_on_yield)
1633 {
1634 	return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
1635 }
1636 
1637 static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm,
1638 				     const struct kvm_memory_slot *slot,
1639 				     gfn_t start, gfn_t end, bool can_yield,
1640 				     bool flush)
1641 {
1642 	return __walk_slot_rmaps(kvm, slot, kvm_zap_rmap,
1643 				 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1644 				 start, end - 1, can_yield, true, flush);
1645 }
1646 
1647 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1648 {
1649 	bool flush = false;
1650 
1651 	/*
1652 	 * To prevent races with vCPUs faulting in a gfn using stale data,
1653 	 * zapping a gfn range must be protected by mmu_invalidate_in_progress
1654 	 * (and mmu_invalidate_seq).  The only exception is memslot deletion;
1655 	 * in that case, SRCU synchronization ensures that SPTEs are zapped
1656 	 * after all vCPUs have unlocked SRCU, guaranteeing that vCPUs see the
1657 	 * invalid slot.
1658 	 */
1659 	lockdep_assert_once(kvm->mmu_invalidate_in_progress ||
1660 			    lockdep_is_held(&kvm->slots_lock));
1661 
1662 	if (kvm_memslots_have_rmaps(kvm))
1663 		flush = __kvm_rmap_zap_gfn_range(kvm, range->slot,
1664 						 range->start, range->end,
1665 						 range->may_block, flush);
1666 
1667 	if (tdp_mmu_enabled)
1668 		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1669 
1670 	if (kvm_x86_ops.set_apic_access_page_addr &&
1671 	    range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
1672 		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
1673 
1674 	return flush;
1675 }
1676 
1677 #define RMAP_RECYCLE_THRESHOLD 1000
1678 
1679 static void __rmap_add(struct kvm *kvm,
1680 		       struct kvm_mmu_memory_cache *cache,
1681 		       const struct kvm_memory_slot *slot,
1682 		       u64 *spte, gfn_t gfn, unsigned int access)
1683 {
1684 	struct kvm_mmu_page *sp;
1685 	struct kvm_rmap_head *rmap_head;
1686 	int rmap_count;
1687 
1688 	sp = sptep_to_sp(spte);
1689 	kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1690 	kvm_update_page_stats(kvm, sp->role.level, 1);
1691 
1692 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1693 	rmap_count = pte_list_add(kvm, cache, spte, rmap_head);
1694 
1695 	if (rmap_count > kvm->stat.max_mmu_rmap_size)
1696 		kvm->stat.max_mmu_rmap_size = rmap_count;
1697 	if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
1698 		kvm_zap_all_rmap_sptes(kvm, rmap_head);
1699 		kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1700 	}
1701 }
1702 
1703 static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
1704 		     u64 *spte, gfn_t gfn, unsigned int access)
1705 {
1706 	struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
1707 
1708 	__rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1709 }
1710 
1711 static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
1712 				   struct kvm_gfn_range *range,
1713 				   bool test_only)
1714 {
1715 	struct kvm_rmap_head *rmap_head;
1716 	struct rmap_iterator iter;
1717 	unsigned long rmap_val;
1718 	bool young = false;
1719 	u64 *sptep;
1720 	gfn_t gfn;
1721 	int level;
1722 	u64 spte;
1723 
1724 	for (level = PG_LEVEL_4K; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
1725 		for (gfn = range->start; gfn < range->end;
1726 		     gfn += KVM_PAGES_PER_HPAGE(level)) {
1727 			rmap_head = gfn_to_rmap(gfn, level, range->slot);
1728 			rmap_val = kvm_rmap_lock_readonly(rmap_head);
1729 
1730 			for_each_rmap_spte_lockless(rmap_head, &iter, sptep, spte) {
1731 				if (!is_accessed_spte(spte))
1732 					continue;
1733 
1734 				if (test_only) {
1735 					kvm_rmap_unlock_readonly(rmap_head, rmap_val);
1736 					return true;
1737 				}
1738 
1739 				if (spte_ad_enabled(spte))
1740 					clear_bit((ffs(shadow_accessed_mask) - 1),
1741 						  (unsigned long *)sptep);
1742 				else
1743 					/*
1744 					 * If the following cmpxchg fails, the
1745 					 * spte is being concurrently modified
1746 					 * and should most likely stay young.
1747 					 */
1748 					cmpxchg64(sptep, spte,
1749 					      mark_spte_for_access_track(spte));
1750 				young = true;
1751 			}
1752 
1753 			kvm_rmap_unlock_readonly(rmap_head, rmap_val);
1754 		}
1755 	}
1756 	return young;
1757 }
1758 
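/*
 * Illustrative sketch of the aging above (assumptions flagged inline): with
 * A/D bits enabled, aging clears only the Accessed bit in place.  E.g. on
 * VMX with EPT A/D bits, shadow_accessed_mask is the EPT Accessed bit
 * (bit 8), so the clear_bit() above reduces to:
 *
 *	ffs(shadow_accessed_mask) - 1 == 8
 *	clear_bit(8, (unsigned long *)sptep);	// atomic, SPTE stays present
 *
 * Without A/D bits, the SPTE is instead atomically converted to its
 * access-tracked form via cmpxchg64() and is made young again by the fast
 * page fault path on the next guest access.
 */
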
1759 static bool kvm_may_have_shadow_mmu_sptes(struct kvm *kvm)
1760 {
1761 	return !tdp_mmu_enabled || READ_ONCE(kvm->arch.indirect_shadow_pages);
1762 }
1763 
1764 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1765 {
1766 	bool young = false;
1767 
1768 	if (tdp_mmu_enabled)
1769 		young = kvm_tdp_mmu_age_gfn_range(kvm, range);
1770 
1771 	if (kvm_may_have_shadow_mmu_sptes(kvm))
1772 		young |= kvm_rmap_age_gfn_range(kvm, range, false);
1773 
1774 	return young;
1775 }
1776 
1777 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1778 {
1779 	bool young = false;
1780 
1781 	if (tdp_mmu_enabled)
1782 		young = kvm_tdp_mmu_test_age_gfn(kvm, range);
1783 
1784 	if (young)
1785 		return young;
1786 
1787 	if (kvm_may_have_shadow_mmu_sptes(kvm))
1788 		young |= kvm_rmap_age_gfn_range(kvm, range, true);
1789 
1790 	return young;
1791 }
1792 
1793 static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
1794 {
1795 #ifdef CONFIG_KVM_PROVE_MMU
1796 	int i;
1797 
1798 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
1799 		if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i])))
1800 			pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free\n",
1801 					   sp->spt[i], &sp->spt[i],
1802 					   kvm_mmu_page_get_gfn(sp, i));
1803 	}
1804 #endif
1805 }
1806 
1807 static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1808 {
1809 	kvm->arch.n_used_mmu_pages++;
1810 	kvm_account_pgtable_pages((void *)sp->spt, +1);
1811 }
1812 
1813 static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1814 {
1815 	kvm->arch.n_used_mmu_pages--;
1816 	kvm_account_pgtable_pages((void *)sp->spt, -1);
1817 }
1818 
1819 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
1820 {
1821 	kvm_mmu_check_sptes_at_free(sp);
1822 
1823 	hlist_del(&sp->hash_link);
1824 	list_del(&sp->link);
1825 	free_page((unsigned long)sp->spt);
1826 	free_page((unsigned long)sp->shadowed_translation);
1827 	kmem_cache_free(mmu_page_header_cache, sp);
1828 }
1829 
1830 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1831 {
1832 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1833 }
1834 
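/*
 * Illustrative sketch: hash_64() folds the gfn down to KVM_MMU_HASH_SHIFT
 * bits, so every shadow page for a given gfn lands in a single bucket of
 * the mmu_page_hash table (e.g. with KVM_MMU_HASH_SHIFT == 12, buckets
 * 0..4095):
 *
 *	bucket  = hash_64(gfn, KVM_MMU_HASH_SHIFT);
 *	sp_list = &kvm->arch.mmu_page_hash[bucket];
 *
 * Distinct gfns may collide in a bucket; lookups track the worst case in
 * kvm->stat.max_mmu_page_hash_collisions (see kvm_mmu_find_shadow_page()).
 */
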
1835 static void mmu_page_add_parent_pte(struct kvm *kvm,
1836 				    struct kvm_mmu_memory_cache *cache,
1837 				    struct kvm_mmu_page *sp, u64 *parent_pte)
1838 {
1839 	if (!parent_pte)
1840 		return;
1841 
1842 	pte_list_add(kvm, cache, parent_pte, &sp->parent_ptes);
1843 }
1844 
1845 static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1846 				       u64 *parent_pte)
1847 {
1848 	pte_list_remove(kvm, parent_pte, &sp->parent_ptes);
1849 }
1850 
1851 static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1852 			    u64 *parent_pte)
1853 {
1854 	mmu_page_remove_parent_pte(kvm, sp, parent_pte);
1855 	mmu_spte_clear_no_track(parent_pte);
1856 }
1857 
1858 static void mark_unsync(u64 *spte);
1859 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1860 {
1861 	u64 *sptep;
1862 	struct rmap_iterator iter;
1863 
1864 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1865 		mark_unsync(sptep);
1866 	}
1867 }
1868 
1869 static void mark_unsync(u64 *spte)
1870 {
1871 	struct kvm_mmu_page *sp;
1872 
1873 	sp = sptep_to_sp(spte);
1874 	if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap))
1875 		return;
1876 	if (sp->unsync_children++)
1877 		return;
1878 	kvm_mmu_mark_parents_unsync(sp);
1879 }
1880 
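/*
 * Illustrative walk-through: marking a 4K shadow PT unsync propagates up
 * through every parent SPTE that points at it.  For a chain
 * PML4E -> PDPTE -> PDE pointing at the unsync PT:
 *
 *	mark_unsync(PDE)	sets the PDE's bit in the PD's
 *				unsync_child_bitmap, unsync_children 0 -> 1
 *	mark_unsync(PDPTE)	ditto for the PDPT
 *	mark_unsync(PML4E)	ditto for the root
 *
 * unsync_children doubles as the early out: if a parent already had other
 * unsync children, its own parents were marked previously and the
 * recursion stops.
 */
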
1881 #define KVM_PAGE_ARRAY_NR 16
1882 
1883 struct kvm_mmu_pages {
1884 	struct mmu_page_and_offset {
1885 		struct kvm_mmu_page *sp;
1886 		unsigned int idx;
1887 	} page[KVM_PAGE_ARRAY_NR];
1888 	unsigned int nr;
1889 };
1890 
1891 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1892 			 int idx)
1893 {
1894 	int i;
1895 
1896 	if (sp->unsync)
1897 		for (i = 0; i < pvec->nr; i++)
1898 			if (pvec->page[i].sp == sp)
1899 				return 0;
1900 
1901 	pvec->page[pvec->nr].sp = sp;
1902 	pvec->page[pvec->nr].idx = idx;
1903 	pvec->nr++;
1904 	return (pvec->nr == KVM_PAGE_ARRAY_NR);
1905 }
1906 
1907 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1908 {
1909 	--sp->unsync_children;
1910 	WARN_ON_ONCE((int)sp->unsync_children < 0);
1911 	__clear_bit(idx, sp->unsync_child_bitmap);
1912 }
1913 
1914 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1915 			   struct kvm_mmu_pages *pvec)
1916 {
1917 	int i, ret, nr_unsync_leaf = 0;
1918 
1919 	for_each_set_bit(i, sp->unsync_child_bitmap, SPTE_ENT_PER_PAGE) {
1920 		struct kvm_mmu_page *child;
1921 		u64 ent = sp->spt[i];
1922 
1923 		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1924 			clear_unsync_child_bit(sp, i);
1925 			continue;
1926 		}
1927 
1928 		child = spte_to_child_sp(ent);
1929 
1930 		if (child->unsync_children) {
1931 			if (mmu_pages_add(pvec, child, i))
1932 				return -ENOSPC;
1933 
1934 			ret = __mmu_unsync_walk(child, pvec);
1935 			if (!ret) {
1936 				clear_unsync_child_bit(sp, i);
1937 				continue;
1938 			} else if (ret > 0) {
1939 				nr_unsync_leaf += ret;
1940 			} else
1941 				return ret;
1942 		} else if (child->unsync) {
1943 			nr_unsync_leaf++;
1944 			if (mmu_pages_add(pvec, child, i))
1945 				return -ENOSPC;
1946 		} else
1947 			clear_unsync_child_bit(sp, i);
1948 	}
1949 
1950 	return nr_unsync_leaf;
1951 }
1952 
1953 #define INVALID_INDEX (-1)
1954 
1955 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1956 			   struct kvm_mmu_pages *pvec)
1957 {
1958 	pvec->nr = 0;
1959 	if (!sp->unsync_children)
1960 		return 0;
1961 
1962 	mmu_pages_add(pvec, sp, INVALID_INDEX);
1963 	return __mmu_unsync_walk(sp, pvec);
1964 }
1965 
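/*
 * Illustrative usage (cf. mmu_sync_children() below): the walk is simply
 * restarted until no unsync children remain.  The pvec holds at most
 * KVM_PAGE_ARRAY_NR entries, and a full pvec returns -ENOSPC, which is
 * still non-zero, i.e. "keep going":
 *
 *	while (mmu_unsync_walk(parent, &pages)) {
 *		for_each_sp(pages, sp, parents, i)
 *			... sync or zap sp, clearing its unsync state ...
 *	}
 */
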
1966 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1967 {
1968 	WARN_ON_ONCE(!sp->unsync);
1969 	trace_kvm_mmu_sync_page(sp);
1970 	sp->unsync = 0;
1971 	--kvm->stat.mmu_unsync;
1972 }
1973 
1974 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1975 				     struct list_head *invalid_list);
1976 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1977 				    struct list_head *invalid_list);
1978 
1979 static bool sp_has_gptes(struct kvm_mmu_page *sp)
1980 {
1981 	if (sp->role.direct)
1982 		return false;
1983 
1984 	if (sp->role.passthrough)
1985 		return false;
1986 
1987 	return true;
1988 }
1989 
1990 static __ro_after_init HLIST_HEAD(empty_page_hash);
1991 
1992 static struct hlist_head *kvm_get_mmu_page_hash(struct kvm *kvm, gfn_t gfn)
1993 {
1994 	/*
1995 	 * Ensure the load of the hash table pointer itself is ordered before
1996 	 * loads to walk the table.  The pointer is set at runtime outside of
1997 	 * mmu_lock when the TDP MMU is enabled, i.e. when the hash table of
1998 	 * shadow pages becomes necessary only when KVM needs to shadow L1's
1999 	 * TDP for an L2 guest.  Pairs with the smp_store_release() in
2000 	 * kvm_mmu_alloc_page_hash().
2001 	 */
2002 	struct hlist_head *page_hash = smp_load_acquire(&kvm->arch.mmu_page_hash);
2003 
2004 	lockdep_assert_held(&kvm->mmu_lock);
2005 
2006 	if (!page_hash)
2007 		return &empty_page_hash;
2008 
2009 	return &page_hash[kvm_page_table_hashfn(gfn)];
2010 }
2011 
2012 #define for_each_valid_sp(_kvm, _sp, _list)				\
2013 	hlist_for_each_entry(_sp, _list, hash_link)			\
2014 		if (is_obsolete_sp((_kvm), (_sp))) {			\
2015 		} else
2016 
2017 #define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)		\
2018 	for_each_valid_sp(_kvm, _sp, kvm_get_mmu_page_hash(_kvm, _gfn))	\
2019 		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
2020 
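/*
 * Note the "{} else" construction in the filters above: entries failing
 * the check fall into the empty if-body, and the statement the caller
 * writes after the macro becomes the else-body.  This skips filtered
 * entries without dangling-else surprises, e.g. (cf.
 * mmu_try_to_unsync_pages()):
 *
 *	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
 *		if (sp->unsync)
 *			continue;
 *		...
 *	}
 */
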
2021 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2022 {
2023 	union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
2024 
2025 	/*
2026 	 * Ignore various flags when verifying that it's safe to sync a shadow
2027 	 * page using the current MMU context.
2028 	 *
2029 	 *  - level: not part of the overall MMU role and will never match as the MMU's
2030 	 *           level tracks the root level
2031 	 *  - access: updated based on the new guest PTE
2032 	 *  - quadrant: not part of the overall MMU role (similar to level)
2033 	 */
2034 	const union kvm_mmu_page_role sync_role_ign = {
2035 		.level = 0xf,
2036 		.access = 0x7,
2037 		.quadrant = 0x3,
2038 		.passthrough = 0x1,
2039 	};
2040 
2041 	/*
2042 	 * Direct pages can never be unsync, and KVM should never attempt to
2043 	 * sync a shadow page for a different MMU context, e.g. if the role
2044 	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
2045 	 * reserved bits checks will be wrong, etc...
2046 	 */
2047 	if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
2048 			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
2049 		return false;
2050 
2051 	return true;
2052 }
2053 
2054 static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
2055 {
2056 	/* sp->spt[i] still holds the initial value from shadow page allocation. */
2057 	if (sp->spt[i] == SHADOW_NONPRESENT_VALUE)
2058 		return 0;
2059 
2060 	return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
2061 }
2062 
2063 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2064 {
2065 	int flush = 0;
2066 	int i;
2067 
2068 	if (!kvm_sync_page_check(vcpu, sp))
2069 		return -1;
2070 
2071 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
2072 		int ret = kvm_sync_spte(vcpu, sp, i);
2073 
2074 		if (ret < 0)
2075 			return -1;
2076 		flush |= ret;
2077 	}
2078 
2079 	/*
2080 	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
2081 	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
2082 	 * unmap or dirty logging event doesn't fail to flush.  The guest is
2083 	 * responsible for flushing the TLB to ensure any changes in protection
2084 	 * bits are recognized, i.e. until the guest flushes or page faults on
2085 	 * a relevant address, KVM is architecturally allowed to let vCPUs use
2086 	 * cached translations with the old protection bits.
2087 	 */
2088 	return flush;
2089 }
2090 
2091 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2092 			 struct list_head *invalid_list)
2093 {
2094 	int ret = __kvm_sync_page(vcpu, sp);
2095 
2096 	if (ret < 0)
2097 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2098 	return ret;
2099 }
2100 
2101 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
2102 					struct list_head *invalid_list,
2103 					bool remote_flush)
2104 {
2105 	if (!remote_flush && list_empty(invalid_list))
2106 		return false;
2107 
2108 	if (!list_empty(invalid_list))
2109 		kvm_mmu_commit_zap_page(kvm, invalid_list);
2110 	else
2111 		kvm_flush_remote_tlbs(kvm);
2112 	return true;
2113 }
2114 
2115 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
2116 {
2117 	if (sp->role.invalid)
2118 		return true;
2119 
2120 	/* TDP MMU pages do not use the MMU generation. */
2121 	return !is_tdp_mmu_page(sp) &&
2122 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2123 }
2124 
2125 struct mmu_page_path {
2126 	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
2127 	unsigned int idx[PT64_ROOT_MAX_LEVEL];
2128 };
2129 
2130 #define for_each_sp(pvec, sp, parents, i)			\
2131 		for (i = mmu_pages_first(&pvec, &parents);	\
2132 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
2133 			i = mmu_pages_next(&pvec, &parents, i))
2134 
2135 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
2136 			  struct mmu_page_path *parents,
2137 			  int i)
2138 {
2139 	int n;
2140 
2141 	for (n = i + 1; n < pvec->nr; n++) {
2142 		struct kvm_mmu_page *sp = pvec->page[n].sp;
2143 		unsigned idx = pvec->page[n].idx;
2144 		int level = sp->role.level;
2145 
2146 		parents->idx[level-1] = idx;
2147 		if (level == PG_LEVEL_4K)
2148 			break;
2149 
2150 		parents->parent[level-2] = sp;
2151 	}
2152 
2153 	return n;
2154 }
2155 
2156 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
2157 			   struct mmu_page_path *parents)
2158 {
2159 	struct kvm_mmu_page *sp;
2160 	int level;
2161 
2162 	if (pvec->nr == 0)
2163 		return 0;
2164 
2165 	WARN_ON_ONCE(pvec->page[0].idx != INVALID_INDEX);
2166 
2167 	sp = pvec->page[0].sp;
2168 	level = sp->role.level;
2169 	WARN_ON_ONCE(level == PG_LEVEL_4K);
2170 
2171 	parents->parent[level-2] = sp;
2172 
2173 	/* Also set up a sentinel.  Further entries in pvec are all
2174 	 * children of sp, so this element is never overwritten.
2175 	 */
2176 	parents->parent[level-1] = NULL;
2177 	return mmu_pages_next(pvec, parents, 0);
2178 }
2179 
2180 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2181 {
2182 	struct kvm_mmu_page *sp;
2183 	unsigned int level = 0;
2184 
2185 	do {
2186 		unsigned int idx = parents->idx[level];
2187 		sp = parents->parent[level];
2188 		if (!sp)
2189 			return;
2190 
2191 		WARN_ON_ONCE(idx == INVALID_INDEX);
2192 		clear_unsync_child_bit(sp, idx);
2193 		level++;
2194 	} while (!sp->unsync_children);
2195 }
2196 
2197 static int mmu_sync_children(struct kvm_vcpu *vcpu,
2198 			     struct kvm_mmu_page *parent, bool can_yield)
2199 {
2200 	int i;
2201 	struct kvm_mmu_page *sp;
2202 	struct mmu_page_path parents;
2203 	struct kvm_mmu_pages pages;
2204 	LIST_HEAD(invalid_list);
2205 	bool flush = false;
2206 
2207 	while (mmu_unsync_walk(parent, &pages)) {
2208 		bool protected = false;
2209 
2210 		for_each_sp(pages, sp, parents, i)
2211 			protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
2212 
2213 		if (protected) {
2214 			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
2215 			flush = false;
2216 		}
2217 
2218 		for_each_sp(pages, sp, parents, i) {
2219 			kvm_unlink_unsync_page(vcpu->kvm, sp);
2220 			flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
2221 			mmu_pages_clear_parents(&parents);
2222 		}
2223 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2224 			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2225 			if (!can_yield) {
2226 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2227 				return -EINTR;
2228 			}
2229 
2230 			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2231 			flush = false;
2232 		}
2233 	}
2234 
2235 	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2236 	return 0;
2237 }
2238 
2239 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2240 {
2241 	atomic_set(&sp->write_flooding_count, 0);
2242 }
2243 
2244 static void clear_sp_write_flooding_count(u64 *spte)
2245 {
2246 	__clear_sp_write_flooding_count(sptep_to_sp(spte));
2247 }
2248 
2249 /*
2250  * The vCPU is required when finding indirect shadow pages; the shadow
2251  * page may already exist and syncing it needs the vCPU pointer in
2252  * order to read guest page tables.  Direct shadow pages are never
2253  * unsync, thus @vcpu can be NULL if @role.direct is true.
2254  */
2255 static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
2256 						     struct kvm_vcpu *vcpu,
2257 						     gfn_t gfn,
2258 						     struct hlist_head *sp_list,
2259 						     union kvm_mmu_page_role role)
2260 {
2261 	struct kvm_mmu_page *sp;
2262 	int ret;
2263 	int collisions = 0;
2264 	LIST_HEAD(invalid_list);
2265 
2266 	for_each_valid_sp(kvm, sp, sp_list) {
2267 		if (sp->gfn != gfn) {
2268 			collisions++;
2269 			continue;
2270 		}
2271 
2272 		if (sp->role.word != role.word) {
2273 			/*
2274 			 * If the guest is creating an upper-level page, zap
2275 			 * unsync pages for the same gfn.  While it's possible
2276 			 * the guest is using recursive page tables, in all
2277 			 * likelihood the guest has stopped using the unsync
2278 			 * page and is installing a completely unrelated page.
2279 			 * Unsync pages must not be left as is, because the new
2280 			 * upper-level page will be write-protected.
2281 			 */
2282 			if (role.level > PG_LEVEL_4K && sp->unsync)
2283 				kvm_mmu_prepare_zap_page(kvm, sp,
2284 							 &invalid_list);
2285 			continue;
2286 		}
2287 
2288 		/* unsync and write-flooding only apply to indirect SPs. */
2289 		if (sp->role.direct)
2290 			goto out;
2291 
2292 		if (sp->unsync) {
2293 			if (KVM_BUG_ON(!vcpu, kvm))
2294 				break;
2295 
2296 			/*
2297 			 * The page is good, but is stale.  kvm_sync_page does
2298 			 * get the latest guest state, but (unlike mmu_unsync_children)
2299 			 * it doesn't write-protect the page or mark it synchronized!
2300 			 * This way the validity of the mapping is ensured, but the
2301 			 * overhead of write protection is not incurred until the
2302 			 * guest invalidates the TLB mapping.  This allows multiple
2303 			 * SPs for a single gfn to be unsync.
2304 			 *
2305 			 * If the sync fails, the page is zapped.  If so, break
2306 			 * in order to rebuild it.
2307 			 */
2308 			ret = kvm_sync_page(vcpu, sp, &invalid_list);
2309 			if (ret < 0)
2310 				break;
2311 
2312 			WARN_ON_ONCE(!list_empty(&invalid_list));
2313 			if (ret > 0)
2314 				kvm_flush_remote_tlbs(kvm);
2315 		}
2316 
2317 		__clear_sp_write_flooding_count(sp);
2318 
2319 		goto out;
2320 	}
2321 
2322 	sp = NULL;
2323 	++kvm->stat.mmu_cache_miss;
2324 
2325 out:
2326 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2327 
2328 	if (collisions > kvm->stat.max_mmu_page_hash_collisions)
2329 		kvm->stat.max_mmu_page_hash_collisions = collisions;
2330 	return sp;
2331 }
2332 
2333 /* Caches used when allocating a new shadow page. */
2334 struct shadow_page_caches {
2335 	struct kvm_mmu_memory_cache *page_header_cache;
2336 	struct kvm_mmu_memory_cache *shadow_page_cache;
2337 	struct kvm_mmu_memory_cache *shadowed_info_cache;
2338 };
2339 
2340 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
2341 						      struct shadow_page_caches *caches,
2342 						      gfn_t gfn,
2343 						      struct hlist_head *sp_list,
2344 						      union kvm_mmu_page_role role)
2345 {
2346 	struct kvm_mmu_page *sp;
2347 
2348 	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
2349 	sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
2350 	if (!role.direct && role.level <= KVM_MAX_HUGEPAGE_LEVEL)
2351 		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
2352 
2353 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
2354 
2355 	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
2356 
2357 	/*
2358 	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
2359 	 * depends on valid pages being added to the head of the list.  See
2360 	 * comments in kvm_zap_obsolete_pages().
2361 	 */
2362 	sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
2363 	list_add(&sp->link, &kvm->arch.active_mmu_pages);
2364 	kvm_account_mmu_page(kvm, sp);
2365 
2366 	sp->gfn = gfn;
2367 	sp->role = role;
2368 	hlist_add_head(&sp->hash_link, sp_list);
2369 	if (sp_has_gptes(sp))
2370 		account_shadowed(kvm, sp);
2371 
2372 	return sp;
2373 }
2374 
2375 /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
2376 static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
2377 						      struct kvm_vcpu *vcpu,
2378 						      struct shadow_page_caches *caches,
2379 						      gfn_t gfn,
2380 						      union kvm_mmu_page_role role)
2381 {
2382 	struct hlist_head *sp_list;
2383 	struct kvm_mmu_page *sp;
2384 	bool created = false;
2385 
2386 	/*
2387 	 * No need for memory barriers, unlike in kvm_get_mmu_page_hash(), as
2388 	 * mmu_page_hash must be set prior to creating the first shadow root,
2389 	 * i.e. reaching this point is fully serialized by slots_arch_lock.
2390 	 */
2391 	BUG_ON(!kvm->arch.mmu_page_hash);
2392 	sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2393 
2394 	sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2395 	if (!sp) {
2396 		created = true;
2397 		sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2398 	}
2399 
2400 	trace_kvm_mmu_get_page(sp, created);
2401 	return sp;
2402 }
2403 
2404 static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
2405 						    gfn_t gfn,
2406 						    union kvm_mmu_page_role role)
2407 {
2408 	struct shadow_page_caches caches = {
2409 		.page_header_cache = &vcpu->arch.mmu_page_header_cache,
2410 		.shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
2411 		.shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
2412 	};
2413 
2414 	return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
2415 }
2416 
2417 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct,
2418 						  unsigned int access)
2419 {
2420 	struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);
2421 	union kvm_mmu_page_role role;
2422 
2423 	role = parent_sp->role;
2424 	role.level--;
2425 	role.access = access;
2426 	role.direct = direct;
2427 	role.passthrough = 0;
2428 
2429 	/*
2430 	 * If the guest has 4-byte PTEs then that means it's using 32-bit,
2431 	 * 2-level, non-PAE paging. KVM shadows such guests with PAE paging
2432 	 * (i.e. 8-byte PTEs). The difference in PTE size means that KVM must
2433 	 * shadow each guest page table with multiple shadow page tables, which
2434 	 * requires extra bookkeeping in the role.
2435 	 *
2436 	 * Specifically, to shadow the guest's page directory (which covers a
2437 	 * 4GiB address space), KVM uses 4 PAE page directories, each mapping
2438 	 * 1GiB of the address space. @role.quadrant encodes which quarter of
2439 	 * the address space each maps.
2440 	 *
2441 	 * To shadow the guest's page tables (which each map a 4MiB region), KVM
2442 	 * uses 2 PAE page tables, each mapping a 2MiB region. For these,
2443 	 * @role.quadrant encodes which half of the region they map.
2444 	 *
2445 	 * Concretely, a 4-byte PDE consumes bits 31:22, while an 8-byte PDE
2446 	 * consumes bits 29:21.  To consume bits 31:30, KVM uses 4 shadow
2447 	 * PDPTEs; those 4 PAE page directories are pre-allocated and their
2448 	 * quadrant is assigned in mmu_alloc_root().  A 4-byte PTE consumes
2449 	 * bits 21:12, while an 8-byte PTE consumes bits 20:12.  To consume
2450 	 * bit 21 in the PTE (the child here), KVM propagates that bit to the
2451 	 * quadrant, i.e. sets quadrant to '0' or '1'.  The parent 8-byte PDE
2452 	 * covers bit 21 (see above), thus the quadrant is calculated from the
2453 	 * _least_ significant bit of the PDE index.
2454 	 */
2455 	if (role.has_4_byte_gpte) {
2456 		WARN_ON_ONCE(role.level != PG_LEVEL_4K);
2457 		role.quadrant = spte_index(sptep) & 1;
2458 	}
2459 
2460 	return role;
2461 }
2462 
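/*
 * Worked example for the quadrant logic above (illustrative only): for a
 * 32-bit non-PAE guest, GVA 0x00600000 has bit 21 set.  The shadow PDE for
 * that GVA sits at index 3 of its PAE page directory (bits 29:21 == 3), so
 *
 *	role.quadrant = spte_index(sptep) & 1 = 3 & 1 = 1
 *
 * i.e. the child shadow PT maps the upper 2MiB half of the guest's 4MiB
 * page table that covers 0x00400000 - 0x007FFFFF.
 */
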
2463 static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
2464 						 u64 *sptep, gfn_t gfn,
2465 						 bool direct, unsigned int access)
2466 {
2467 	union kvm_mmu_page_role role;
2468 
2469 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
2470 		return ERR_PTR(-EEXIST);
2471 
2472 	role = kvm_mmu_child_role(sptep, direct, access);
2473 	return kvm_mmu_get_shadow_page(vcpu, gfn, role);
2474 }
2475 
2476 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2477 					struct kvm_vcpu *vcpu, hpa_t root,
2478 					u64 addr)
2479 {
2480 	iterator->addr = addr;
2481 	iterator->shadow_addr = root;
2482 	iterator->level = vcpu->arch.mmu->root_role.level;
2483 
2484 	if (iterator->level >= PT64_ROOT_4LEVEL &&
2485 	    vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
2486 	    !vcpu->arch.mmu->root_role.direct)
2487 		iterator->level = PT32E_ROOT_LEVEL;
2488 
2489 	if (iterator->level == PT32E_ROOT_LEVEL) {
2490 		/*
2491 		 * prev_roots is currently only used for 64-bit hosts. So only
2492 		 * the active root_hpa is valid here.
2493 		 */
2494 		BUG_ON(root != vcpu->arch.mmu->root.hpa);
2495 
2496 		iterator->shadow_addr
2497 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2498 		iterator->shadow_addr &= SPTE_BASE_ADDR_MASK;
2499 		--iterator->level;
2500 		if (!iterator->shadow_addr)
2501 			iterator->level = 0;
2502 	}
2503 }
2504 
2505 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2506 			     struct kvm_vcpu *vcpu, u64 addr)
2507 {
2508 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2509 				    addr);
2510 }
2511 
2512 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2513 {
2514 	if (iterator->level < PG_LEVEL_4K)
2515 		return false;
2516 
2517 	iterator->index = SPTE_INDEX(iterator->addr, iterator->level);
2518 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2519 	return true;
2520 }
2521 
2522 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2523 			       u64 spte)
2524 {
2525 	if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2526 		iterator->level = 0;
2527 		return;
2528 	}
2529 
2530 	iterator->shadow_addr = spte & SPTE_BASE_ADDR_MASK;
2531 	--iterator->level;
2532 }
2533 
2534 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2535 {
2536 	__shadow_walk_next(iterator, *iterator->sptep);
2537 }
2538 
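/*
 * Illustrative usage: the three helpers above are normally consumed via
 * the for_each_shadow_entry() family of macros (see e.g. direct_map()
 * below), which expand roughly to:
 *
 *	for (shadow_walk_init(&it, vcpu, addr);
 *	     shadow_walk_okay(&it);
 *	     shadow_walk_next(&it)) {
 *		... inspect or install *it.sptep at it.level ...
 *	}
 */
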
2539 static void __link_shadow_page(struct kvm *kvm,
2540 			       struct kvm_mmu_memory_cache *cache, u64 *sptep,
2541 			       struct kvm_mmu_page *sp, bool flush)
2542 {
2543 	u64 spte;
2544 
2545 	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2546 
2547 	/*
2548 	 * If an SPTE is present already, it must be a leaf and therefore
2549 	 * a large one.  Drop it, and flush the TLB if needed, before
2550 	 * installing sp.
2551 	 */
2552 	if (is_shadow_present_pte(*sptep))
2553 		drop_large_spte(kvm, sptep, flush);
2554 
2555 	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2556 
2557 	mmu_spte_set(sptep, spte);
2558 
2559 	mmu_page_add_parent_pte(kvm, cache, sp, sptep);
2560 
2561 	/*
2562 	 * The non-direct sub-pagetable must be updated before linking.  For
2563 	 * L1 sp, the pagetable is updated via kvm_sync_page() in
2564 	 * kvm_mmu_find_shadow_page() without write-protecting the gfn,
2565 	 * so sp->unsync can be true or false.  For higher level non-direct
2566 	 * sp, the pagetable is updated/synced via mmu_sync_children() in
2567 	 * FNAME(fetch)(), so sp->unsync_children can only be false.
2568 	 * WARN_ON_ONCE() if anything happens unexpectedly.
2569 	 */
2570 	if (WARN_ON_ONCE(sp->unsync_children) || sp->unsync)
2571 		mark_unsync(sptep);
2572 }
2573 
2574 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2575 			     struct kvm_mmu_page *sp)
2576 {
2577 	__link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true);
2578 }
2579 
2580 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2581 				   unsigned direct_access)
2582 {
2583 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2584 		struct kvm_mmu_page *child;
2585 
2586 		/*
2587 		 * For a direct sp, if the guest pte's dirty bit
2588 		 * changed from clean to dirty, it would corrupt the
2589 		 * sp's access, i.e. allow writes through a read-only
2590 		 * sp.  Update the spte at this point to get a new
2591 		 * sp with the correct access.
2592 		 */
2593 		child = spte_to_child_sp(*sptep);
2594 		if (child->role.access == direct_access)
2595 			return;
2596 
2597 		drop_parent_pte(vcpu->kvm, child, sptep);
2598 		kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
2599 	}
2600 }
2601 
2602 /* Returns the number of zapped non-leaf child shadow pages. */
2603 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2604 			    u64 *spte, struct list_head *invalid_list)
2605 {
2606 	u64 pte;
2607 	struct kvm_mmu_page *child;
2608 
2609 	pte = *spte;
2610 	if (is_shadow_present_pte(pte)) {
2611 		if (is_last_spte(pte, sp->role.level)) {
2612 			drop_spte(kvm, spte);
2613 		} else {
2614 			child = spte_to_child_sp(pte);
2615 			drop_parent_pte(kvm, child, spte);
2616 
2617 			/*
2618 			 * Recursively zap nested TDP SPs, parentless SPs are
2619 			 * unlikely to be used again in the near future.  This
2620 			 * avoids retaining a large number of stale nested SPs.
2621 			 */
2622 			if (tdp_enabled && invalid_list &&
2623 			    child->role.guest_mode &&
2624 			    !atomic_long_read(&child->parent_ptes.val))
2625 				return kvm_mmu_prepare_zap_page(kvm, child,
2626 								invalid_list);
2627 		}
2628 	} else if (is_mmio_spte(kvm, pte)) {
2629 		mmu_spte_clear_no_track(spte);
2630 	}
2631 	return 0;
2632 }
2633 
2634 static int kvm_mmu_page_unlink_children(struct kvm *kvm,
2635 					struct kvm_mmu_page *sp,
2636 					struct list_head *invalid_list)
2637 {
2638 	int zapped = 0;
2639 	unsigned i;
2640 
2641 	for (i = 0; i < SPTE_ENT_PER_PAGE; ++i)
2642 		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
2643 
2644 	return zapped;
2645 }
2646 
2647 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2648 {
2649 	u64 *sptep;
2650 	struct rmap_iterator iter;
2651 
2652 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2653 		drop_parent_pte(kvm, sp, sptep);
2654 }
2655 
2656 static int mmu_zap_unsync_children(struct kvm *kvm,
2657 				   struct kvm_mmu_page *parent,
2658 				   struct list_head *invalid_list)
2659 {
2660 	int i, zapped = 0;
2661 	struct mmu_page_path parents;
2662 	struct kvm_mmu_pages pages;
2663 
2664 	if (parent->role.level == PG_LEVEL_4K)
2665 		return 0;
2666 
2667 	while (mmu_unsync_walk(parent, &pages)) {
2668 		struct kvm_mmu_page *sp;
2669 
2670 		for_each_sp(pages, sp, parents, i) {
2671 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2672 			mmu_pages_clear_parents(&parents);
2673 			zapped++;
2674 		}
2675 	}
2676 
2677 	return zapped;
2678 }
2679 
2680 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2681 				       struct kvm_mmu_page *sp,
2682 				       struct list_head *invalid_list,
2683 				       int *nr_zapped)
2684 {
2685 	bool list_unstable, zapped_root = false;
2686 
2687 	lockdep_assert_held_write(&kvm->mmu_lock);
2688 	trace_kvm_mmu_prepare_zap_page(sp);
2689 	++kvm->stat.mmu_shadow_zapped;
2690 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2691 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2692 	kvm_mmu_unlink_parents(kvm, sp);
2693 
2694 	/* Zapping children means active_mmu_pages has become unstable. */
2695 	list_unstable = *nr_zapped;
2696 
2697 	if (!sp->role.invalid && sp_has_gptes(sp))
2698 		unaccount_shadowed(kvm, sp);
2699 
2700 	if (sp->unsync)
2701 		kvm_unlink_unsync_page(kvm, sp);
2702 	if (!sp->root_count) {
2703 		/* Count self */
2704 		(*nr_zapped)++;
2705 
2706 		/*
2707 		 * Already invalid pages (previously active roots) are not on
2708 		 * the active page list.  See list_del() in the "else" case of
2709 		 * !sp->root_count.
2710 		 */
2711 		if (sp->role.invalid)
2712 			list_add(&sp->link, invalid_list);
2713 		else
2714 			list_move(&sp->link, invalid_list);
2715 		kvm_unaccount_mmu_page(kvm, sp);
2716 	} else {
2717 		/*
2718 		 * Remove the active root from the active page list, the root
2719 		 * will be explicitly freed when the root_count hits zero.
2720 		 */
2721 		list_del(&sp->link);
2722 
2723 		/*
2724 		 * Obsolete pages cannot be used on any vCPUs, see the comment
2725 		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2726 		 * treats invalid shadow pages as being obsolete.
2727 		 */
2728 		zapped_root = !is_obsolete_sp(kvm, sp);
2729 	}
2730 
2731 	if (sp->nx_huge_page_disallowed)
2732 		unaccount_nx_huge_page(kvm, sp);
2733 
2734 	sp->role.invalid = 1;
2735 
2736 	/*
2737 	 * Make the request to free obsolete roots after marking the root
2738 	 * invalid, otherwise other vCPUs may not see it as invalid.
2739 	 */
2740 	if (zapped_root)
2741 		kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
2742 	return list_unstable;
2743 }
2744 
2745 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2746 				     struct list_head *invalid_list)
2747 {
2748 	int nr_zapped;
2749 
2750 	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2751 	return nr_zapped;
2752 }
2753 
2754 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2755 				    struct list_head *invalid_list)
2756 {
2757 	struct kvm_mmu_page *sp, *nsp;
2758 
2759 	if (list_empty(invalid_list))
2760 		return;
2761 
2762 	/*
2763 	 * We need to make sure everyone sees our modifications to
2764 	 * the page tables and sees changes to vcpu->mode here.  The barrier
2765 	 * in kvm_flush_remote_tlbs() achieves this.  This pairs
2766 	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2767 	 *
2768 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2769 	 * guest mode and/or lockless shadow page table walks.
2770 	 */
2771 	kvm_flush_remote_tlbs(kvm);
2772 
2773 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2774 		WARN_ON_ONCE(!sp->role.invalid || sp->root_count);
2775 		kvm_mmu_free_shadow_page(sp);
2776 	}
2777 }
2778 
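/*
 * Illustrative usage of the prepare/commit split (cf.
 * __kvm_mmu_unprotect_gfn_and_retry() below): pages are unlinked and
 * queued on a local list under mmu_lock, then freed in one batch after a
 * single remote TLB flush:
 *
 *	LIST_HEAD(invalid_list);
 *
 *	write_lock(&kvm->mmu_lock);
 *	kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 *	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 *	write_unlock(&kvm->mmu_lock);
 */
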
2779 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
2780 						  unsigned long nr_to_zap)
2781 {
2782 	unsigned long total_zapped = 0;
2783 	struct kvm_mmu_page *sp, *tmp;
2784 	LIST_HEAD(invalid_list);
2785 	bool unstable;
2786 	int nr_zapped;
2787 
2788 	if (list_empty(&kvm->arch.active_mmu_pages))
2789 		return 0;
2790 
2791 restart:
2792 	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2793 		/*
2794 		 * Don't zap active root pages, the page itself can't be freed
2795 		 * and zapping it will just force vCPUs to realloc and reload.
2796 		 */
2797 		if (sp->root_count)
2798 			continue;
2799 
2800 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
2801 						      &nr_zapped);
2802 		total_zapped += nr_zapped;
2803 		if (total_zapped >= nr_to_zap)
2804 			break;
2805 
2806 		if (unstable)
2807 			goto restart;
2808 	}
2809 
2810 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2811 
2812 	kvm->stat.mmu_recycled += total_zapped;
2813 	return total_zapped;
2814 }
2815 
2816 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2817 {
2818 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2819 		return kvm->arch.n_max_mmu_pages -
2820 			kvm->arch.n_used_mmu_pages;
2821 
2822 	return 0;
2823 }
2824 
2825 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2826 {
2827 	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2828 
2829 	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2830 		return 0;
2831 
2832 	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2833 
2834 	/*
2835 	 * Note, this check is intentionally soft, it only guarantees that one
2836 	 * page is available, while the caller may end up allocating as many as
2837 	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
2838 	 * exceeding the (arbitrary by default) limit will not harm the host,
2839 	 * being too aggressive may unnecessarily kill the guest, and getting an
2840 	 * exact count is far more trouble than it's worth, especially in the
2841 	 * page fault paths.
2842 	 */
2843 	if (!kvm_mmu_available_pages(vcpu->kvm))
2844 		return -ENOSPC;
2845 	return 0;
2846 }
2847 
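/*
 * Illustrative arithmetic, assuming the current KVM_MIN_FREE_MMU_PAGES == 5
 * and KVM_REFILL_PAGES == 25: with only 2 pages available, the zap above
 * reclaims up to 25 - 2 = 23 of the oldest shadow pages, restoring enough
 * headroom that a single fault (which may allocate up to four pages, see
 * the comment above) can't immediately re-trigger reclaim.
 */
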
2848 /*
2849  * Change the number of MMU pages allocated to the VM.
2850  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2851  */
2852 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2853 {
2854 	write_lock(&kvm->mmu_lock);
2855 
2856 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2857 		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2858 						  goal_nr_mmu_pages);
2859 
2860 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2861 	}
2862 
2863 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2864 
2865 	write_unlock(&kvm->mmu_lock);
2866 }
2867 
2868 bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
2869 				       bool always_retry)
2870 {
2871 	struct kvm *kvm = vcpu->kvm;
2872 	LIST_HEAD(invalid_list);
2873 	struct kvm_mmu_page *sp;
2874 	gpa_t gpa = cr2_or_gpa;
2875 	bool r = false;
2876 
2877 	/*
2878 	 * Bail early if there aren't any write-protected shadow pages to avoid
2879 	 * unnecessarily taking mmu_lock lock, e.g. if the gfn is write-tracked
2880 	 * unnecessarily taking mmu_lock, e.g. if the gfn is write-tracked
2881 	 * mmu_lock is safe, as this is purely an optimization, i.e. a false
2882 	 * positive is benign, and a false negative will simply result in KVM
2883 	 * skipping the unprotect+retry path, which is also an optimization.
2884 	 */
2885 	if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
2886 		goto out;
2887 
2888 	if (!vcpu->arch.mmu->root_role.direct) {
2889 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
2890 		if (gpa == INVALID_GPA)
2891 			goto out;
2892 	}
2893 
2894 	write_lock(&kvm->mmu_lock);
2895 	for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa))
2896 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2897 
2898 	/*
2899 	 * Snapshot the result before zapping, as zapping will remove all list
2900 	 * entries, i.e. checking the list later would yield a false negative.
2901 	 */
2902 	r = !list_empty(&invalid_list);
2903 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2904 	write_unlock(&kvm->mmu_lock);
2905 
2906 out:
2907 	if (r || always_retry) {
2908 		vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
2909 		vcpu->arch.last_retry_addr = cr2_or_gpa;
2910 	}
2911 	return r;
2912 }
2913 
2914 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
2915 {
2916 	trace_kvm_mmu_unsync_page(sp);
2917 	++kvm->stat.mmu_unsync;
2918 	sp->unsync = 1;
2919 
2920 	kvm_mmu_mark_parents_unsync(sp);
2921 }
2922 
2923 /*
2924  * Attempt to unsync any shadow pages that can be reached by the specified gfn,
2925  * for which KVM is creating a writable mapping.  Returns 0 if all pages
2926  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
2927  * be write-protected.
2928  */
2929 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
2930 			    gfn_t gfn, bool synchronizing, bool prefetch)
2931 {
2932 	struct kvm_mmu_page *sp;
2933 	bool locked = false;
2934 
2935 	/*
2936 	 * Force write-protection if the page is being tracked.  Note, the page
2937 	 * track machinery is used to write-protect upper-level shadow pages,
2938 	 * i.e. this guards the role.level == 4K assertion below!
2939 	 */
2940 	if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
2941 		return -EPERM;
2942 
2943 	/*
2944 	 * The page is not write-tracked, mark existing shadow pages unsync
2945 	 * unless KVM is synchronizing an unsync SP.  In that case, KVM must
2946 	 * complete emulation of the guest TLB flush before allowing shadow
2947 	 * pages to become unsync (writable by the guest).
2948 	 */
2949 	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2950 		if (synchronizing)
2951 			return -EPERM;
2952 
2953 		if (sp->unsync)
2954 			continue;
2955 
2956 		if (prefetch)
2957 			return -EEXIST;
2958 
2959 		/*
2960 		 * TDP MMU page faults require an additional spinlock as they
2961 		 * run with mmu_lock held for read, not write, and the unsync
2962 		 * logic is not thread safe.  Take the spinlock regardless of
2963 		 * the MMU type to avoid extra conditionals/parameters; there's
2964 		 * no meaningful penalty if mmu_lock is held for write.
2965 		 */
2966 		if (!locked) {
2967 			locked = true;
2968 			spin_lock(&kvm->arch.mmu_unsync_pages_lock);
2969 
2970 			/*
2971 			 * Recheck after taking the spinlock, a different vCPU
2972 			 * may have since marked the page unsync.  A false
2973 			 * negative on the unprotected check above is not
2974 			 * possible as clearing sp->unsync _must_ hold mmu_lock
2975 			 * for write, i.e. unsync cannot transition from 1->0
2976 			 * while this CPU holds mmu_lock for read (or write).
2977 			 */
2978 			if (READ_ONCE(sp->unsync))
2979 				continue;
2980 		}
2981 
2982 		WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
2983 		kvm_unsync_page(kvm, sp);
2984 	}
2985 	if (locked)
2986 		spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
2987 
2988 	/*
2989 	 * We need to ensure that the marking of unsync pages is visible
2990 	 * before the SPTE is updated to allow writes because
2991 	 * kvm_mmu_sync_roots() checks the unsync flags without holding
2992 	 * the MMU lock and so can race with this. If the SPTE was updated
2993 	 * before the page had been marked as unsync-ed, something like the
2994 	 * following could happen:
2995 	 *
2996 	 * CPU 1                    CPU 2
2997 	 * ---------------------------------------------------------------------
2998 	 * 1.2 Host updates SPTE
2999 	 *     to be writable
3000 	 *                      2.1 Guest writes a GPTE for GVA X.
3001 	 *                          (GPTE being in the guest page table shadowed
3002 	 *                           by the SP from CPU 1.)
3003 	 *                          This reads SPTE during the page table walk.
3004 	 *                          Since SPTE.W is read as 1, there is no
3005 	 *                          fault.
3006 	 *
3007 	 *                      2.2 Guest issues TLB flush.
3008 	 *                          That causes a VM Exit.
3009 	 *
3010 	 *                      2.3 Walking of unsync pages sees sp->unsync is
3011 	 *                          false and skips the page.
3012 	 *
3013 	 *                      2.4 Guest accesses GVA X.
3014 	 *                          Since the mapping in the SP was not
3015 	 *                          updated, the old mapping for GVA X
3016 	 *                          incorrectly gets used.
3017 	 * 1.1 Host marks SP
3018 	 *     as unsync
3019 	 *     (sp->unsync = true)
3020 	 *
3021 	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
3022 	 * the situation in 2.4 does not arise.  It pairs with the read barrier
3023 	 * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3.
3024 	 */
3025 	smp_wmb();
3026 
3027 	return 0;
3028 }
3029 
3030 static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
3031 			u64 *sptep, unsigned int pte_access, gfn_t gfn,
3032 			kvm_pfn_t pfn, struct kvm_page_fault *fault)
3033 {
3034 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
3035 	int level = sp->role.level;
3036 	int was_rmapped = 0;
3037 	int ret = RET_PF_FIXED;
3038 	bool flush = false;
3039 	bool wrprot;
3040 	u64 spte;
3041 
3042 	/* Prefetching always gets a writable pfn.  */
3043 	/* Prefetching always gets a writable pfn. */
3044 	bool prefetch = !fault || fault->prefetch;
3045 	bool write_fault = fault && fault->write;
3046 
3047 	if (is_shadow_present_pte(*sptep)) {
3048 		if (prefetch && is_last_spte(*sptep, level) &&
3049 		    pfn == spte_to_pfn(*sptep))
3050 			return RET_PF_SPURIOUS;
3051 
3052 		/*
3053 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
3054 		 * the parent of the now unreachable PTE.
3055 		 */
3056 		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
3057 			struct kvm_mmu_page *child;
3058 			u64 pte = *sptep;
3059 
3060 			child = spte_to_child_sp(pte);
3061 			drop_parent_pte(vcpu->kvm, child, sptep);
3062 			flush = true;
3063 		} else if (pfn != spte_to_pfn(*sptep)) {
3064 			WARN_ON_ONCE(vcpu->arch.mmu->root_role.direct);
3065 			drop_spte(vcpu->kvm, sptep);
3066 			flush = true;
3067 		} else
3068 			was_rmapped = 1;
3069 	}
3070 
3071 	if (unlikely(is_noslot_pfn(pfn))) {
3072 		vcpu->stat.pf_mmio_spte_created++;
3073 		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
3074 		if (flush)
3075 			kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
3076 		return RET_PF_EMULATE;
3077 	}
3078 
3079 	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
3080 			   false, host_writable, &spte);
3081 
3082 	if (*sptep == spte) {
3083 		ret = RET_PF_SPURIOUS;
3084 	} else {
3085 		flush |= mmu_spte_update(sptep, spte);
3086 		trace_kvm_mmu_set_spte(level, gfn, sptep);
3087 	}
3088 
3089 	if (wrprot && write_fault)
3090 		ret = RET_PF_WRITE_PROTECTED;
3091 
3092 	if (flush)
3093 		kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
3094 
3095 	if (!was_rmapped) {
3096 		WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
3097 		rmap_add(vcpu, slot, sptep, gfn, pte_access);
3098 	} else {
3099 		/* Already rmapped but the pte_access bits may have changed. */
3100 		kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access);
3101 	}
3102 
3103 	return ret;
3104 }
3105 
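/*
 * Summary of mmu_set_spte() return values, all taken from the common page
 * fault machinery:
 *
 *	RET_PF_SPURIOUS		the desired SPTE was already in place
 *	RET_PF_EMULATE		no backing slot, an MMIO SPTE was installed
 *	RET_PF_WRITE_PROTECTED	write fault on a write-protected gfn
 *	RET_PF_FIXED		a new or updated SPTE was installed
 */
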
3106 static bool kvm_mmu_prefetch_sptes(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *sptep,
3107 				   int nr_pages, unsigned int access)
3108 {
3109 	struct page *pages[PTE_PREFETCH_NUM];
3110 	struct kvm_memory_slot *slot;
3111 	int i;
3112 
3113 	if (WARN_ON_ONCE(nr_pages > PTE_PREFETCH_NUM))
3114 		return false;
3115 
3116 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
3117 	if (!slot)
3118 		return false;
3119 
3120 	nr_pages = kvm_prefetch_pages(slot, gfn, pages, nr_pages);
3121 	if (nr_pages <= 0)
3122 		return false;
3123 
3124 	for (i = 0; i < nr_pages; i++, gfn++, sptep++) {
3125 		mmu_set_spte(vcpu, slot, sptep, access, gfn,
3126 			     page_to_pfn(pages[i]), NULL);
3127 
3128 		/*
3129 		 * KVM always prefetches writable pages from the primary MMU,
3130 		 * and KVM can make its SPTE writable in the fast page handler,
3131 		 * without notifying the primary MMU.  Mark pages/folios dirty
3132 		 * now to ensure file data is written back if it ends up being
3133 		 * written by the guest.  Because KVM's prefetching GUPs
3134 		 * writable PTEs, the probability of unnecessary writeback is
3135 		 * extremely low.
3136 		 */
3137 		kvm_release_page_dirty(pages[i]);
3138 	}
3139 
3140 	return true;
3141 }
3142 
3143 static bool direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
3144 				     struct kvm_mmu_page *sp,
3145 				     u64 *start, u64 *end)
3146 {
3147 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
3148 	unsigned int access = sp->role.access;
3149 
3150 	return kvm_mmu_prefetch_sptes(vcpu, gfn, start, end - start, access);
3151 }
3152 
3153 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
3154 				  struct kvm_mmu_page *sp, u64 *sptep)
3155 {
3156 	u64 *spte, *start = NULL;
3157 	int i;
3158 
3159 	WARN_ON_ONCE(!sp->role.direct);
3160 
3161 	i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
3162 	spte = sp->spt + i;
3163 
3164 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
3165 		if (is_shadow_present_pte(*spte) || spte == sptep) {
3166 			if (!start)
3167 				continue;
3168 			if (!direct_pte_prefetch_many(vcpu, sp, start, spte))
3169 				return;
3170 
3171 			start = NULL;
3172 		} else if (!start)
3173 			start = spte;
3174 	}
3175 	if (start)
3176 		direct_pte_prefetch_many(vcpu, sp, start, spte);
3177 }
3178 
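/*
 * Illustrative arithmetic for the window above, assuming the current
 * PTE_PREFETCH_NUM == 8: a fault on SPTE index 13 rounds down to index 8
 * (13 & ~7), and the loop scans slots 8..15, batching each contiguous run
 * of not-yet-present SPTEs (the faulting SPTE itself terminates a run, as
 * it's installed by the fault) into one direct_pte_prefetch_many() call.
 */
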
3179 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
3180 {
3181 	struct kvm_mmu_page *sp;
3182 
3183 	sp = sptep_to_sp(sptep);
3184 
3185 	/*
3186 	 * Without accessed bits, there's no way to distinguish between
3187 	 * actually accessed translations and prefetched, so disable pte
3188 	 * prefetch if accessed bits aren't available.
3189 	 */
3190 	if (sp_ad_disabled(sp))
3191 		return;
3192 
3193 	if (sp->role.level > PG_LEVEL_4K)
3194 		return;
3195 
3196 	/*
3197 	 * If addresses are being invalidated, skip prefetching to avoid
3198 	 * accidentally prefetching those addresses.
3199 	 */
3200 	if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
3201 		return;
3202 
3203 	__direct_pte_prefetch(vcpu, sp, sptep);
3204 }
3205 
3206 /*
3207  * Lookup the mapping level for @gfn in the current mm.
3208  *
3209  * WARNING!  Use of host_pfn_mapping_level() requires the caller and the end
3210  * consumer to be tied into KVM's handlers for MMU notifier events!
3211  *
3212  * There are several ways to safely use this helper:
3213  *
3214  * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
3215  *   consuming it.  In this case, mmu_lock doesn't need to be held during the
3216  *   lookup, but it does need to be held while checking the MMU notifier.
3217  *
3218  * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
3219  *   event for the hva.  This can be done by explicitly checking the MMU notifier
3220  *   or by ensuring that KVM already has a valid mapping that covers the hva.
3221  *
3222  * - Do not use the result to install new mappings, e.g. use the host mapping
3223  *   level only to decide whether or not to zap an entry.  In this case, it's
3224  *   not required to hold mmu_lock (though it's highly likely the caller will
3225  *   want to hold mmu_lock anyways, e.g. to modify SPTEs).
3226  *
3227  * Note!  The lookup can still race with modifications to host page tables, but
3228  * the above "rules" ensure KVM will not _consume_ the result of the walk if a
3229  * race with the primary MMU occurs.
3230  */
3231 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
3232 				  const struct kvm_memory_slot *slot)
3233 {
3234 	int level = PG_LEVEL_4K;
3235 	unsigned long hva;
3236 	unsigned long flags;
3237 	pgd_t pgd;
3238 	p4d_t p4d;
3239 	pud_t pud;
3240 	pmd_t pmd;
3241 
3242 	/*
3243 	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
3244 	 * is not solely for performance; it's also necessary to avoid the
3245 	 * "writable" check in __gfn_to_hva_many(), which will always fail on
3246 	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
3247 	 * page fault steps have already verified the guest isn't writing a
3248 	 * read-only memslot.
3249 	 */
3250 	hva = __gfn_to_hva_memslot(slot, gfn);
3251 
3252 	/*
3253 	 * Disable IRQs to prevent concurrent tear down of host page tables,
3254 	 * e.g. if the primary MMU promotes a P*D to a huge page and then frees
3255 	 * the original page table.
3256 	 */
3257 	local_irq_save(flags);
3258 
3259 	/*
3260 	 * Read each entry once.  As above, a non-leaf entry can be promoted to
3261 	 * a huge page _during_ this walk.  Re-reading the entry could send the
3262 	 * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
3263 	 * value) and then p*d_offset() walks into the target huge page instead
3264 	 * of the old page table (sees the new value).
3265 	 */
3266 	pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
3267 	if (pgd_none(pgd))
3268 		goto out;
3269 
3270 	p4d = READ_ONCE(*p4d_offset(&pgd, hva));
3271 	if (p4d_none(p4d) || !p4d_present(p4d))
3272 		goto out;
3273 
3274 	pud = READ_ONCE(*pud_offset(&p4d, hva));
3275 	if (pud_none(pud) || !pud_present(pud))
3276 		goto out;
3277 
3278 	if (pud_leaf(pud)) {
3279 		level = PG_LEVEL_1G;
3280 		goto out;
3281 	}
3282 
3283 	pmd = READ_ONCE(*pmd_offset(&pud, hva));
3284 	if (pmd_none(pmd) || !pmd_present(pmd))
3285 		goto out;
3286 
3287 	if (pmd_leaf(pmd))
3288 		level = PG_LEVEL_2M;
3289 
3290 out:
3291 	local_irq_restore(flags);
3292 	return level;
3293 }
3294 
3295 static u8 kvm_max_level_for_order(int order)
3296 {
3297 	BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
3298 
3299 	KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
3300 			order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
3301 			order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
3302 
3303 	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
3304 		return PG_LEVEL_1G;
3305 
3306 	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
3307 		return PG_LEVEL_2M;
3308 
3309 	return PG_LEVEL_4K;
3310 }
3311 
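/*
 * Illustrative arithmetic: KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) == 9 and
 * KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) == 18, so a backing allocation of
 * order 9 (512 pages == 2MiB) maps at PG_LEVEL_2M, order 18 (1GiB) at
 * PG_LEVEL_1G, and anything below order 9 at PG_LEVEL_4K.
 */
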
3312 static u8 kvm_gmem_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault,
3313 				     const struct kvm_memory_slot *slot, gfn_t gfn,
3314 				     bool is_private)
3315 {
3316 	u8 max_level, coco_level;
3317 	kvm_pfn_t pfn;
3318 
3319 	/* For faults, use the gmem information that was resolved earlier. */
3320 	if (fault) {
3321 		pfn = fault->pfn;
3322 		max_level = fault->max_level;
3323 	} else {
3324 		/* TODO: Call into guest_memfd once hugepages are supported. */
3325 		WARN_ONCE(1, "Get pfn+order from guest_memfd");
3326 		pfn = KVM_PFN_ERR_FAULT;
3327 		max_level = PG_LEVEL_4K;
3328 	}
3329 
3330 	if (max_level == PG_LEVEL_4K)
3331 		return max_level;
3332 
3333 	/*
3334 	 * CoCo may influence the max mapping level, e.g. due to RMP or S-EPT
3335 	 * restrictions.  A return of '0' means "no additional restrictions", to
3336 	 * allow for using an optional "ret0" static call.
3337 	 */
3338 	coco_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn, is_private);
3339 	if (coco_level)
3340 		max_level = min(max_level, coco_level);
3341 
3342 	return max_level;
3343 }
3344 
3345 int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault,
3346 			      const struct kvm_memory_slot *slot, gfn_t gfn)
3347 {
3348 	struct kvm_lpage_info *linfo;
3349 	int host_level, max_level;
3350 	bool is_private;
3351 
3352 	lockdep_assert_held(&kvm->mmu_lock);
3353 
3354 	if (fault) {
3355 		max_level = fault->max_level;
3356 		is_private = fault->is_private;
3357 	} else {
3358 		max_level = PG_LEVEL_NUM;
3359 		is_private = kvm_mem_is_private(kvm, gfn);
3360 	}
3361 
3362 	max_level = min(max_level, max_huge_page_level);
3363 	for ( ; max_level > PG_LEVEL_4K; max_level--) {
3364 		linfo = lpage_info_slot(gfn, slot, max_level);
3365 		if (!linfo->disallow_lpage)
3366 			break;
3367 	}
3368 
3369 	if (max_level == PG_LEVEL_4K)
3370 		return PG_LEVEL_4K;
3371 
3372 	if (is_private || kvm_memslot_is_gmem_only(slot))
3373 		host_level = kvm_gmem_max_mapping_level(kvm, fault, slot, gfn,
3374 							is_private);
3375 	else
3376 		host_level = host_pfn_mapping_level(kvm, gfn, slot);
3377 	return min(host_level, max_level);
3378 }
3379 
3380 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3381 {
3382 	struct kvm_memory_slot *slot = fault->slot;
3383 	kvm_pfn_t mask;
3384 
3385 	fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;
3386 
3387 	if (unlikely(fault->max_level == PG_LEVEL_4K))
3388 		return;
3389 
3390 	if (is_error_noslot_pfn(fault->pfn))
3391 		return;
3392 
3393 	if (kvm_slot_dirty_track_enabled(slot))
3394 		return;
3395 
3396 	/*
3397 	 * Enforce the iTLB multihit workaround after capturing the requested
3398 	 * level, which will be used to do precise, accurate accounting.
3399 	 */
3400 	fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, fault,
3401 						     fault->slot, fault->gfn);
3402 	if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
3403 		return;
3404 
3405 	/*
3406 	 * mmu_invalidate_retry() was successful and mmu_lock is held, so
3407 	 * the pmd can't be split from under us.
3408 	 */
3409 	fault->goal_level = fault->req_level;
3410 	mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
3411 	VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
3412 	fault->pfn &= ~mask;
3413 }
3414 
3415 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
3416 {
3417 	if (cur_level > PG_LEVEL_4K &&
3418 	    cur_level == fault->goal_level &&
3419 	    is_shadow_present_pte(spte) &&
3420 	    !is_large_pte(spte) &&
3421 	    spte_to_child_sp(spte)->nx_huge_page_disallowed) {
3422 		/*
3423 		 * A small SPTE exists for this pfn, but FNAME(fetch),
3424 		 * direct_map(), or kvm_tdp_mmu_map() would like to create a
3425 		 * large PTE instead: just force them to go down another level,
3426 		 * patching back for them into pfn the next 9 bits of the
3427 		 * patching the next 9 bits of the address back into pfn on
3428 		 * their behalf.
3429 		u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
3430 				KVM_PAGES_PER_HPAGE(cur_level - 1);
3431 		fault->pfn |= fault->gfn & page_mask;
3432 		fault->goal_level--;
3433 	}
3434 }
3435 
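/*
 * Illustrative arithmetic for the adjustment above: stepping down from a
 * 2MiB goal gives
 *
 *	page_mask = KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) -
 *		    KVM_PAGES_PER_HPAGE(PG_LEVEL_4K) == 512 - 1 == 511
 *
 * i.e. bits 8:0 of the gfn, exactly the 9 bits that the 2MiB mapping
 * would have translated implicitly and that must now be supplied by
 * fault->pfn at the lower level.
 */
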
3436 static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3437 {
3438 	struct kvm_shadow_walk_iterator it;
3439 	struct kvm_mmu_page *sp;
3440 	int ret;
3441 	gfn_t base_gfn = fault->gfn;
3442 
3443 	kvm_mmu_hugepage_adjust(vcpu, fault);
3444 
3445 	trace_kvm_mmu_spte_requested(fault);
3446 	for_each_shadow_entry(vcpu, fault->addr, it) {
3447 		/*
3448 		 * We cannot overwrite existing page tables with an NX
3449 		 * large page, as the leaf could be executable.
3450 		 */
3451 		if (fault->nx_huge_page_workaround_enabled)
3452 			disallowed_hugepage_adjust(fault, *it.sptep, it.level);
3453 
3454 		base_gfn = gfn_round_for_level(fault->gfn, it.level);
3455 		if (it.level == fault->goal_level)
3456 			break;
3457 
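		/*
		 * Allocate (or find) the next-level shadow page; -EEXIST
		 * means a child shadow page is already linked, so the walk
		 * can simply descend into it.
		 */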
3458 		sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL);
3459 		if (sp == ERR_PTR(-EEXIST))
3460 			continue;
3461 
3462 		link_shadow_page(vcpu, it.sptep, sp);
3463 		if (fault->huge_page_disallowed)
3464 			account_nx_huge_page(vcpu->kvm, sp,
3465 					     fault->req_level >= it.level);
3466 	}
3467 
3468 	if (WARN_ON_ONCE(it.level != fault->goal_level))
3469 		return -EFAULT;
3470 
3471 	ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
3472 			   base_gfn, fault->pfn, fault);
3473 	if (ret == RET_PF_SPURIOUS)
3474 		return ret;
3475 
3476 	direct_pte_prefetch(vcpu, it.sptep);
3477 	return ret;
3478 }
3479 
3480 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
3481 {
3482 	unsigned long hva = gfn_to_hva_memslot(slot, gfn);
3483 
3484 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
3485 }
3486 
3487 static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3488 {
3489 	if (is_sigpending_pfn(fault->pfn)) {
3490 		kvm_handle_signal_exit(vcpu);
3491 		return -EINTR;
3492 	}
3493 
3494 	/*
3495 	 * Do not cache the mmio info caused by writing the readonly gfn
3496 	 * into the spte, otherwise a read access to the readonly gfn would
3497 	 * also cause an mmio page fault and be treated as an mmio access.
3498 	 */
3499 	if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
3500 		return RET_PF_EMULATE;
3501 
3502 	if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
3503 		kvm_send_hwpoison_signal(fault->slot, fault->gfn);
3504 		return RET_PF_RETRY;
3505 	}
3506 
3507 	return -EFAULT;
3508 }
3509 
3510 static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
3511 				   struct kvm_page_fault *fault,
3512 				   unsigned int access)
3513 {
3514 	gva_t gva = fault->is_tdp ? 0 : fault->addr;
3515 
3516 	if (fault->is_private) {
3517 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
3518 		return -EFAULT;
3519 	}
3520 
3521 	vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3522 			     access & shadow_mmio_access_mask);
3523 
3524 	fault->slot = NULL;
3525 	fault->pfn = KVM_PFN_NOSLOT;
3526 	fault->map_writable = false;
3527 
3528 	/*
3529 	 * If MMIO caching is disabled, emulate immediately without
3530 	 * touching the shadow page tables as attempting to install an
3531 	 * MMIO SPTE will just be an expensive nop.
3532 	 */
3533 	if (unlikely(!enable_mmio_caching))
3534 		return RET_PF_EMULATE;
3535 
3536 	/*
3537 	 * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR,
3538 	 * any guest that generates such gfns is running nested and is being
3539 	 * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and
3540 	 * only if L1's MAXPHYADDR is inaccurate with respect to the
3541 	 * hardware's).
3542 	 */
3543 	if (unlikely(fault->gfn > kvm_mmu_max_gfn()))
3544 		return RET_PF_EMULATE;
3545 
3546 	return RET_PF_CONTINUE;
3547 }
3548 
3549 static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault)
3550 {
3551 	/*
3552 	 * Page faults with reserved bits set, i.e. faults on MMIO SPTEs, only
3553 	 * reach the common page fault handler if the SPTE has an invalid MMIO
3554 	 * generation number.  Refreshing the MMIO generation needs to go down
3555 	 * the slow path.  Note, EPT Misconfigs do NOT set the PRESENT flag!
3556 	 */
3557 	if (fault->rsvd)
3558 		return false;
3559 
3560 	/*
3561 	 * For hardware-protected VMs, certain conditions like attempting to
3562 	 * perform a write to a page which is not in the state that the guest
3563 	 * expects it to be in can result in a nested/extended #PF. In this
3564 	 * case, the below code might misconstrue this situation as being the
3565 	 * result of a write-protected access, and treat it as a spurious case
3566 	 * rather than taking any action to satisfy the real source of the #PF
3567 	 * such as generating a KVM_EXIT_MEMORY_FAULT. This can lead to the
3568 	 * guest spinning on a #PF indefinitely, so don't attempt the fast path
3569 	 * in this case.
3570 	 *
3571 	 * Note that the kvm_mem_is_private() check might race with an
3572 	 * attribute update, but this will either result in the guest spinning
3573 	 * on RET_PF_SPURIOUS until the update completes, or an actual spurious
3574 	 * case might go down the slow path. Either case will resolve itself.
3575 	 */
3576 	if (kvm->arch.has_private_mem &&
3577 	    fault->is_private != kvm_mem_is_private(kvm, fault->gfn))
3578 		return false;
3579 
3580 	/*
3581 	 * #PF can be fast if:
3582 	 *
3583 	 * 1. The shadow page table entry is not present and A/D bits are
3584 	 *    disabled _by KVM_, which could mean that the fault is potentially
3585 	 *    caused by access tracking (if enabled).  If A/D bits are enabled
3586 	 *    by KVM, but disabled by L1 for L2, KVM is forced to disable A/D
3587 	 *    bits for L2 and employ access tracking, but the fast page fault
3588 	 *    mechanism only supports direct MMUs.
3589 	 * 2. The shadow page table entry is present, the access is a write,
3590 	 *    and no reserved bits are set (MMIO SPTEs cannot be "fixed"), i.e.
3591 	 *    the fault was caused by a write-protection violation.  If the
3592 	 *    SPTE is MMU-writable (determined later), the fault can be fixed
3593 	 *    by setting the Writable bit, which can be done out of mmu_lock.
3594 	 */
3595 	if (!fault->present)
3596 		return !kvm_ad_enabled;
3597 
3598 	/*
3599 	 * Note, instruction fetches and writes are mutually exclusive, ignore
3600 	 * the "exec" flag.
3601 	 */
3602 	return fault->write;
3603 }
3604 
3605 /*
3606  * Returns true if the SPTE was fixed successfully. Otherwise,
3607  * someone else modified the SPTE from its original value.
3608  */
3609 static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
3610 				    struct kvm_page_fault *fault,
3611 				    u64 *sptep, u64 old_spte, u64 new_spte)
3612 {
3613 	/*
3614 	 * Theoretically we could also set the dirty bit (and flush the TLB)
3615 	 * here in order to eliminate unnecessary PML logging. See comments in
3616 	 * set_spte. But fast_page_fault is very unlikely to happen with PML
3617 	 * enabled, so we do not do this. This might result in the same GPA
3618 	 * being logged in the PML buffer again when the write really happens,
3619 	 * and in mark_page_dirty() eventually being called twice for it, but
3620 	 * that does no harm. Skipping it also avoids the TLB flush needed
3621 	 * after setting the dirty bit, so non-PML cases won't be impacted.
3622 	 *
3623 	 * Compare with make_spte() where instead shadow_dirty_mask is set.
3624 	 */
3625 	if (!try_cmpxchg64(sptep, &old_spte, new_spte))
3626 		return false;
3627 
3628 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
3629 		mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3630 
3631 	return true;
3632 }
3633 
3634 /*
3635  * Returns the last-level spte pointer of the shadow page walk for the given
3636  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
3637  * walk could be performed, returns NULL and *spte does not contain valid data.
3638  *
3639  * Contract:
3640  *  - Must be called between walk_shadow_page_lockless_{begin,end}.
3641  *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
3642  */
3643 static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
3644 {
3645 	struct kvm_shadow_walk_iterator iterator;
3646 	u64 old_spte;
3647 	u64 *sptep = NULL;
3648 
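	/* The last visited entry is the lowest-level SPTE reached by the walk. */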
3649 	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
3650 		sptep = iterator.sptep;
3651 		*spte = old_spte;
3652 	}
3653 
3654 	return sptep;
3655 }
3656 
3657 /*
3658  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3659  */
3660 static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3661 {
3662 	struct kvm_mmu_page *sp;
3663 	int ret = RET_PF_INVALID;
3664 	u64 spte;
3665 	u64 *sptep;
3666 	uint retry_count = 0;
3667 
3668 	if (!page_fault_can_be_fast(vcpu->kvm, fault))
3669 		return ret;
3670 
3671 	walk_shadow_page_lockless_begin(vcpu);
3672 
3673 	do {
3674 		u64 new_spte;
3675 
3676 		if (tdp_mmu_enabled)
3677 			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte);
3678 		else
3679 			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3680 
3681 		/*
3682 		 * It's entirely possible for the mapping to have been zapped
3683 		 * by a different task, but the root page should always be
3684 		 * available as the vCPU holds a reference to its root(s).
3685 		 */
3686 		if (WARN_ON_ONCE(!sptep))
3687 			spte = FROZEN_SPTE;
3688 
3689 		if (!is_shadow_present_pte(spte))
3690 			break;
3691 
3692 		sp = sptep_to_sp(sptep);
3693 		if (!is_last_spte(spte, sp->role.level))
3694 			break;
3695 
3696 		/*
3697 		 * Check whether the memory access that caused the fault would
3698 		 * still cause it if it were to be performed right now. If not,
3699 		 * then this is a spurious fault caused by a lazily flushed
3700 		 * TLB entry, or some other CPU has already fixed the PTE
3701 		 * after the current CPU took the fault.
3702 		 *
3703 		 * Need not check the access of upper level table entries since
3704 		 * they are always ACC_ALL.
3705 		 */
3706 		if (is_access_allowed(fault, spte)) {
3707 			ret = RET_PF_SPURIOUS;
3708 			break;
3709 		}
3710 
3711 		new_spte = spte;
3712 
3713 		/*
3714 		 * KVM only supports fixing page faults outside of MMU lock for
3715 		 * direct MMUs, nested MMUs are always indirect, and KVM always
3716 		 * uses A/D bits for non-nested MMUs.  Thus, if A/D bits are
3717 		 * enabled, the SPTE can't be an access-tracked SPTE.
3718 		 */
3719 		if (unlikely(!kvm_ad_enabled) && is_access_track_spte(spte))
3720 			new_spte = restore_acc_track_spte(new_spte) |
3721 				   shadow_accessed_mask;
3722 
3723 		/*
3724 		 * To keep things simple, only SPTEs that are MMU-writable can
3725 		 * be made fully writable outside of mmu_lock, e.g. only SPTEs
3726 		 * that were write-protected for dirty-logging or access
3727 		 * tracking are handled here.  Don't bother checking if the
3728 		 * SPTE is writable to prioritize running with A/D bits enabled.
3729 		 * The is_access_allowed() check above handles the common case
3730 		 * of the fault being spurious, and the SPTE is known to be
3731 		 * shadow-present, i.e. except for access tracking restoration
3732 		 * making the new SPTE writable, the check is wasteful.
3733 		 */
3734 		if (fault->write && is_mmu_writable_spte(spte)) {
3735 			new_spte |= PT_WRITABLE_MASK;
3736 
3737 			/*
3738 			 * Do not fix write-permission on the large spte when
3739 			 * dirty logging is enabled. Since we only dirty the
3740 			 * first page into the dirty-bitmap in
3741 			 * fast_pf_fix_direct_spte(), other pages are missed
3742 			 * if its slot has dirty logging enabled.
3743 			 *
3744 			 * Instead, we let the slow page fault path create a
3745 			 * normal spte to fix the access.
3746 			 */
3747 			if (sp->role.level > PG_LEVEL_4K &&
3748 			    kvm_slot_dirty_track_enabled(fault->slot))
3749 				break;
3750 		}
3751 
3752 		/* Verify that the fault can be handled in the fast path */
3753 		if (new_spte == spte ||
3754 		    !is_access_allowed(fault, new_spte))
3755 			break;
3756 
3757 		/*
3758 		 * Currently, fast page fault only works for direct mapping
3759 		 * since the gfn is not stable for indirect shadow pages. See
3760 		 * Documentation/virt/kvm/locking.rst to get more detail.
3761 		 */
3762 		if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3763 			ret = RET_PF_FIXED;
3764 			break;
3765 		}
3766 
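		/*
		 * The cmpxchg failed, i.e. the SPTE changed under us; retry,
		 * but give up after a few attempts to avoid livelocking.
		 */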
3767 		if (++retry_count > 4) {
3768 			pr_warn_once("Fast #PF retrying more than 4 times.\n");
3769 			break;
3770 		}
3771 
3772 	} while (true);
3773 
3774 	trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3775 	walk_shadow_page_lockless_end(vcpu);
3776 
3777 	if (ret != RET_PF_INVALID)
3778 		vcpu->stat.pf_fast++;
3779 
3780 	return ret;
3781 }
3782 
3783 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3784 			       struct list_head *invalid_list)
3785 {
3786 	struct kvm_mmu_page *sp;
3787 
3788 	if (!VALID_PAGE(*root_hpa))
3789 		return;
3790 
3791 	sp = root_to_sp(*root_hpa);
3792 	if (WARN_ON_ONCE(!sp))
3793 		return;
3794 
3795 	if (is_tdp_mmu_page(sp)) {
3796 		lockdep_assert_held_read(&kvm->mmu_lock);
3797 		kvm_tdp_mmu_put_root(kvm, sp);
3798 	} else {
3799 		lockdep_assert_held_write(&kvm->mmu_lock);
3800 		if (!--sp->root_count && sp->role.invalid)
3801 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3802 	}
3803 
3804 	*root_hpa = INVALID_PAGE;
3805 }
3806 
3807 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3808 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
3809 			ulong roots_to_free)
3810 {
3811 	bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct;
3812 	int i;
3813 	LIST_HEAD(invalid_list);
3814 	bool free_active_root;
3815 
3816 	WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL);
3817 
3818 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3819 
3820 	/* Before acquiring the MMU lock, see if we need to do any real work. */
3821 	free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT)
3822 		&& VALID_PAGE(mmu->root.hpa);
3823 
3824 	if (!free_active_root) {
3825 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3826 			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3827 			    VALID_PAGE(mmu->prev_roots[i].hpa))
3828 				break;
3829 
3830 		if (i == KVM_MMU_NUM_PREV_ROOTS)
3831 			return;
3832 	}
3833 
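	/*
	 * TDP MMU roots are reference counted and can be put with mmu_lock
	 * held for read; shadow MMU pages must be zapped with mmu_lock held
	 * for write.
	 */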
3834 	if (is_tdp_mmu)
3835 		read_lock(&kvm->mmu_lock);
3836 	else
3837 		write_lock(&kvm->mmu_lock);
3838 
3839 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3840 		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3841 			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3842 					   &invalid_list);
3843 
3844 	if (free_active_root) {
3845 		if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
3846 			/* Nothing to cleanup for dummy roots. */
3847 		} else if (root_to_sp(mmu->root.hpa)) {
3848 			mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
3849 		} else if (mmu->pae_root) {
3850 			for (i = 0; i < 4; ++i) {
3851 				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3852 					continue;
3853 
3854 				mmu_free_root_page(kvm, &mmu->pae_root[i],
3855 						   &invalid_list);
3856 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3857 			}
3858 		}
3859 		mmu->root.hpa = INVALID_PAGE;
3860 		mmu->root.pgd = 0;
3861 	}
3862 
3863 	if (is_tdp_mmu) {
3864 		read_unlock(&kvm->mmu_lock);
3865 		WARN_ON_ONCE(!list_empty(&invalid_list));
3866 	} else {
3867 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
3868 		write_unlock(&kvm->mmu_lock);
3869 	}
3870 }
3871 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_roots);
3872 
3873 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
3874 {
3875 	unsigned long roots_to_free = 0;
3876 	struct kvm_mmu_page *sp;
3877 	hpa_t root_hpa;
3878 	int i;
3879 
3880 	/*
3881 	 * This should not be called while L2 is active; L2 can't invalidate
3882 	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
3883 	 */
3884 	WARN_ON_ONCE(mmu->root_role.guest_mode);
3885 
3886 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3887 		root_hpa = mmu->prev_roots[i].hpa;
3888 		if (!VALID_PAGE(root_hpa))
3889 			continue;
3890 
3891 		sp = root_to_sp(root_hpa);
3892 		if (!sp || sp->role.guest_mode)
3893 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3894 	}
3895 
3896 	kvm_mmu_free_roots(kvm, mmu, roots_to_free);
3897 }
3898 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_guest_mode_roots);
3899 
3900 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
3901 			    u8 level)
3902 {
3903 	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3904 	struct kvm_mmu_page *sp;
3905 
3906 	role.level = level;
3907 	role.quadrant = quadrant;
3908 
3909 	WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
3910 	WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3911 
3912 	sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
3913 	++sp->root_count;
3914 
3915 	return __pa(sp->spt);
3916 }
3917 
3918 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3919 {
3920 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3921 	u8 shadow_root_level = mmu->root_role.level;
3922 	hpa_t root;
3923 	unsigned i;
3924 	int r;
3925 
3926 	if (tdp_mmu_enabled) {
3927 		if (kvm_has_mirrored_tdp(vcpu->kvm) &&
3928 		    !VALID_PAGE(mmu->mirror_root_hpa))
3929 			kvm_tdp_mmu_alloc_root(vcpu, true);
3930 		kvm_tdp_mmu_alloc_root(vcpu, false);
3931 		return 0;
3932 	}
3933 
3934 	write_lock(&vcpu->kvm->mmu_lock);
3935 	r = make_mmu_pages_available(vcpu);
3936 	if (r < 0)
3937 		goto out_unlock;
3938 
3939 	if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3940 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
3941 		mmu->root.hpa = root;
3942 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3943 		if (WARN_ON_ONCE(!mmu->pae_root)) {
3944 			r = -EIO;
3945 			goto out_unlock;
3946 		}
3947 
3948 		for (i = 0; i < 4; ++i) {
3949 			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3950 
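			/*
			 * Each PAE page directory covers 1GiB of guest
			 * physical address space, hence the gfn stride of
			 * 1 << (30 - PAGE_SHIFT).
			 */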
3951 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0,
3952 					      PT32_ROOT_LEVEL);
3953 			mmu->pae_root[i] = root | PT_PRESENT_MASK |
3954 					   shadow_me_value;
3955 		}
3956 		mmu->root.hpa = __pa(mmu->pae_root);
3957 	} else {
3958 		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3959 		r = -EIO;
3960 		goto out_unlock;
3961 	}
3962 
3963 	/* root.pgd is ignored for direct MMUs. */
3964 	mmu->root.pgd = 0;
3965 out_unlock:
3966 	write_unlock(&vcpu->kvm->mmu_lock);
3967 	return r;
3968 }
3969 
3970 static int kvm_mmu_alloc_page_hash(struct kvm *kvm)
3971 {
3972 	struct hlist_head *h;
3973 
3974 	if (kvm->arch.mmu_page_hash)
3975 		return 0;
3976 
3977 	h = kvzalloc_objs(*h, KVM_NUM_MMU_PAGES, GFP_KERNEL_ACCOUNT);
3978 	if (!h)
3979 		return -ENOMEM;
3980 
3981 	/*
3982 	 * Ensure the hash table pointer is set only after all stores to zero
3983 	 * the memory are retired.  Pairs with the smp_load_acquire() in
3984 	 * kvm_get_mmu_page_hash().  Note, mmu_lock must be held for write to
3985 	 * add (or remove) shadow pages, and so readers are guaranteed to see
3986 	 * an empty list for their current mmu_lock critical section.
3987 	 */
3988 	smp_store_release(&kvm->arch.mmu_page_hash, h);
3989 	return 0;
3990 }
3991 
3992 static int mmu_first_shadow_root_alloc(struct kvm *kvm)
3993 {
3994 	struct kvm_memslots *slots;
3995 	struct kvm_memory_slot *slot;
3996 	int r = 0, i, bkt;
3997 
3998 	/*
3999 	 * Check if this is the first shadow root being allocated before
4000 	 * taking the lock.
4001 	 */
4002 	if (kvm_shadow_root_allocated(kvm))
4003 		return 0;
4004 
4005 	mutex_lock(&kvm->slots_arch_lock);
4006 
4007 	/* Recheck, under the lock, whether this is the first shadow root. */
4008 	if (kvm_shadow_root_allocated(kvm))
4009 		goto out_unlock;
4010 
4011 	r = kvm_mmu_alloc_page_hash(kvm);
4012 	if (r)
4013 		goto out_unlock;
4014 
4015 	/*
4016 	 * Check if memslot metadata actually needs to be allocated, e.g. all
4017 	 * metadata will be allocated upfront if TDP is disabled.
4018 	 */
4019 	if (kvm_memslots_have_rmaps(kvm) &&
4020 	    kvm_page_track_write_tracking_enabled(kvm))
4021 		goto out_success;
4022 
4023 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
4024 		slots = __kvm_memslots(kvm, i);
4025 		kvm_for_each_memslot(slot, bkt, slots) {
4026 			/*
4027 			 * Both of these functions are no-ops if the target is
4028 			 * already allocated, so unconditionally calling both
4029 			 * is safe.  Intentionally do NOT free allocations on
4030 			 * failure to avoid having to track which allocations
4031 			 * were made now versus when the memslot was created.
4032 			 * The metadata is guaranteed to be freed when the slot
4033 			 * is freed, and will be kept/used if userspace retries
4034 			 * KVM_RUN instead of killing the VM.
4035 			 */
4036 			r = memslot_rmap_alloc(slot, slot->npages);
4037 			if (r)
4038 				goto out_unlock;
4039 			r = kvm_page_track_write_tracking_alloc(slot);
4040 			if (r)
4041 				goto out_unlock;
4042 		}
4043 	}
4044 
4045 	/*
4046 	 * Ensure that shadow_root_allocated becomes true strictly after
4047 	 * all the related pointers are set.
4048 	 */
4049 out_success:
4050 	smp_store_release(&kvm->arch.shadow_root_allocated, true);
4051 
4052 out_unlock:
4053 	mutex_unlock(&kvm->slots_arch_lock);
4054 	return r;
4055 }
4056 
4057 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
4058 {
4059 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4060 	u64 pdptrs[4], pm_mask;
4061 	gfn_t root_gfn, root_pgd;
4062 	int quadrant, i, r;
4063 	hpa_t root;
4064 
4065 	root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
4066 	root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
4067 
4068 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
4069 		mmu->root.hpa = kvm_mmu_get_dummy_root();
4070 		return 0;
4071 	}
4072 
4073 	/*
4074 	 * On SVM, reading PDPTRs might access guest memory, which might fault
4075 	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
4076 	 */
4077 	if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
4078 		for (i = 0; i < 4; ++i) {
4079 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
4080 			if (!(pdptrs[i] & PT_PRESENT_MASK))
4081 				continue;
4082 
4083 			if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT))
4084 				pdptrs[i] = 0;
4085 		}
4086 	}
4087 
4088 	r = mmu_first_shadow_root_alloc(vcpu->kvm);
4089 	if (r)
4090 		return r;
4091 
4092 	write_lock(&vcpu->kvm->mmu_lock);
4093 	r = make_mmu_pages_available(vcpu);
4094 	if (r < 0)
4095 		goto out_unlock;
4096 
4097 	/*
4098 	 * Do we shadow a long mode page table? If so we need to
4099 	 * write-protect the guest's page table root.
4100 	 */
4101 	if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4102 		root = mmu_alloc_root(vcpu, root_gfn, 0,
4103 				      mmu->root_role.level);
4104 		mmu->root.hpa = root;
4105 		goto set_root_pgd;
4106 	}
4107 
4108 	if (WARN_ON_ONCE(!mmu->pae_root)) {
4109 		r = -EIO;
4110 		goto out_unlock;
4111 	}
4112 
4113 	/*
4114 	 * We shadow a 32-bit page table. This may be a legacy 2-level
4115 	 * or a PAE 3-level page table. In either case we need to be aware that
4116 	 * the shadow page table may be a PAE or a long mode page table.
4117 	 */
4118 	pm_mask = PT_PRESENT_MASK | shadow_me_value;
4119 	if (mmu->root_role.level >= PT64_ROOT_4LEVEL) {
4120 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
4121 
4122 		if (WARN_ON_ONCE(!mmu->pml4_root)) {
4123 			r = -EIO;
4124 			goto out_unlock;
4125 		}
4126 		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
4127 
4128 		if (mmu->root_role.level == PT64_ROOT_5LEVEL) {
4129 			if (WARN_ON_ONCE(!mmu->pml5_root)) {
4130 				r = -EIO;
4131 				goto out_unlock;
4132 			}
4133 			mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
4134 		}
4135 	}
4136 
4137 	for (i = 0; i < 4; ++i) {
4138 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
4139 
4140 		if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
4141 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
4142 				mmu->pae_root[i] = INVALID_PAE_ROOT;
4143 				continue;
4144 			}
4145 			root_gfn = pdptrs[i] >> PAGE_SHIFT;
4146 		}
4147 
4148 		/*
4149 		 * If shadowing 32-bit non-PAE page tables, each PAE page
4150 		 * directory maps one quarter of the guest's non-PAE page
4151 		 * directory. Otherwise each PAE page directory shadows one
4152 		 * guest PAE page directory, so the quadrant should be 0.
4153 		 */
4154 		quadrant = (mmu->cpu_role.base.level == PT32_ROOT_LEVEL) ? i : 0;
4155 
4156 		root = mmu_alloc_root(vcpu, root_gfn, quadrant, PT32_ROOT_LEVEL);
4157 		mmu->pae_root[i] = root | pm_mask;
4158 	}
4159 
4160 	if (mmu->root_role.level == PT64_ROOT_5LEVEL)
4161 		mmu->root.hpa = __pa(mmu->pml5_root);
4162 	else if (mmu->root_role.level == PT64_ROOT_4LEVEL)
4163 		mmu->root.hpa = __pa(mmu->pml4_root);
4164 	else
4165 		mmu->root.hpa = __pa(mmu->pae_root);
4166 
4167 set_root_pgd:
4168 	mmu->root.pgd = root_pgd;
4169 out_unlock:
4170 	write_unlock(&vcpu->kvm->mmu_lock);
4171 
4172 	return r;
4173 }
4174 
4175 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
4176 {
4177 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4178 	bool need_pml5 = mmu->root_role.level > PT64_ROOT_4LEVEL;
4179 	u64 *pml5_root = NULL;
4180 	u64 *pml4_root = NULL;
4181 	u64 *pae_root;
4182 
4183 	/*
4184 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
4185 	 * tables are allocated and initialized at root creation as there is no
4186 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
4187 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
4188 	 */
4189 	if (mmu->root_role.direct ||
4190 	    mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
4191 	    mmu->root_role.level < PT64_ROOT_4LEVEL)
4192 		return 0;
4193 
4194 	/*
4195 	 * NPT, the only paging mode that uses this horror, uses a fixed number
4196 	 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
4197 	 * all MMUs are 5-level.  Thus, this can safely require that pml5_root
4198 	 * is allocated if the other roots are valid and pml5 is needed, as any
4199 	 * prior MMU would also have required pml5.
4200 	 */
4201 	if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
4202 		return 0;
4203 
4204 	/*
4205 	 * The special roots should always be allocated in concert.  Yell and
4206 	 * bail if KVM ends up in a state where only one of the roots is valid.
4207 	 */
4208 	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
4209 			 (need_pml5 && mmu->pml5_root)))
4210 		return -EIO;
4211 
4212 	/*
4213 	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
4214 	 * doesn't need to be decrypted.
4215 	 */
4216 	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
4217 	if (!pae_root)
4218 		return -ENOMEM;
4219 
4220 #ifdef CONFIG_X86_64
4221 	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
4222 	if (!pml4_root)
4223 		goto err_pml4;
4224 
4225 	if (need_pml5) {
4226 		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
4227 		if (!pml5_root)
4228 			goto err_pml5;
4229 	}
4230 #endif
4231 
4232 	mmu->pae_root = pae_root;
4233 	mmu->pml4_root = pml4_root;
4234 	mmu->pml5_root = pml5_root;
4235 
4236 	return 0;
4237 
4238 #ifdef CONFIG_X86_64
4239 err_pml5:
4240 	free_page((unsigned long)pml4_root);
4241 err_pml4:
4242 	free_page((unsigned long)pae_root);
4243 	return -ENOMEM;
4244 #endif
4245 }
4246 
4247 static bool is_unsync_root(hpa_t root)
4248 {
4249 	struct kvm_mmu_page *sp;
4250 
4251 	if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
4252 		return false;
4253 
4254 	/*
4255 	 * The read barrier orders the CPU's read of SPTE.W during the page table
4256 	 * walk before the reads of sp->unsync/sp->unsync_children here.
4257 	 *
4258 	 * Even if another CPU was marking the SP as unsync-ed simultaneously,
4259 	 * any guest page table changes are not guaranteed to be visible anyway
4260 	 * until this VCPU issues a TLB flush strictly after those changes are
4261 	 * made.  We only need to ensure that the other CPU sets these flags
4262 	 * before any actual changes to the page tables are made.  The comments
4263 	 * in mmu_try_to_unsync_pages() describe what could go wrong if this
4264 	 * requirement isn't satisfied.
4265 	 */
4266 	smp_rmb();
4267 	sp = root_to_sp(root);
4268 
4269 	/*
4270 	 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages; the
4271 	 * PDPTEs for a given PAE root need to be synchronized individually.
4272 	 */
4273 	if (WARN_ON_ONCE(!sp))
4274 		return false;
4275 
4276 	if (sp->unsync || sp->unsync_children)
4277 		return true;
4278 
4279 	return false;
4280 }
4281 
4282 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
4283 {
4284 	int i;
4285 	struct kvm_mmu_page *sp;
4286 
4287 	if (vcpu->arch.mmu->root_role.direct)
4288 		return;
4289 
4290 	if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4291 		return;
4292 
4293 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4294 
4295 	if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4296 		hpa_t root = vcpu->arch.mmu->root.hpa;
4297 
4298 		if (!is_unsync_root(root))
4299 			return;
4300 
4301 		sp = root_to_sp(root);
4302 
4303 		write_lock(&vcpu->kvm->mmu_lock);
4304 		mmu_sync_children(vcpu, sp, true);
4305 		write_unlock(&vcpu->kvm->mmu_lock);
4306 		return;
4307 	}
4308 
4309 	write_lock(&vcpu->kvm->mmu_lock);
4310 
4311 	for (i = 0; i < 4; ++i) {
4312 		hpa_t root = vcpu->arch.mmu->pae_root[i];
4313 
4314 		if (IS_VALID_PAE_ROOT(root)) {
4315 			sp = spte_to_child_sp(root);
4316 			mmu_sync_children(vcpu, sp, true);
4317 		}
4318 	}
4319 
4320 	write_unlock(&vcpu->kvm->mmu_lock);
4321 }
4322 
4323 void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
4324 {
4325 	unsigned long roots_to_free = 0;
4326 	int i;
4327 
4328 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4329 		if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
4330 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
4331 
4332 	/* sync prev_roots by simply freeing them */
4333 	kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
4334 }
4335 
4336 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4337 				  gpa_t vaddr, u64 access,
4338 				  struct x86_exception *exception)
4339 {
4340 	if (exception)
4341 		exception->error_code = 0;
4342 	return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
4343 }
4344 
4345 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4346 {
4347 	/*
4348 	 * A nested guest cannot use the MMIO cache if it is using nested
4349 	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
4350 	 */
4351 	if (mmu_is_nested(vcpu))
4352 		return false;
4353 
4354 	if (direct)
4355 		return vcpu_match_mmio_gpa(vcpu, addr);
4356 
4357 	return vcpu_match_mmio_gva(vcpu, addr);
4358 }
4359 
4360 /*
4361  * Return the level of the lowest level SPTE added to sptes.
4362  * That SPTE may be non-present.
4363  *
4364  * Must be called between walk_shadow_page_lockless_{begin,end}.
4365  */
4366 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
4367 {
4368 	struct kvm_shadow_walk_iterator iterator;
4369 	int leaf = -1;
4370 	u64 spte;
4371 
4372 	for (shadow_walk_init(&iterator, vcpu, addr),
4373 	     *root_level = iterator.level;
4374 	     shadow_walk_okay(&iterator);
4375 	     __shadow_walk_next(&iterator, spte)) {
4376 		leaf = iterator.level;
4377 		spte = mmu_spte_get_lockless(iterator.sptep);
4378 
4379 		sptes[leaf] = spte;
4380 	}
4381 
4382 	return leaf;
4383 }
4384 
4385 static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
4386 			      int *root_level)
4387 {
4388 	int leaf;
4389 
4390 	walk_shadow_page_lockless_begin(vcpu);
4391 
4392 	if (is_tdp_mmu_active(vcpu))
4393 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level);
4394 	else
4395 		leaf = get_walk(vcpu, addr, sptes, root_level);
4396 
4397 	walk_shadow_page_lockless_end(vcpu);
4398 	return leaf;
4399 }
4400 
4401 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
4402 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
4403 {
4404 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
4405 	struct rsvd_bits_validate *rsvd_check;
4406 	int root, leaf, level;
4407 	bool reserved = false;
4408 
4409 	leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
4410 	if (unlikely(leaf < 0)) {
4411 		*sptep = 0ull;
4412 		return reserved;
4413 	}
4414 
4415 	*sptep = sptes[leaf];
4416 
4417 	/*
4418 	 * Skip reserved bits checks on the terminal leaf if it's not a valid
4419 	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
4420 	 * design, always have reserved bits set.  The purpose of the checks is
4421 	 * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
4422 	 */
4423 	if (!is_shadow_present_pte(sptes[leaf]))
4424 		leaf++;
4425 
4426 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
4427 
4428 	for (level = root; level >= leaf; level--)
4429 		reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
4430 
4431 	if (reserved) {
4432 		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
4433 		       __func__, addr);
4434 		for (level = root; level >= leaf; level--)
4435 			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
4436 			       sptes[level], level,
4437 			       get_rsvd_bits(rsvd_check, sptes[level], level));
4438 	}
4439 
4440 	return reserved;
4441 }
4442 
4443 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4444 {
4445 	u64 spte;
4446 	bool reserved;
4447 
4448 	if (mmio_info_in_cache(vcpu, addr, direct))
4449 		return RET_PF_EMULATE;
4450 
4451 	reserved = get_mmio_spte(vcpu, addr, &spte);
4452 	if (WARN_ON_ONCE(reserved))
4453 		return -EINVAL;
4454 
4455 	if (is_mmio_spte(vcpu->kvm, spte)) {
4456 		gfn_t gfn = get_mmio_spte_gfn(spte);
4457 		unsigned int access = get_mmio_spte_access(spte);
4458 
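		/*
		 * A stale MMIO generation means the memslots have changed;
		 * return RET_PF_INVALID so the slow path refreshes the SPTE.
		 */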
4459 		if (!check_mmio_spte(vcpu, spte))
4460 			return RET_PF_INVALID;
4461 
4462 		if (direct)
4463 			addr = 0;
4464 
4465 		trace_handle_mmio_page_fault(addr, gfn, access);
4466 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4467 		return RET_PF_EMULATE;
4468 	}
4469 
4470 	/*
4471 	 * If the page table is zapped by other CPUs, let the CPU fault again on
4472 	 * the address.
4473 	 */
4474 	return RET_PF_RETRY;
4475 }
4476 
4477 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
4478 					 struct kvm_page_fault *fault)
4479 {
4480 	if (unlikely(fault->rsvd))
4481 		return false;
4482 
4483 	if (!fault->present || !fault->write)
4484 		return false;
4485 
4486 	/*
4487 	 * The guest is writing a page that is write-tracked, which cannot
4488 	 * be fixed by the page fault handler.
4489 	 */
4490 	if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
4491 		return true;
4492 
4493 	return false;
4494 }
4495 
4496 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
4497 {
4498 	struct kvm_shadow_walk_iterator iterator;
4499 	u64 spte;
4500 
4501 	walk_shadow_page_lockless_begin(vcpu);
4502 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
4503 		clear_sp_write_flooding_count(iterator.sptep);
4504 	walk_shadow_page_lockless_end(vcpu);
4505 }
4506 
4507 static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
4508 {
4509 	/* make sure the token value is not 0 */
4510 	u32 id = vcpu->arch.apf.id;
4511 
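	/*
	 * The token is (id << 12) | vcpu_id; reset the id before the shifted
	 * value wraps to 0 so that a zero token is never handed out (vcpu_id
	 * itself may be 0).
	 */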
4512 	if (id << 12 == 0)
4513 		vcpu->arch.apf.id = 1;
4514 
4515 	return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
4516 }
4517 
4518 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
4519 				    struct kvm_page_fault *fault)
4520 {
4521 	struct kvm_arch_async_pf arch;
4522 
4523 	arch.token = alloc_apf_token(vcpu);
4524 	arch.gfn = fault->gfn;
4525 	arch.error_code = fault->error_code;
4526 	arch.direct_map = vcpu->arch.mmu->root_role.direct;
4527 	if (arch.direct_map)
4528 		arch.cr3 = (unsigned long)INVALID_GPA;
4529 	else
4530 		arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
4531 
4532 	return kvm_setup_async_pf(vcpu, fault->addr,
4533 				  kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch);
4534 }
4535 
4536 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
4537 {
4538 	int r;
4539 
4540 	if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS))
4541 		return;
4542 
4543 	if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
4544 	      work->wakeup_all)
4545 		return;
4546 
4547 	r = kvm_mmu_reload(vcpu);
4548 	if (unlikely(r))
4549 		return;
4550 
4551 	if (!vcpu->arch.mmu->root_role.direct &&
4552 	      work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
4553 		return;
4554 
4555 	r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code,
4556 				  true, NULL, NULL);
4557 
4558 	/*
4559 	 * Account fixed page faults, otherwise they'll never be counted, but
4560 	 * ignore stats for all other return values.  Page-ready "faults" aren't
4561 	 * truly spurious and never trigger emulation.
4562 	 */
4563 	if (r == RET_PF_FIXED)
4564 		vcpu->stat.pf_fixed++;
4565 }
4566 
4567 static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
4568 				      struct kvm_page_fault *fault, int r)
4569 {
4570 	kvm_release_faultin_page(vcpu->kvm, fault->refcounted_page,
4571 				 r == RET_PF_RETRY, fault->map_writable);
4572 }
4573 
4574 static int kvm_mmu_faultin_pfn_gmem(struct kvm_vcpu *vcpu,
4575 				    struct kvm_page_fault *fault)
4576 {
4577 	int max_order, r;
4578 
4579 	if (!kvm_slot_has_gmem(fault->slot)) {
4580 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4581 		return -EFAULT;
4582 	}
4583 
4584 	r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
4585 			     &fault->refcounted_page, &max_order);
4586 	if (r) {
4587 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4588 		return r;
4589 	}
4590 
4591 	fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
4592 	fault->max_level = kvm_max_level_for_order(max_order);
4593 
4594 	return RET_PF_CONTINUE;
4595 }
4596 
4597 static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
4598 				 struct kvm_page_fault *fault)
4599 {
4600 	unsigned int foll = fault->write ? FOLL_WRITE : 0;
4601 
4602 	if (fault->is_private || kvm_memslot_is_gmem_only(fault->slot))
4603 		return kvm_mmu_faultin_pfn_gmem(vcpu, fault);
4604 
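	/*
	 * First try to resolve the pfn without waiting for I/O, so that on
	 * failure KVM can attempt an async page fault instead of stalling
	 * the vCPU.
	 */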
4605 	foll |= FOLL_NOWAIT;
4606 	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
4607 				       &fault->map_writable, &fault->refcounted_page);
4608 
4609 	/*
4610 	 * If resolving the page failed because I/O is needed to fault-in the
4611 	 * page, then either set up an asynchronous #PF to do the I/O, or if
4612 	 * doing an async #PF isn't possible, retry with I/O allowed.  All
4613 	 * other failures are terminal, i.e. retrying won't help.
4614 	 */
4615 	if (fault->pfn != KVM_PFN_ERR_NEEDS_IO)
4616 		return RET_PF_CONTINUE;
4617 
4618 	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
4619 		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
4620 		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
4621 			trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
4622 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
4623 			return RET_PF_RETRY;
4624 		} else if (kvm_arch_setup_async_pf(vcpu, fault)) {
4625 			return RET_PF_RETRY;
4626 		}
4627 	}
4628 
4629 	/*
4630 	 * Allow gup to bail on pending non-fatal signals when it's also allowed
4631 	 * to wait for IO.  Note, gup always bails if it is unable to quickly
4632 	 * get a page and a fatal signal, i.e. SIGKILL, is pending.
4633 	 */
4634 	foll |= FOLL_INTERRUPTIBLE;
4635 	foll &= ~FOLL_NOWAIT;
4636 	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
4637 				       &fault->map_writable, &fault->refcounted_page);
4638 
4639 	return RET_PF_CONTINUE;
4640 }
4641 
4642 static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
4643 			       struct kvm_page_fault *fault, unsigned int access)
4644 {
4645 	struct kvm_memory_slot *slot = fault->slot;
4646 	struct kvm *kvm = vcpu->kvm;
4647 	int ret;
4648 
4649 	if (KVM_BUG_ON(kvm_is_gfn_alias(kvm, fault->gfn), kvm))
4650 		return -EFAULT;
4651 
4652 	/*
4653 	 * Note that the mmu_invalidate_seq also serves to detect a concurrent
4654 	 * change in attributes.  is_page_fault_stale() will detect an
4655 	 * invalidation related to fault->gfn and resume the guest without
4656 	 * installing a mapping in the page tables.
4657 	 */
4658 	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
4659 	smp_rmb();
4660 
4661 	/*
4662 	 * Now that we have a snapshot of mmu_invalidate_seq we can check for a
4663 	 * private vs. shared mismatch.
4664 	 */
4665 	if (fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) {
4666 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4667 		return -EFAULT;
4668 	}
4669 
4670 	if (unlikely(!slot))
4671 		return kvm_handle_noslot_fault(vcpu, fault, access);
4672 
4673 	/*
4674 	 * Retry the page fault if the gfn hit a memslot that is being deleted
4675 	 * or moved.  This ensures any existing SPTEs for the old memslot will
4676 	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.  Punt the
4677 	 * error to userspace if this is a prefault, as KVM's prefaulting ABI
4678 	 * doesn't provide the same forward progress guarantees as KVM_RUN.
4679 	 */
4680 	if (slot->flags & KVM_MEMSLOT_INVALID) {
4681 		if (fault->prefetch)
4682 			return -EAGAIN;
4683 
4684 		return RET_PF_RETRY;
4685 	}
4686 
4687 	if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) {
4688 		/*
4689 		 * Don't map L1's APIC access page into L2, KVM doesn't support
4690 		 * using APICv/AVIC to accelerate L2 accesses to L1's APIC,
4691 		 * i.e. the access needs to be emulated.  Emulating access to
4692 		 * L1's APIC is also correct if L1 is accelerating L2's own
4693 		 * virtual APIC, but for some reason L1 also maps _L1's_ APIC
4694 		 * into L2.  Note, vcpu_is_mmio_gpa() always treats access to
4695 		 * the APIC as MMIO.  Allow an MMIO SPTE to be created, as KVM
4696 		 * uses different roots for L1 vs. L2, i.e. there is no danger
4697 		 * of breaking APICv/AVIC for L1.
4698 		 */
4699 		if (is_guest_mode(vcpu))
4700 			return kvm_handle_noslot_fault(vcpu, fault, access);
4701 
4702 		/*
4703 		 * If the APIC access page exists but is disabled, go directly
4704 		 * to emulation without caching the MMIO access or creating a
4705 		 * MMIO SPTE.  That way the cache doesn't need to be purged
4706 		 * when the AVIC is re-enabled.
4707 		 */
4708 		if (!kvm_apicv_activated(vcpu->kvm))
4709 			return RET_PF_EMULATE;
4710 	}
4711 
4712 	/*
4713 	 * Check for a relevant mmu_notifier invalidation event before getting
4714 	 * the pfn from the primary MMU, and before acquiring mmu_lock.
4715 	 *
4716 	 * For mmu_lock, if there is an in-progress invalidation and the kernel
4717 	 * allows preemption, the invalidation task may drop mmu_lock and yield
4718 	 * in response to mmu_lock being contended, which is *very* counter-
4719 	 * productive as this vCPU can't actually make forward progress until
4720 	 * the invalidation completes.
4721 	 *
4722 	 * Retrying now can also avoid unnecessary lock contention in the primary
4723 	 * MMU, as the primary MMU doesn't necessarily hold a single lock for
4724 	 * the duration of the invalidation, i.e. faulting in a conflicting pfn
4725 	 * can cause the invalidation to take longer by holding locks that are
4726 	 * needed to complete the invalidation.
4727 	 *
4728 	 * Do the pre-check even for non-preemptible kernels, i.e. even if KVM
4729 	 * will never yield mmu_lock in response to contention, as this vCPU is
4730 	 * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
4731 	 * to detect retry guarantees the worst case latency for the vCPU.
4732 	 */
4733 	if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn))
4734 		return RET_PF_RETRY;
4735 
4736 	ret = __kvm_mmu_faultin_pfn(vcpu, fault);
4737 	if (ret != RET_PF_CONTINUE)
4738 		return ret;
4739 
4740 	if (unlikely(is_error_pfn(fault->pfn)))
4741 		return kvm_handle_error_pfn(vcpu, fault);
4742 
4743 	if (WARN_ON_ONCE(!fault->slot || is_noslot_pfn(fault->pfn)))
4744 		return kvm_handle_noslot_fault(vcpu, fault, access);
4745 
4746 	/*
4747 	 * Check again for a relevant mmu_notifier invalidation event purely to
4748 	 * avoid contending mmu_lock.  Most invalidations will be detected by
4749 	 * the previous check, but checking is extremely cheap relative to the
4750 	 * overall cost of failing to detect the invalidation until after
4751 	 * mmu_lock is acquired.
4752 	 */
4753 	if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) {
4754 		kvm_mmu_finish_page_fault(vcpu, fault, RET_PF_RETRY);
4755 		return RET_PF_RETRY;
4756 	}
4757 
4758 	return RET_PF_CONTINUE;
4759 }
4760 
4761 /*
4762  * Returns true if the page fault is stale and needs to be retried, i.e. if the
4763  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
4764  */
4765 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
4766 				struct kvm_page_fault *fault)
4767 {
4768 	struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4769 
4770 	/* Special roots, e.g. pae_root, are not backed by shadow pages. */
4771 	if (sp && is_obsolete_sp(vcpu->kvm, sp))
4772 		return true;
4773 
4774 	/*
4775 	 * Roots without an associated shadow page are considered invalid if
4776 	 * there is a pending request to free obsolete roots.  The request is
4777 	 * only a hint that the current root _may_ be obsolete and needs to be
4778 	 * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
4779 	 * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
4780 	 * to reload even if no vCPU is actively using the root.
4781 	 */
4782 	if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
4783 		return true;
4784 
4785 	/*
4786 	 * Check for a relevant mmu_notifier invalidation event one last time
4787 	 * now that mmu_lock is held, as the "unsafe" checks performed without
4788 	 * holding mmu_lock can get false negatives.
4789 	 */
4790 	return fault->slot &&
4791 	       mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
4792 }
4793 
4794 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4795 {
4796 	int r;
4797 
4798 	/* Dummy roots are used only for shadowing bad guest roots. */
4799 	if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4800 		return RET_PF_RETRY;
4801 
4802 	if (page_fault_handle_page_track(vcpu, fault))
4803 		return RET_PF_WRITE_PROTECTED;
4804 
4805 	r = fast_page_fault(vcpu, fault);
4806 	if (r != RET_PF_INVALID)
4807 		return r;
4808 
4809 	r = mmu_topup_memory_caches(vcpu, false);
4810 	if (r)
4811 		return r;
4812 
4813 	r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
4814 	if (r != RET_PF_CONTINUE)
4815 		return r;
4816 
4817 	r = RET_PF_RETRY;
4818 	write_lock(&vcpu->kvm->mmu_lock);
4819 
4820 	if (is_page_fault_stale(vcpu, fault))
4821 		goto out_unlock;
4822 
4823 	r = make_mmu_pages_available(vcpu);
4824 	if (r)
4825 		goto out_unlock;
4826 
4827 	r = direct_map(vcpu, fault);
4828 
4829 out_unlock:
4830 	kvm_mmu_finish_page_fault(vcpu, fault, r);
4831 	write_unlock(&vcpu->kvm->mmu_lock);
4832 	return r;
4833 }
4834 
4835 static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
4836 				struct kvm_page_fault *fault)
4837 {
4838 	/* This path builds a PAE pagetable, so we can map 2MB pages at most. */
4839 	fault->max_level = PG_LEVEL_2M;
4840 	return direct_page_fault(vcpu, fault);
4841 }
4842 
4843 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4844 				u64 fault_address, char *insn, int insn_len)
4845 {
4846 	int r = 1;
4847 	u32 flags = vcpu->arch.apf.host_apf_flags;
4848 
4849 #ifndef CONFIG_X86_64
4850 	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
4851 	if (WARN_ON_ONCE(fault_address >> 32))
4852 		return -EFAULT;
4853 #endif
4854 	/*
4855 	 * Legacy #PF exceptions only have a 32-bit error code.  Simply drop the
4856 	 * upper bits as KVM doesn't use them for #PF (because they are never
4857 	 * set), and to ensure there are no collisions with KVM-defined bits.
4858 	 */
4859 	if (WARN_ON_ONCE(error_code >> 32))
4860 		error_code = lower_32_bits(error_code);
4861 
4862 	/*
4863 	 * Restrict KVM-defined flags to bits 63:32 so that it's impossible for
4864 	 * them to conflict with #PF error codes, which are limited to 32 bits.
4865 	 */
4866 	BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK));
4867 
4868 	kvm_request_l1tf_flush_l1d();
4869 	if (!flags) {
4870 		trace_kvm_page_fault(vcpu, fault_address, error_code);
4871 
4872 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4873 				insn_len);
4874 	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4875 		vcpu->arch.apf.host_apf_flags = 0;
4876 		local_irq_disable();
4877 		kvm_async_pf_task_wait_schedule(fault_address);
4878 		local_irq_enable();
4879 	} else {
4880 		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4881 	}
4882 
4883 	return r;
4884 }
4885 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_page_fault);
4886 
4887 #ifdef CONFIG_X86_64
4888 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
4889 				  struct kvm_page_fault *fault)
4890 {
4891 	int r;
4892 
4893 	if (page_fault_handle_page_track(vcpu, fault))
4894 		return RET_PF_WRITE_PROTECTED;
4895 
4896 	r = fast_page_fault(vcpu, fault);
4897 	if (r != RET_PF_INVALID)
4898 		return r;
4899 
4900 	r = mmu_topup_memory_caches(vcpu, false);
4901 	if (r)
4902 		return r;
4903 
4904 	r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
4905 	if (r != RET_PF_CONTINUE)
4906 		return r;
4907 
4908 	r = RET_PF_RETRY;
4909 	read_lock(&vcpu->kvm->mmu_lock);
4910 
4911 	if (is_page_fault_stale(vcpu, fault))
4912 		goto out_unlock;
4913 
4914 	r = kvm_tdp_mmu_map(vcpu, fault);
4915 
4916 out_unlock:
4917 	kvm_mmu_finish_page_fault(vcpu, fault, r);
4918 	read_unlock(&vcpu->kvm->mmu_lock);
4919 	return r;
4920 }
4921 #endif
4922 
4923 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4924 {
4925 #ifdef CONFIG_X86_64
4926 	if (tdp_mmu_enabled)
4927 		return kvm_tdp_mmu_page_fault(vcpu, fault);
4928 #endif
4929 
4930 	return direct_page_fault(vcpu, fault);
4931 }
4932 
4933 static int kvm_tdp_page_prefault(struct kvm_vcpu *vcpu, gpa_t gpa,
4934 				 u64 error_code, u8 *level)
4935 {
4936 	int r;
4937 
4938 	/*
4939 	 * Restrict to TDP page fault, since that's the only case where the MMU
4940 	 * is indexed by GPA.
4941 	 */
4942 	if (vcpu->arch.mmu->page_fault != kvm_tdp_page_fault)
4943 		return -EOPNOTSUPP;
4944 
4945 	do {
4946 		if (signal_pending(current))
4947 			return -EINTR;
4948 
4949 		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
4950 			return -EIO;
4951 
4952 		cond_resched();
4953 		r = kvm_mmu_do_page_fault(vcpu, gpa, error_code, true, NULL, level);
4954 	} while (r == RET_PF_RETRY);
4955 
4956 	if (r < 0)
4957 		return r;
4958 
4959 	switch (r) {
4960 	case RET_PF_FIXED:
4961 	case RET_PF_SPURIOUS:
4962 	case RET_PF_WRITE_PROTECTED:
4963 		return 0;
4964 
4965 	case RET_PF_EMULATE:
4966 		return -ENOENT;
4967 
4968 	case RET_PF_RETRY:
4969 	case RET_PF_CONTINUE:
4970 	case RET_PF_INVALID:
4971 	default:
4972 		WARN_ONCE(1, "could not fix page fault during prefault");
4973 		return -EIO;
4974 	}
4975 }
4976 
4977 long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4978 				    struct kvm_pre_fault_memory *range)
4979 {
4980 	u64 error_code = PFERR_GUEST_FINAL_MASK;
4981 	u8 level = PG_LEVEL_4K;
4982 	u64 direct_bits;
4983 	u64 end;
4984 	int r;
4985 
4986 	if (!vcpu->kvm->arch.pre_fault_allowed)
4987 		return -EOPNOTSUPP;
4988 
4989 	if (kvm_is_gfn_alias(vcpu->kvm, gpa_to_gfn(range->gpa)))
4990 		return -EINVAL;
4991 
4992 	/*
4993 	 * Reloading the MMU is efficient when called repeatedly, so do it on
4994 	 * every iteration.
4995 	 */
4996 	r = kvm_mmu_reload(vcpu);
4997 	if (r)
4998 		return r;
4999 
5000 	direct_bits = 0;
5001 	if (kvm_arch_has_private_mem(vcpu->kvm) &&
5002 	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))
5003 		error_code |= PFERR_PRIVATE_ACCESS;
5004 	else
5005 		direct_bits = gfn_to_gpa(kvm_gfn_direct_bits(vcpu->kvm));
5006 
5007 	/*
5008 	 * Shadow paging uses GVAs for KVM page faults, so restrict to
5009 	 * two-dimensional paging.
5010 	 */
5011 	r = kvm_tdp_page_prefault(vcpu, range->gpa | direct_bits, error_code, &level);
5012 	if (r < 0)
5013 		return r;
5014 
5015 	/*
5016 	 * If the mapping that covers range->gpa can use a huge page, it
5017 	 * may start below it or end after range->gpa + range->size.
5018 	 */
5019 	end = (range->gpa & KVM_HPAGE_MASK(level)) + KVM_HPAGE_SIZE(level);
5020 	return min(range->size, end - range->gpa);
5021 }
5022 
5023 #ifdef CONFIG_KVM_GUEST_MEMFD
5024 static void kvm_assert_gmem_invalidate_lock_held(struct kvm_memory_slot *slot)
5025 {
5026 #ifdef CONFIG_PROVE_LOCKING
5027 	if (WARN_ON_ONCE(!kvm_slot_has_gmem(slot)) ||
5028 	    WARN_ON_ONCE(!slot->gmem.file) ||
5029 	    WARN_ON_ONCE(!file_count(slot->gmem.file)))
5030 		return;
5031 
5032 	lockdep_assert_held(&file_inode(slot->gmem.file)->i_mapping->invalidate_lock);
5033 #endif
5034 }
5035 
5036 int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
5037 {
5038 	struct kvm_page_fault fault = {
5039 		.addr = gfn_to_gpa(gfn),
5040 		.error_code = PFERR_GUEST_FINAL_MASK | PFERR_PRIVATE_ACCESS,
5041 		.prefetch = true,
5042 		.is_tdp = true,
5043 		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(vcpu->kvm),
5044 
5045 		.max_level = PG_LEVEL_4K,
5046 		.req_level = PG_LEVEL_4K,
5047 		.goal_level = PG_LEVEL_4K,
5048 		.is_private = true,
5049 
5050 		.gfn = gfn,
5051 		.slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn),
5052 		.pfn = pfn,
5053 		.map_writable = true,
5054 	};
5055 	struct kvm *kvm = vcpu->kvm;
5056 	int r;
5057 
5058 	lockdep_assert_held(&kvm->slots_lock);
5059 
5060 	/*
5061 	 * Mapping a pre-determined private pfn is intended only for use when
5062 	 * populating a guest_memfd instance.  Assert that the slot is backed
5063 	 * by guest_memfd and that the gmem instance's invalidate_lock is held.
5064 	 */
5065 	kvm_assert_gmem_invalidate_lock_held(fault.slot);
5066 
5067 	if (KVM_BUG_ON(!tdp_mmu_enabled, kvm))
5068 		return -EIO;
5069 
5070 	if (kvm_gfn_is_write_tracked(kvm, fault.slot, fault.gfn))
5071 		return -EPERM;
5072 
5073 	r = kvm_mmu_reload(vcpu);
5074 	if (r)
5075 		return r;
5076 
5077 	r = mmu_topup_memory_caches(vcpu, false);
5078 	if (r)
5079 		return r;
5080 
5081 	do {
5082 		if (signal_pending(current))
5083 			return -EINTR;
5084 
5085 		if (kvm_test_request(KVM_REQ_VM_DEAD, vcpu))
5086 			return -EIO;
5087 
5088 		cond_resched();
5089 
5090 		guard(read_lock)(&kvm->mmu_lock);
5091 
5092 		r = kvm_tdp_mmu_map(vcpu, &fault);
5093 	} while (r == RET_PF_RETRY);
5094 
5095 	if (r != RET_PF_FIXED)
5096 		return -EIO;
5097 
5098 	return 0;
5099 }
5100 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_mmu_map_private_pfn);
5101 #endif
5102 
5103 static void nonpaging_init_context(struct kvm_mmu *context)
5104 {
5105 	context->page_fault = nonpaging_page_fault;
5106 	context->gva_to_gpa = nonpaging_gva_to_gpa;
5107 	context->sync_spte = NULL;
5108 }
5109 
5110 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
5111 				  union kvm_mmu_page_role role)
5112 {
5113 	struct kvm_mmu_page *sp;
5114 
5115 	if (!VALID_PAGE(root->hpa))
5116 		return false;
5117 
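	/*
	 * root.pgd is ignored (and always 0) for direct roots, so the pgd
	 * only needs to match when shadowing guest page tables.
	 */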
5118 	if (!role.direct && pgd != root->pgd)
5119 		return false;
5120 
5121 	sp = root_to_sp(root->hpa);
5122 	if (WARN_ON_ONCE(!sp))
5123 		return false;
5124 
5125 	return role.word == sp->role.word;
5126 }
5127 
5128 /*
5129  * Find out if a previously cached root matching the new pgd/role is available,
5130  * and insert the current root as the MRU in the cache.
5131  * If a matching root is found, it is assigned to kvm_mmu->root and
5132  * true is returned.
5133  * If no match is found, kvm_mmu->root is left invalid, the LRU root is
5134  * evicted to make room for the current root, and false is returned.
5135  */
5136 static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
5137 					      gpa_t new_pgd,
5138 					      union kvm_mmu_page_role new_role)
5139 {
5140 	uint i;
5141 
5142 	if (is_root_usable(&mmu->root, new_pgd, new_role))
5143 		return true;
5144 
5145 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5146 		/*
5147 		 * The swaps end up rotating the cache like this:
5148 		 *   C   0 1 2 3   (on entry to the function)
5149 		 *   0   C 1 2 3
5150 		 *   1   C 0 2 3
5151 		 *   2   C 0 1 3
5152 		 *   3   C 0 1 2   (on exit from the loop)
5153 		 */
5154 		swap(mmu->root, mmu->prev_roots[i]);
5155 		if (is_root_usable(&mmu->root, new_pgd, new_role))
5156 			return true;
5157 	}
5158 
5159 	kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
5160 	return false;
5161 }
5162 
5163 /*
5164  * Find out if a previously cached root matching the new pgd/role is available.
5165  * On entry, mmu->root is invalid.
5166  * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
5167  * of the cache becomes invalid, and true is returned.
5168  * If no match is found, kvm_mmu->root is left invalid and false is returned.
5169  */
5170 static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
5171 					     gpa_t new_pgd,
5172 					     union kvm_mmu_page_role new_role)
5173 {
5174 	uint i;
5175 
5176 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5177 		if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role))
5178 			goto hit;
5179 
5180 	return false;
5181 
5182 hit:
5183 	swap(mmu->root, mmu->prev_roots[i]);
5184 	/* Bubble up the remaining roots.  */
5185 	for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++)
5186 		mmu->prev_roots[i] = mmu->prev_roots[i + 1];
5187 	mmu->prev_roots[i].hpa = INVALID_PAGE;
5188 	return true;
5189 }
5190 
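/*
 * For illustration, a concrete trace of the hit path above (a sketch,
 * assuming KVM_MMU_NUM_PREV_ROOTS == 3 and a hit at i == 1):
 *
 *   prev_roots: A B C    root: -    (on entry, mmu->root is invalid)
 *   after swap: A - C    root: B    (swap with prev_roots[1])
 *   after loop: A C -    root: B    (prev_roots[2] bubbled into slot 1,
 *                                    and the LRU slot is invalidated)
 */
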
5191 static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
5192 			    gpa_t new_pgd, union kvm_mmu_page_role new_role)
5193 {
5194 	/*
5195 	 * Limit reuse to 64-bit hosts+VMs without "special" roots in order to
5196 	 * avoid having to deal with PDPTEs and other complexities.
5197 	 */
5198 	if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
5199 		kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
5200 
5201 	if (VALID_PAGE(mmu->root.hpa))
5202 		return cached_root_find_and_keep_current(kvm, mmu, new_pgd, new_role);
5203 	else
5204 		return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
5205 }
5206 
5207 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
5208 {
5209 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5210 	union kvm_mmu_page_role new_role = mmu->root_role;
5211 
5212 	/*
5213 	 * Return immediately if no usable root was found; kvm_mmu_reload()
5214 	 * will establish a valid root prior to the next VM-Enter.
5215 	 */
5216 	if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role))
5217 		return;
5218 
5219 	/*
5220 	 * It's possible that the cached previous root page is obsolete because
5221 	 * of a change in the MMU generation number. However, changing the
5222 	 * generation number is accompanied by KVM_REQ_MMU_FREE_OBSOLETE_ROOTS,
5223 	 * which will free the root set here and allocate a new one.
5224 	 */
5225 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
5226 
5227 	if (force_flush_and_sync_on_reuse) {
5228 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
5229 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
5230 	}
5231 
5232 	/*
5233 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
5234 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
5235 	 * valid. So clear any cached MMIO info even when we don't need to sync
5236 	 * the shadow page tables.
5237 	 */
5238 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
5239 
5240 	/*
5241 	 * If this is a direct root page, it doesn't have a write flooding
5242 	 * count. Otherwise, clear the write flooding count.
5243 	 */
5244 	if (!new_role.direct) {
5245 		struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
5246 
5247 		if (!WARN_ON_ONCE(!sp))
5248 			__clear_sp_write_flooding_count(sp);
5249 	}
5250 }
5251 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_new_pgd);
5252 
5253 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
5254 			   unsigned int access)
5255 {
5256 	if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) {
5257 		if (gfn != get_mmio_spte_gfn(*sptep)) {
5258 			mmu_spte_clear_no_track(sptep);
5259 			return true;
5260 		}
5261 
5262 		mark_mmio_spte(vcpu, sptep, gfn, access);
5263 		return true;
5264 	}
5265 
5266 	return false;
5267 }
5268 
5269 #define PTTYPE_EPT 18 /* arbitrary */
5270 #define PTTYPE PTTYPE_EPT
5271 #include "paging_tmpl.h"
5272 #undef PTTYPE
5273 
5274 #define PTTYPE 64
5275 #include "paging_tmpl.h"
5276 #undef PTTYPE
5277 
5278 #define PTTYPE 32
5279 #include "paging_tmpl.h"
5280 #undef PTTYPE
5281 
5282 static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
5283 				    u64 pa_bits_rsvd, int level, bool nx,
5284 				    bool gbpages, bool pse, bool amd)
5285 {
5286 	u64 gbpages_bit_rsvd = 0;
5287 	u64 nonleaf_bit8_rsvd = 0;
5288 	u64 high_bits_rsvd;
5289 
5290 	rsvd_check->bad_mt_xwr = 0;
5291 
5292 	if (!gbpages)
5293 		gbpages_bit_rsvd = rsvd_bits(7, 7);
5294 
5295 	if (level == PT32E_ROOT_LEVEL)
5296 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
5297 	else
5298 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
5299 
5300 	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
5301 	if (!nx)
5302 		high_bits_rsvd |= rsvd_bits(63, 63);
5303 
5304 	/*
5305 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
5306 	 * leaf entries) on AMD CPUs only.
5307 	 */
5308 	if (amd)
5309 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
5310 
5311 	switch (level) {
5312 	case PT32_ROOT_LEVEL:
5313 		/* no rsvd bits for 2 level 4K page table entries */
5314 		rsvd_check->rsvd_bits_mask[0][1] = 0;
5315 		rsvd_check->rsvd_bits_mask[0][0] = 0;
5316 		rsvd_check->rsvd_bits_mask[1][0] =
5317 			rsvd_check->rsvd_bits_mask[0][0];
5318 
5319 		if (!pse) {
5320 			rsvd_check->rsvd_bits_mask[1][1] = 0;
5321 			break;
5322 		}
5323 
5324 		if (is_cpuid_PSE36())
5325 			/* 36bits PSE 4MB page */
5326 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
5327 		else
5328 			/* 32 bits PSE 4MB page */
5329 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
5330 		break;
5331 	case PT32E_ROOT_LEVEL:
5332 		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
5333 						   high_bits_rsvd |
5334 						   rsvd_bits(5, 8) |
5335 						   rsvd_bits(1, 2);	/* PDPTE */
5336 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
5337 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
5338 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
5339 						   rsvd_bits(13, 20);	/* large page */
5340 		rsvd_check->rsvd_bits_mask[1][0] =
5341 			rsvd_check->rsvd_bits_mask[0][0];
5342 		break;
5343 	case PT64_ROOT_5LEVEL:
5344 		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
5345 						   nonleaf_bit8_rsvd |
5346 						   rsvd_bits(7, 7);
5347 		rsvd_check->rsvd_bits_mask[1][4] =
5348 			rsvd_check->rsvd_bits_mask[0][4];
5349 		fallthrough;
5350 	case PT64_ROOT_4LEVEL:
5351 		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
5352 						   nonleaf_bit8_rsvd |
5353 						   rsvd_bits(7, 7);
5354 		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
5355 						   gbpages_bit_rsvd;
5356 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
5357 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
5358 		rsvd_check->rsvd_bits_mask[1][3] =
5359 			rsvd_check->rsvd_bits_mask[0][3];
5360 		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
5361 						   gbpages_bit_rsvd |
5362 						   rsvd_bits(13, 29);
5363 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
5364 						   rsvd_bits(13, 20); /* large page */
5365 		rsvd_check->rsvd_bits_mask[1][0] =
5366 			rsvd_check->rsvd_bits_mask[0][0];
5367 		break;
5368 	}
5369 }
5370 
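/*
 * For illustration, a worked example of the masks built above (assuming a
 * guest MAXPHYADDR of 36, i.e. pa_bits_rsvd == rsvd_bits(36, 63)): for a
 * 4-level guest, high_bits_rsvd covers bits 51:36, so the 2MB large-page
 * mask becomes
 *
 *	rsvd_bits_mask[1][1] == rsvd_bits(36, 51) | rsvd_bits(13, 20)
 *
 * i.e. a 2MB PDE triggers a reserved-bit fault if it sets any physical
 * address bit above the guest's MAXPHYADDR or any bit in the must-be-zero
 * range below the 2MB frame number.
 */
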
5371 static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
5372 					struct kvm_mmu *context)
5373 {
5374 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
5375 				vcpu->arch.reserved_gpa_bits,
5376 				context->cpu_role.base.level, is_efer_nx(context),
5377 				guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
5378 				is_cr4_pse(context),
5379 				guest_cpuid_is_amd_compatible(vcpu));
5380 }
5381 
5382 static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
5383 					u64 pa_bits_rsvd, bool execonly,
5384 					int huge_page_level)
5385 {
5386 	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
5387 	u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
5388 	u64 bad_mt_xwr;
5389 
5390 	if (huge_page_level < PG_LEVEL_1G)
5391 		large_1g_rsvd = rsvd_bits(7, 7);
5392 	if (huge_page_level < PG_LEVEL_2M)
5393 		large_2m_rsvd = rsvd_bits(7, 7);
5394 
5395 	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
5396 	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
5397 	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
5398 	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
5399 	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
5400 
5401 	/* large page */
5402 	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
5403 	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
5404 	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
5405 	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
5406 	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
5407 
5408 	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
5409 	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
5410 	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
5411 	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
5412 	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
5413 	if (!execonly) {
5414 		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
5415 		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
5416 	}
5417 	rsvd_check->bad_mt_xwr = bad_mt_xwr;
5418 }
5419 
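/*
 * For illustration: bad_mt_xwr is a 64-bit lookup table indexed by an EPT
 * PTE's low 6 bits (XWR in bits 2:0, memtype in bits 5:3), so byte N of the
 * table covers every XWR combination for memtype N.  A rough sketch of the
 * consumer-side check (mirroring the helper in spte.h):
 *
 *	if (rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f))
 *		...treat the access as a reserved-bit violation...
 *
 * E.g. 0xFF << (2 * 8) flags memtype 2 for every XWR combination, while
 * REPEAT_BYTE(1ull << 2) flags write-only (XWR == 010b) for every memtype.
 */
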
5420 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
5421 		struct kvm_mmu *context, bool execonly, int huge_page_level)
5422 {
5423 	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
5424 				    vcpu->arch.reserved_gpa_bits, execonly,
5425 				    huge_page_level);
5426 }
5427 
5428 static inline u64 reserved_hpa_bits(void)
5429 {
5430 	return rsvd_bits(kvm_host.maxphyaddr, 63);
5431 }
5432 
5433 /*
5434  * The page table on the host is the shadow page table for the page
5435  * table in the guest or an AMD nested guest; its MMU features
5436  * completely follow the features in the guest.
5437  */
5438 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
5439 					struct kvm_mmu *context)
5440 {
5441 	/* @amd adds a check on bit 8 of SPTEs, which KVM shouldn't use anyway. */
5442 	bool is_amd = true;
5443 	/* KVM doesn't use 2-level page tables for the shadow MMU. */
5444 	bool is_pse = false;
5445 	struct rsvd_bits_validate *shadow_zero_check;
5446 	int i;
5447 
5448 	WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL);
5449 
5450 	shadow_zero_check = &context->shadow_zero_check;
5451 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5452 				context->root_role.level,
5453 				context->root_role.efer_nx,
5454 				guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
5455 				is_pse, is_amd);
5456 
5457 	if (!shadow_me_mask)
5458 		return;
5459 
5460 	for (i = context->root_role.level; --i >= 0;) {
5461 		/*
5462 		 * So far shadow_me_value is a constant during KVM's
5463 		 * lifetime.  Bits in shadow_me_value are allowed to be set.
5464 		 * Bits in shadow_me_mask but not in shadow_me_value are
5465 		 * not allowed to be set.
5466 		 */
5467 		shadow_zero_check->rsvd_bits_mask[0][i] |= shadow_me_mask;
5468 		shadow_zero_check->rsvd_bits_mask[1][i] |= shadow_me_mask;
5469 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_value;
5470 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_value;
5471 	}
5472 
5473 }
5474 
5475 static inline bool boot_cpu_is_amd(void)
5476 {
5477 	WARN_ON_ONCE(!tdp_enabled);
5478 	return shadow_x_mask == 0;
5479 }
5480 
5481 /*
5482  * The direct page table on the host uses as many MMU features as
5483  * possible; however, KVM currently does not do execution-protection.
5484  */
5485 static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
5486 {
5487 	struct rsvd_bits_validate *shadow_zero_check;
5488 	int i;
5489 
5490 	shadow_zero_check = &context->shadow_zero_check;
5491 
5492 	if (boot_cpu_is_amd())
5493 		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5494 					context->root_role.level, true,
5495 					boot_cpu_has(X86_FEATURE_GBPAGES),
5496 					false, true);
5497 	else
5498 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
5499 					    reserved_hpa_bits(), false,
5500 					    max_huge_page_level);
5501 
5502 	if (!shadow_me_mask)
5503 		return;
5504 
5505 	for (i = context->root_role.level; --i >= 0;) {
5506 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
5507 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
5508 	}
5509 }
5510 
5511 /*
5512  * Same as the comments in reset_shadow_zero_bits_mask(), except this
5513  * is the shadow page table for an Intel nested guest.
5514  */
5515 static void
5516 reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
5517 {
5518 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
5519 				    reserved_hpa_bits(), execonly,
5520 				    max_huge_page_level);
5521 }
5522 
5523 #define BYTE_MASK(access) \
5524 	((1 & (access) ? 2 : 0) | \
5525 	 (2 & (access) ? 4 : 0) | \
5526 	 (3 & (access) ? 8 : 0) | \
5527 	 (4 & (access) ? 16 : 0) | \
5528 	 (5 & (access) ? 32 : 0) | \
5529 	 (6 & (access) ? 64 : 0) | \
5530 	 (7 & (access) ? 128 : 0))
5531 
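/*
 * For illustration, a worked expansion of BYTE_MASK(): bit N of the result
 * is set iff UWX combination N includes one of the bits in @access.
 * Assuming the usual ACC_EXEC_MASK == 1, ACC_WRITE_MASK == 2 and
 * ACC_USER_MASK == 4:
 *
 *	x = BYTE_MASK(1) == 0xaa	(combinations 1, 3, 5, 7 include X)
 *	w = BYTE_MASK(2) == 0xcc	(combinations 2, 3, 6, 7 include W)
 *	u = BYTE_MASK(4) == 0xf0	(combinations 4, 5, 6, 7 include U)
 */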
5532 
5533 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
5534 {
5535 	unsigned byte;
5536 
5537 	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
5538 	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
5539 	const u8 u = BYTE_MASK(ACC_USER_MASK);
5540 
5541 	bool cr4_smep = is_cr4_smep(mmu);
5542 	bool cr4_smap = is_cr4_smap(mmu);
5543 	bool cr0_wp = is_cr0_wp(mmu);
5544 	bool efer_nx = is_efer_nx(mmu);
5545 
5546 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
5547 		unsigned pfec = byte << 1;
5548 
5549 		/*
5550 		 * Each "*f" variable has a 1 bit for each UWX value
5551 		 * that causes a fault with the given PFEC.
5552 		 */
5553 
5554 		/* Faults from writes to non-writable pages */
5555 		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
5556 		/* Faults from user mode accesses to supervisor pages */
5557 		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
5558 		/* Faults from fetches of non-executable pages */
5559 		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
5560 		/* Faults from kernel mode fetches of user pages */
5561 		u8 smepf = 0;
5562 		/* Faults from kernel mode accesses of user pages */
5563 		u8 smapf = 0;
5564 
5565 		if (!ept) {
5566 			/* Faults from kernel mode accesses to user pages */
5567 			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
5568 
5569 			/* Not really needed: !nx will cause pte.nx to fault */
5570 			if (!efer_nx)
5571 				ff = 0;
5572 
5573 			/* Allow supervisor writes if !cr0.wp */
5574 			if (!cr0_wp)
5575 				wf = (pfec & PFERR_USER_MASK) ? wf : 0;
5576 
5577 			/* Disallow supervisor fetches of user code if cr4.smep */
5578 			if (cr4_smep)
5579 				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
5580 
5581 			/*
5582 			 * SMAP:kernel-mode data accesses from user-mode
5583 			 * mappings should fault. A fault is considered
5584 			 * as a SMAP violation if all of the following
5585 			 * conditions are true:
5586 			 *   - X86_CR4_SMAP is set in CR4
5587 			 *   - A user page is accessed
5588 			 *   - The access is not a fetch
5589 			 *   - The access is supervisor mode
5590 			 *   - If implicit supervisor access or X86_EFLAGS_AC is clear
5591 			 *
5592 			 * Here, we cover the first four conditions.
5593 			 * The fifth is computed dynamically in permission_fault();
5594 			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
5595 			 * *not* subject to SMAP restrictions.
5596 			 */
5597 			if (cr4_smap)
5598 				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
5599 		}
5600 
5601 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
5602 	}
5603 }
5604 
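/*
 * For illustration, a rough sketch of the consumer side in
 * permission_fault() (simplified; the real lookup also folds the SMAP
 * "not subject" trick into the index via PFERR_RSVD_MASK, see above):
 *
 *	fault = (mmu->permissions[pfec >> 1] >> pte_access) & 1;
 *
 * E.g. a user-mode write (PFEC.U | PFEC.W) to a PTE whose access bits
 * include both U and W selects a byte in which neither uf nor wf covers
 * that UWX combination, so no fault is reported.
 */
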
5605 /*
5606  * PKU is an additional mechanism by which paging controls access to
5607  * user-mode addresses based on the value in the PKRU register.  Protection
5608  * key violations are reported through a bit in the page fault error code.
5609  * Unlike other bits of the error code, the PK bit is not known at the
5610  * call site of e.g. gva_to_gpa; it must be computed directly in
5611  * permission_fault based on two bits of PKRU, on some machine state (CR4,
5612  * CR0, EFER, CPL), and on other bits of the error code and the page tables.
5613  *
5614  * In particular the following conditions come from the error code, the
5615  * page tables and the machine state:
5616  * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
5617  * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
5618  * - PK is always zero if U=0 in the page tables
5619  * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
5620  *
5621  * The PKRU bitmask caches the result of these four conditions.  The error
5622  * code (minus the P bit) and the page table's U bit form an index into the
5623  * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
5624  * with the two bits of the PKRU register corresponding to the protection key.
5625  * For the first three conditions above the bits will be 00, thus masking
5626  * away both AD and WD.  For all reads, or if the last condition holds, only
5627  * WD will be masked away.
5628  */
5629 static void update_pkru_bitmask(struct kvm_mmu *mmu)
5630 {
5631 	unsigned bit;
5632 	bool wp;
5633 
5634 	mmu->pkru_mask = 0;
5635 
5636 	if (!is_cr4_pke(mmu))
5637 		return;
5638 
5639 	wp = is_cr0_wp(mmu);
5640 
5641 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
5642 		unsigned pfec, pkey_bits;
5643 		bool check_pkey, check_write, ff, uf, wf, pte_user;
5644 
5645 		pfec = bit << 1;
5646 		ff = pfec & PFERR_FETCH_MASK;
5647 		uf = pfec & PFERR_USER_MASK;
5648 		wf = pfec & PFERR_WRITE_MASK;
5649 
5650 		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
5651 		pte_user = pfec & PFERR_RSVD_MASK;
5652 
5653 		/*
5654 		 * Only need to check the access which is not an
5655 		 * instruction fetch and is to a user page.
5656 		 */
5657 		check_pkey = (!ff && pte_user);
5658 		/*
5659 		 * write access is controlled by PKRU if it is a
5660 		 * user access or CR0.WP = 1.
5661 		 */
5662 		check_write = check_pkey && wf && (uf || wp);
5663 
5664 		/* PKRU.AD stops both read and write access. */
5665 		pkey_bits = !!check_pkey;
5666 		/* PKRU.WD stops write access. */
5667 		pkey_bits |= (!!check_write) << 1;
5668 
5669 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
5670 	}
5671 }
5672 
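/*
 * For illustration, a rough sketch of how pkru_mask is consumed in
 * permission_fault() (simplified, not the exact upstream code): the two
 * cached bits selected by the error code and the PTE's U bit are ANDed
 * with the PKRU bits of the page's protection key:
 *
 *	pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
 *	offset = (pfec & ~1) | (pte_user ? PFERR_RSVD_MASK : 0);
 *	fault |= (pkru_bits & (mmu->pkru_mask >> offset)) != 0;
 *
 * i.e. a protection-key fault is reported only when the cached conditions
 * say the key is checked *and* the guest's PKRU actually denies the access.
 */
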
5673 static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
5674 					struct kvm_mmu *mmu)
5675 {
5676 	if (!is_cr0_pg(mmu))
5677 		return;
5678 
5679 	reset_guest_rsvds_bits_mask(vcpu, mmu);
5680 	update_permission_bitmask(mmu, false);
5681 	update_pkru_bitmask(mmu);
5682 }
5683 
5684 static void paging64_init_context(struct kvm_mmu *context)
5685 {
5686 	context->page_fault = paging64_page_fault;
5687 	context->gva_to_gpa = paging64_gva_to_gpa;
5688 	context->sync_spte = paging64_sync_spte;
5689 }
5690 
5691 static void paging32_init_context(struct kvm_mmu *context)
5692 {
5693 	context->page_fault = paging32_page_fault;
5694 	context->gva_to_gpa = paging32_gva_to_gpa;
5695 	context->sync_spte = paging32_sync_spte;
5696 }
5697 
5698 static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
5699 					    const struct kvm_mmu_role_regs *regs)
5700 {
5701 	union kvm_cpu_role role = {0};
5702 
5703 	role.base.access = ACC_ALL;
5704 	role.base.smm = is_smm(vcpu);
5705 	role.base.guest_mode = is_guest_mode(vcpu);
5706 	role.ext.valid = 1;
5707 
5708 	if (!____is_cr0_pg(regs)) {
5709 		role.base.direct = 1;
5710 		return role;
5711 	}
5712 
5713 	role.base.efer_nx = ____is_efer_nx(regs);
5714 	role.base.cr0_wp = ____is_cr0_wp(regs);
5715 	role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
5716 	role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
5717 	role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
5718 
5719 	if (____is_efer_lma(regs))
5720 		role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL
5721 							: PT64_ROOT_4LEVEL;
5722 	else if (____is_cr4_pae(regs))
5723 		role.base.level = PT32E_ROOT_LEVEL;
5724 	else
5725 		role.base.level = PT32_ROOT_LEVEL;
5726 
5727 	role.ext.cr4_smep = ____is_cr4_smep(regs);
5728 	role.ext.cr4_smap = ____is_cr4_smap(regs);
5729 	role.ext.cr4_pse = ____is_cr4_pse(regs);
5730 
5731 	/* PKEY and LA57 are active iff long mode is active. */
5732 	role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
5733 	role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
5734 	role.ext.efer_lma = ____is_efer_lma(regs);
5735 	return role;
5736 }
5737 
5738 void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
5739 					struct kvm_mmu *mmu)
5740 {
5741 	const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP);
5742 
5743 	BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
5744 	BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
5745 
5746 	if (is_cr0_wp(mmu) == cr0_wp)
5747 		return;
5748 
5749 	mmu->cpu_role.base.cr0_wp = cr0_wp;
5750 	reset_guest_paging_metadata(vcpu, mmu);
5751 }
5752 
5753 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
5754 {
5755 	int maxpa;
5756 
5757 	if (vcpu->kvm->arch.vm_type == KVM_X86_TDX_VM)
5758 		maxpa = cpuid_query_maxguestphyaddr(vcpu);
5759 	else
5760 		maxpa = cpuid_maxphyaddr(vcpu);
5761 
5762 	/* tdp_root_level is an architecturally forced level; use it if nonzero. */
5763 	if (tdp_root_level)
5764 		return tdp_root_level;
5765 
5766 	/* Use 5-level TDP if and only if it's useful/necessary. */
5767 	if (max_tdp_level == 5 && maxpa <= 48)
5768 		return 4;
5769 
5770 	return max_tdp_level;
5771 }
5772 
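/*
 * For illustration: with max_tdp_level == 5 and a guest MAXPHYADDR <= 48,
 * the logic above settles on 4-level TDP, since a 4-level walk already
 * covers a 48-bit guest-physical address space; a larger MAXPHYADDR (e.g.
 * 52) keeps the full 5-level walk.
 */
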
5773 u8 kvm_mmu_get_max_tdp_level(void)
5774 {
5775 	return tdp_root_level ? tdp_root_level : max_tdp_level;
5776 }
5777 
5778 static union kvm_mmu_page_role
5779 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
5780 				union kvm_cpu_role cpu_role)
5781 {
5782 	union kvm_mmu_page_role role = {0};
5783 
5784 	role.access = ACC_ALL;
5785 	role.cr0_wp = true;
5786 	role.efer_nx = true;
5787 	role.smm = cpu_role.base.smm;
5788 	role.guest_mode = cpu_role.base.guest_mode;
5789 	role.ad_disabled = !kvm_ad_enabled;
5790 	role.level = kvm_mmu_get_tdp_level(vcpu);
5791 	role.direct = true;
5792 	role.has_4_byte_gpte = false;
5793 
5794 	return role;
5795 }
5796 
5797 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
5798 			     union kvm_cpu_role cpu_role)
5799 {
5800 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5801 	union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);
5802 
5803 	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5804 	    root_role.word == context->root_role.word)
5805 		return;
5806 
5807 	context->cpu_role.as_u64 = cpu_role.as_u64;
5808 	context->root_role.word = root_role.word;
5809 	context->page_fault = kvm_tdp_page_fault;
5810 	context->sync_spte = NULL;
5811 	context->get_guest_pgd = get_guest_cr3;
5812 	context->get_pdptr = kvm_pdptr_read;
5813 	context->inject_page_fault = kvm_inject_page_fault;
5814 
5815 	if (!is_cr0_pg(context))
5816 		context->gva_to_gpa = nonpaging_gva_to_gpa;
5817 	else if (is_cr4_pae(context))
5818 		context->gva_to_gpa = paging64_gva_to_gpa;
5819 	else
5820 		context->gva_to_gpa = paging32_gva_to_gpa;
5821 
5822 	reset_guest_paging_metadata(vcpu, context);
5823 	reset_tdp_shadow_zero_bits_mask(context);
5824 }
5825 
5826 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
5827 				    union kvm_cpu_role cpu_role,
5828 				    union kvm_mmu_page_role root_role)
5829 {
5830 	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5831 	    root_role.word == context->root_role.word)
5832 		return;
5833 
5834 	context->cpu_role.as_u64 = cpu_role.as_u64;
5835 	context->root_role.word = root_role.word;
5836 
5837 	if (!is_cr0_pg(context))
5838 		nonpaging_init_context(context);
5839 	else if (is_cr4_pae(context))
5840 		paging64_init_context(context);
5841 	else
5842 		paging32_init_context(context);
5843 
5844 	reset_guest_paging_metadata(vcpu, context);
5845 	reset_shadow_zero_bits_mask(vcpu, context);
5846 }
5847 
5848 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
5849 				union kvm_cpu_role cpu_role)
5850 {
5851 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5852 	union kvm_mmu_page_role root_role;
5853 
5854 	root_role = cpu_role.base;
5855 
5856 	/* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */
5857 	root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);
5858 
5859 	/*
5860 	 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
5861 	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
5862 	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
5863 	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
5864 	 * The iTLB multi-hit workaround can be toggled at any time, so assume
5865 	 * NX can be used by any non-nested shadow MMU to avoid having to reset
5866 	 * MMU contexts.
5867 	 */
5868 	root_role.efer_nx = true;
5869 
5870 	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5871 }
5872 
5873 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
5874 			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
5875 {
5876 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5877 	struct kvm_mmu_role_regs regs = {
5878 		.cr0 = cr0,
5879 		.cr4 = cr4 & ~X86_CR4_PKE,
5880 		.efer = efer,
5881 	};
5882 	union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5883 	union kvm_mmu_page_role root_role;
5884 
5885 	/* NPT requires CR0.PG=1. */
5886 	WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode);
5887 
5888 	root_role = cpu_role.base;
5889 	root_role.level = kvm_mmu_get_tdp_level(vcpu);
5890 	if (root_role.level == PT64_ROOT_5LEVEL &&
5891 	    cpu_role.base.level == PT64_ROOT_4LEVEL)
5892 		root_role.passthrough = 1;
5893 
5894 	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5895 	kvm_mmu_new_pgd(vcpu, nested_cr3);
5896 }
5897 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_shadow_npt_mmu);
5898 
5899 static union kvm_cpu_role
5900 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
5901 				   bool execonly, u8 level)
5902 {
5903 	union kvm_cpu_role role = {0};
5904 
5905 	/*
5906 	 * KVM does not support SMM transfer monitors, and consequently does not
5907 	 * support the "entry to SMM" control either.  role.base.smm is always 0.
5908 	 */
5909 	WARN_ON_ONCE(is_smm(vcpu));
5910 	role.base.level = level;
5911 	role.base.has_4_byte_gpte = false;
5912 	role.base.direct = false;
5913 	role.base.ad_disabled = !accessed_dirty;
5914 	role.base.guest_mode = true;
5915 	role.base.access = ACC_ALL;
5916 
5917 	role.ext.word = 0;
5918 	role.ext.execonly = execonly;
5919 	role.ext.valid = 1;
5920 
5921 	return role;
5922 }
5923 
5924 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
5925 			     int huge_page_level, bool accessed_dirty,
5926 			     gpa_t new_eptp)
5927 {
5928 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5929 	u8 level = vmx_eptp_page_walk_level(new_eptp);
5930 	union kvm_cpu_role new_mode =
5931 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
5932 						   execonly, level);
5933 
5934 	if (new_mode.as_u64 != context->cpu_role.as_u64) {
5935 		/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
5936 		context->cpu_role.as_u64 = new_mode.as_u64;
5937 		context->root_role.word = new_mode.base.word;
5938 
5939 		context->page_fault = ept_page_fault;
5940 		context->gva_to_gpa = ept_gva_to_gpa;
5941 		context->sync_spte = ept_sync_spte;
5942 
5943 		update_permission_bitmask(context, true);
5944 		context->pkru_mask = 0;
5945 		reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
5946 		reset_ept_shadow_zero_bits_mask(context, execonly);
5947 	}
5948 
5949 	kvm_mmu_new_pgd(vcpu, new_eptp);
5950 }
5951 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_shadow_ept_mmu);
5952 
5953 static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
5954 			     union kvm_cpu_role cpu_role)
5955 {
5956 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5957 
5958 	kvm_init_shadow_mmu(vcpu, cpu_role);
5959 
5960 	context->get_guest_pgd     = get_guest_cr3;
5961 	context->get_pdptr         = kvm_pdptr_read;
5962 	context->inject_page_fault = kvm_inject_page_fault;
5963 }
5964 
5965 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
5966 				union kvm_cpu_role new_mode)
5967 {
5968 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5969 
5970 	if (new_mode.as_u64 == g_context->cpu_role.as_u64)
5971 		return;
5972 
5973 	g_context->cpu_role.as_u64   = new_mode.as_u64;
5974 	g_context->get_guest_pgd     = get_guest_cr3;
5975 	g_context->get_pdptr         = kvm_pdptr_read;
5976 	g_context->inject_page_fault = kvm_inject_page_fault;
5977 
5978 	/*
5979 	 * L2 page tables are never shadowed, so there is no need to sync
5980 	 * SPTEs.
5981 	 */
5982 	g_context->sync_spte         = NULL;
5983 
5984 	/*
5985 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5986 	 * L1's nested page tables (e.g. EPT12). The nested translation
5987 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
5988 	 * L2's page tables as the first level of translation and L1's
5989 	 * nested page tables as the second level of translation. Basically
5990 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
5991 	 */
5992 	if (!is_paging(vcpu))
5993 		g_context->gva_to_gpa = nonpaging_gva_to_gpa;
5994 	else if (is_long_mode(vcpu))
5995 		g_context->gva_to_gpa = paging64_gva_to_gpa;
5996 	else if (is_pae(vcpu))
5997 		g_context->gva_to_gpa = paging64_gva_to_gpa;
5998 	else
5999 		g_context->gva_to_gpa = paging32_gva_to_gpa;
6000 
6001 	reset_guest_paging_metadata(vcpu, g_context);
6002 }
6003 
6004 void kvm_init_mmu(struct kvm_vcpu *vcpu)
6005 {
6006 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
6007 	union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
6008 
6009 	if (mmu_is_nested(vcpu))
6010 		init_kvm_nested_mmu(vcpu, cpu_role);
6011 	else if (tdp_enabled)
6012 		init_kvm_tdp_mmu(vcpu, cpu_role);
6013 	else
6014 		init_kvm_softmmu(vcpu, cpu_role);
6015 }
6016 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_mmu);
6017 
6018 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
6019 {
6020 	/*
6021 	 * Invalidate all MMU roles to force them to reinitialize as CPUID
6022 	 * information is factored into reserved bit calculations.
6023 	 *
6024 	 * Correctly handling multiple vCPU models (with respect to paging and
6025 	 * physical address properties) in a single VM would require tracking
6026 	 * all relevant CPUID information in kvm_mmu_page_role. That is very
6027 	 * undesirable as it would increase the memory requirements for
6028 	 * gfn_write_track (see struct kvm_mmu_page_role comments).  For now
6029 	 * that problem is swept under the rug; KVM's CPUID API is horrific and
6030 	 * it's all but impossible to solve it without introducing a new API.
6031 	 */
6032 	vcpu->arch.root_mmu.root_role.invalid = 1;
6033 	vcpu->arch.guest_mmu.root_role.invalid = 1;
6034 	vcpu->arch.nested_mmu.root_role.invalid = 1;
6035 	vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
6036 	vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
6037 	vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
6038 	kvm_mmu_reset_context(vcpu);
6039 
6040 	KVM_BUG_ON(!kvm_can_set_cpuid_and_feature_msrs(vcpu), vcpu->kvm);
6041 }
6042 
6043 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
6044 {
6045 	kvm_mmu_unload(vcpu);
6046 	kvm_init_mmu(vcpu);
6047 }
6048 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_reset_context);
6049 
6050 int kvm_mmu_load(struct kvm_vcpu *vcpu)
6051 {
6052 	int r;
6053 
6054 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
6055 	if (r)
6056 		goto out;
6057 	r = mmu_alloc_special_roots(vcpu);
6058 	if (r)
6059 		goto out;
6060 	if (vcpu->arch.mmu->root_role.direct)
6061 		r = mmu_alloc_direct_roots(vcpu);
6062 	else
6063 		r = mmu_alloc_shadow_roots(vcpu);
6064 	if (r)
6065 		goto out;
6066 
6067 	kvm_mmu_sync_roots(vcpu);
6068 
6069 	kvm_mmu_load_pgd(vcpu);
6070 
6071 	/*
6072 	 * Flush any TLB entries for the new root, the provenance of the root
6073 	 * is unknown.  Even if KVM ensures there are no stale TLB entries
6074 	 * for a freed root, in theory another hypervisor could have left
6075 	 * stale entries.  Flushing on alloc also allows KVM to skip the TLB
6076 	 * flush when freeing a root (see kvm_tdp_mmu_put_root()).
6077 	 */
6078 	kvm_x86_call(flush_tlb_current)(vcpu);
6079 out:
6080 	return r;
6081 }
6082 
6083 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
6084 {
6085 	struct kvm *kvm = vcpu->kvm;
6086 
6087 	kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
6088 	WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
6089 	kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
6090 	WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
6091 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
6092 }
6093 
6094 static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
6095 {
6096 	struct kvm_mmu_page *sp;
6097 
6098 	if (!VALID_PAGE(root_hpa))
6099 		return false;
6100 
6101 	/*
6102 	 * When freeing obsolete roots, treat roots as obsolete if they don't
6103 	 * have an associated shadow page, as it's impossible to determine if
6104 	 * such roots are fresh or stale.  This does mean KVM will get false
6105 	 * positives and free roots that don't strictly need to be freed, but
6106 	 * such false positives are relatively rare:
6107 	 *
6108 	 *  (a) only PAE paging and nested NPT have roots without shadow pages
6109 	 *      (or any shadow paging flavor with a dummy root, see note below)
6110 	 *  (b) remote reloads due to a memslot update obsoletes _all_ roots
6111 	 *  (c) KVM doesn't track previous roots for PAE paging, and the guest
6112 	 *      is unlikely to zap an in-use PGD.
6113 	 *
6114 	 * Note!  Dummy roots are unique in that they are obsoleted by memslot
6115 	 * _creation_!  See also FNAME(fetch).
6116 	 */
6117 	sp = root_to_sp(root_hpa);
6118 	return !sp || is_obsolete_sp(kvm, sp);
6119 }
6120 
6121 static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
6122 {
6123 	unsigned long roots_to_free = 0;
6124 	int i;
6125 
6126 	if (is_obsolete_root(kvm, mmu->root.hpa))
6127 		roots_to_free |= KVM_MMU_ROOT_CURRENT;
6128 
6129 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6130 		if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
6131 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
6132 	}
6133 
6134 	if (roots_to_free)
6135 		kvm_mmu_free_roots(kvm, mmu, roots_to_free);
6136 }
6137 
6138 void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
6139 {
6140 	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
6141 	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
6142 }
6143 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_obsolete_roots);
6144 
6145 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
6146 				    int *bytes)
6147 {
6148 	u64 gentry = 0;
6149 	int r;
6150 
6151 	/*
6152 	 * Assume that the pte write is on a page table of the same type
6153 	 * as the current vcpu paging mode, since we update the sptes only
6154 	 * when they have the same mode.
6155 	 */
6156 	if (is_pae(vcpu) && *bytes == 4) {
6157 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
6158 		*gpa &= ~(gpa_t)7;
6159 		*bytes = 8;
6160 	}
6161 
6162 	if (*bytes == 4 || *bytes == 8) {
6163 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
6164 		if (r)
6165 			gentry = 0;
6166 	}
6167 
6168 	return gentry;
6169 }
6170 
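/*
 * For illustration: a PAE or 64-bit guest updating a gpte with two 32-bit
 * stores, e.g. a 4-byte write at gpa 0x1004, is widened above to an 8-byte
 * read at gpa 0x1000 so that the full gpte is fetched in one atomic access.
 */
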
6171 /*
6172  * If we're seeing too many writes to a page, it may no longer be a page table,
6173  * or we may be forking, in which case it is better to unmap the page.
6174  */
6175 static bool detect_write_flooding(struct kvm_mmu_page *sp)
6176 {
6177 	/*
6178 	 * Skip write-flooding detection for the sp whose level is 1, because
6179 	 * it can become unsync, and then the guest page is not write-protected.
6180 	 */
6181 	if (sp->role.level == PG_LEVEL_4K)
6182 		return false;
6183 
6184 	atomic_inc(&sp->write_flooding_count);
6185 	return atomic_read(&sp->write_flooding_count) >= 3;
6186 }
6187 
6188 /*
6189  * Misaligned accesses are too much trouble to fix up; also, they usually
6190  * indicate a page is not used as a page table.
6191  */
6192 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
6193 				    int bytes)
6194 {
6195 	unsigned offset, pte_size, misaligned;
6196 
6197 	offset = offset_in_page(gpa);
6198 	pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
6199 
6200 	/*
6201 	 * Sometimes, the OS only writes one byte of a pte to update status
6202 	 * bits; e.g. in Linux, the andb instruction is used in clear_bit().
6203 	 */
6204 	if (!(offset & (pte_size - 1)) && bytes == 1)
6205 		return false;
6206 
6207 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
6208 	misaligned |= bytes < 4;
6209 
6210 	return misaligned;
6211 }
6212 
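/*
 * For illustration, a worked example with 8-byte gptes: an 8-byte write at
 * page offset 0x404 yields (0x404 ^ 0x40b) & ~7 == 0x8, i.e. the write
 * straddles two gptes and is treated as misaligned, whereas the same write
 * at offset 0x400 yields 0 and is accepted.
 */
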
6213 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
6214 {
6215 	unsigned page_offset, quadrant;
6216 	u64 *spte;
6217 	int level;
6218 
6219 	page_offset = offset_in_page(gpa);
6220 	level = sp->role.level;
6221 	*nspte = 1;
6222 	if (sp->role.has_4_byte_gpte) {
6223 		page_offset <<= 1;	/* 32->64 */
6224 		/*
6225 		 * A 32-bit pde maps 4MB while the shadow pdes map
6226 		 * only 2MB.  So we need to double the offset again
6227 		 * and zap two pdes instead of one.
6228 		 */
6229 		if (level == PT32_ROOT_LEVEL) {
6230 			page_offset &= ~7; /* kill rounding error */
6231 			page_offset <<= 1;
6232 			*nspte = 2;
6233 		}
6234 		quadrant = page_offset >> PAGE_SHIFT;
6235 		page_offset &= ~PAGE_MASK;
6236 		if (quadrant != sp->role.quadrant)
6237 			return NULL;
6238 	}
6239 
6240 	spte = &sp->spt[page_offset / sizeof(*spte)];
6241 	return spte;
6242 }
6243 
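/*
 * For illustration, a worked example of the quadrant math above: with
 * 4-byte gptes, a write at page offset 0x800 doubles to a shadow offset of
 * 0x1000, i.e. quadrant 1, spte index 0.  Only the shadow page with
 * role.quadrant == 1 maps that half of the guest page; for any other sp,
 * the function returns NULL and the write is ignored.
 */
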
6244 void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
6245 			 int bytes)
6246 {
6247 	gfn_t gfn = gpa >> PAGE_SHIFT;
6248 	struct kvm_mmu_page *sp;
6249 	LIST_HEAD(invalid_list);
6250 	u64 entry, gentry, *spte;
6251 	int npte;
6252 	bool flush = false;
6253 
6254 	/*
6255 	 * When emulating guest writes, ensure the written value is visible to
6256 	 * any task that is handling page faults before checking whether or not
6257 	 * KVM is shadowing a guest PTE.  This ensures either KVM will create
6258 	 * the correct SPTE in the page fault handler, or this task will see
6259 	 * a non-zero indirect_shadow_pages.  Pairs with the smp_mb() in
6260 	 * account_shadowed().
6261 	 */
6262 	smp_mb();
6263 	if (!vcpu->kvm->arch.indirect_shadow_pages)
6264 		return;
6265 
6266 	write_lock(&vcpu->kvm->mmu_lock);
6267 
6268 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
6269 
6270 	++vcpu->kvm->stat.mmu_pte_write;
6271 
6272 	for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
6273 		if (detect_write_misaligned(sp, gpa, bytes) ||
6274 		      detect_write_flooding(sp)) {
6275 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
6276 			++vcpu->kvm->stat.mmu_flooded;
6277 			continue;
6278 		}
6279 
6280 		spte = get_written_sptes(sp, gpa, &npte);
6281 		if (!spte)
6282 			continue;
6283 
6284 		while (npte--) {
6285 			entry = *spte;
6286 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
6287 			if (gentry && sp->role.level != PG_LEVEL_4K)
6288 				++vcpu->kvm->stat.mmu_pde_zapped;
6289 			if (is_shadow_present_pte(entry))
6290 				flush = true;
6291 			++spte;
6292 		}
6293 	}
6294 	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
6295 	write_unlock(&vcpu->kvm->mmu_lock);
6296 }
6297 
6298 static bool is_write_to_guest_page_table(u64 error_code)
6299 {
6300 	const u64 mask = PFERR_GUEST_PAGE_MASK | PFERR_WRITE_MASK | PFERR_PRESENT_MASK;
6301 
6302 	return (error_code & mask) == mask;
6303 }
6304 
6305 static int kvm_mmu_write_protect_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
6306 				       u64 error_code, int *emulation_type)
6307 {
6308 	bool direct = vcpu->arch.mmu->root_role.direct;
6309 
6310 	/*
6311 	 * Do not try to unprotect and retry if the vCPU re-faulted on the same
6312 	 * RIP with the same address that was previously unprotected, as doing
6313 	 * so will likely put the vCPU into an infinite loop.  E.g. if the vCPU uses
6314 	 * a non-page-table modifying instruction on the PDE that points to the
6315 	 * instruction, then unprotecting the gfn will unmap the instruction's
6316 	 * code, i.e. make it impossible for the instruction to ever complete.
6317 	 */
6318 	if (vcpu->arch.last_retry_eip == kvm_rip_read(vcpu) &&
6319 	    vcpu->arch.last_retry_addr == cr2_or_gpa)
6320 		return RET_PF_EMULATE;
6321 
6322 	/*
6323 	 * Reset the unprotect+retry values that guard against infinite loops.
6324 	 * The values will be refreshed if KVM explicitly unprotects a gfn and
6325 	 * retries, in all other cases it's safe to retry in the future even if
6326 	 * the next page fault happens on the same RIP+address.
6327 	 */
6328 	vcpu->arch.last_retry_eip = 0;
6329 	vcpu->arch.last_retry_addr = 0;
6330 
6331 	/*
6332 	 * It should be impossible to reach this point with an MMIO cache hit,
6333 	 * as RET_PF_WRITE_PROTECTED is returned if and only if there's a valid,
6334 	 * writable memslot, and creating a memslot should invalidate the MMIO
6335 	 * cache by way of changing the memslot generation.  WARN and disallow
6336 	 * retry if MMIO is detected, as retrying MMIO emulation is pointless
6337 	 * and could put the vCPU into an infinite loop because the processor
6338 	 * will keep faulting on the non-existent MMIO address.
6339 	 */
6340 	if (WARN_ON_ONCE(mmio_info_in_cache(vcpu, cr2_or_gpa, direct)))
6341 		return RET_PF_EMULATE;
6342 
6343 	/*
6344 	 * Before emulating the instruction, check to see if the access was due
6345 	 * to a read-only violation while the CPU was walking non-nested NPT
6346 	 * page tables, i.e. for a direct MMU, for _guest_ page tables in L1.
6347 	 * If L1 is sharing (a subset of) its page tables with L2, e.g. by
6348 	 * having nCR3 share lower level page tables with hCR3, then when KVM
6349 	 * (L0) write-protects the nested NPTs, i.e. npt12 entries, KVM is also
6350 	 * unknowingly write-protecting L1's guest page tables, which KVM isn't
6351 	 * shadowing.
6352 	 *
6353 	 * Because the CPU (by default) walks NPT page tables using a write
6354 	 * access (to ensure the CPU can do A/D updates), page walks in L1 can
6355 	 * trigger write faults for the above case even when L1 isn't modifying
6356 	 * PTEs.  As a result, KVM will unnecessarily emulate (or at least, try
6357 	 * to emulate) an excessive number of L1 instructions; because L1's MMU
6358 	 * isn't shadowed by KVM, there is no need to write-protect L1's gPTEs
6359 	 * and thus no need to emulate in order to guarantee forward progress.
6360 	 *
6361 	 * Try to unprotect the gfn, i.e. zap any shadow pages, so that L1 can
6362 	 * proceed without triggering emulation.  If one or more shadow pages
6363 	 * was zapped, skip emulation and resume L1 to let it natively execute
6364 	 * the instruction.  If no shadow pages were zapped, then the write-
6365 	 * fault is due to something else entirely, i.e. KVM needs to emulate,
6366 	 * as resuming the guest will put it into an infinite loop.
6367 	 *
6368 	 * Note, this code also applies to Intel CPUs, even though it is *very*
6369 	 * unlikely that an L1 will share its page tables (IA32/PAE/paging64
6370 	 * format) with L2's page tables (EPT format).
6371 	 *
6372 	 * For indirect MMUs, i.e. if KVM is shadowing the current MMU, try to
6373 	 * unprotect the gfn and retry if an event is awaiting reinjection.  If
6374 	 * KVM emulates multiple instructions before completing event injection,
6375 	 * the event could be delayed beyond what is architecturally allowed,
6376 	 * e.g. KVM could inject an IRQ after the TPR has been raised.
6377 	 */
6378 	if (((direct && is_write_to_guest_page_table(error_code)) ||
6379 	     (!direct && kvm_event_needs_reinjection(vcpu))) &&
6380 	    kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
6381 		return RET_PF_RETRY;
6382 
6383 	/*
6384 	 * The gfn is write-protected, but if KVM detects it's emulating an
6385 	 * instruction that is unlikely to be used to modify page tables, or if
6386 	 * emulation fails, KVM can try to unprotect the gfn and let the CPU
6387 	 * re-execute the instruction that caused the page fault.  Do not allow
6388 	 * retrying an instruction from a nested guest as KVM is only explicitly
6389 	 * shadowing L1's page tables, i.e. unprotecting something for L1 isn't
6390 	 * going to magically fix whatever issue caused L2 to fail.
6391 	 */
6392 	if (!is_guest_mode(vcpu))
6393 		*emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
6394 
6395 	return RET_PF_EMULATE;
6396 }
6397 
6398 int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
6399 		       void *insn, int insn_len)
6400 {
6401 	int r, emulation_type = EMULTYPE_PF;
6402 	bool direct = vcpu->arch.mmu->root_role.direct;
6403 
6404 	if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
6405 		return RET_PF_RETRY;
6406 
6407 	/*
6408 	 * Except for reserved faults (emulated MMIO is shared-only), set the
6409 	 * PFERR_PRIVATE_ACCESS flag for software-protected VMs based on the gfn's
6410 	 * current attributes, which are the source of truth for such VMs.  Note,
6411 	 * this is wrong for nested MMUs as the GPA is an L2 GPA, but KVM doesn't
6412 	 * currently support nested virtualization (among many other things)
6413 	 * for software-protected VMs.
6414 	 */
6415 	if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) &&
6416 	    !(error_code & PFERR_RSVD_MASK) &&
6417 	    vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM &&
6418 	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)))
6419 		error_code |= PFERR_PRIVATE_ACCESS;
6420 
6421 	r = RET_PF_INVALID;
6422 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
6423 		if (WARN_ON_ONCE(error_code & PFERR_PRIVATE_ACCESS))
6424 			return -EFAULT;
6425 
6426 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
6427 		if (r == RET_PF_EMULATE)
6428 			goto emulate;
6429 	}
6430 
6431 	if (r == RET_PF_INVALID) {
6432 		vcpu->stat.pf_taken++;
6433 
6434 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false,
6435 					  &emulation_type, NULL);
6436 		if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
6437 			return -EIO;
6438 	}
6439 
6440 	if (r < 0)
6441 		return r;
6442 
6443 	if (r == RET_PF_WRITE_PROTECTED)
6444 		r = kvm_mmu_write_protect_fault(vcpu, cr2_or_gpa, error_code,
6445 						&emulation_type);
6446 
6447 	if (r == RET_PF_FIXED)
6448 		vcpu->stat.pf_fixed++;
6449 	else if (r == RET_PF_EMULATE)
6450 		vcpu->stat.pf_emulate++;
6451 	else if (r == RET_PF_SPURIOUS)
6452 		vcpu->stat.pf_spurious++;
6453 
6454 	/*
6455 	 * None of handle_mmio_page_fault(), kvm_mmu_do_page_fault(), or
6456 	 * kvm_mmu_write_protect_fault() return RET_PF_CONTINUE.
6457 	 * kvm_mmu_do_page_fault() only uses RET_PF_CONTINUE internally to
6458 	 * indicate continuing the page fault handling until the final
6459 	 * page table mapping phase.
6460 	 */
6461 	WARN_ON_ONCE(r == RET_PF_CONTINUE);
6462 	if (r != RET_PF_EMULATE)
6463 		return r;
6464 
6465 emulate:
6466 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
6467 				       insn_len);
6468 }
6469 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_page_fault);
6470 
6471 void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
6472 {
6473 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
6474 	int root_level, leaf, level;
6475 
6476 	leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level);
6477 	if (unlikely(leaf < 0))
6478 		return;
6479 
6480 	pr_err("%s %llx", msg, gpa);
6481 	for (level = root_level; level >= leaf; level--)
6482 		pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
6483 	pr_cont("\n");
6484 }
6485 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_print_sptes);
6486 
6487 static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
6488 				      u64 addr, hpa_t root_hpa)
6489 {
6490 	struct kvm_shadow_walk_iterator iterator;
6491 
6492 	vcpu_clear_mmio_info(vcpu, addr);
6493 
6494 	/*
6495 	 * Walking and synchronizing SPTEs both assume they are operating in
6496 	 * the context of the current MMU, and would need to be reworked if
6497 	 * this is ever used to sync the guest_mmu, e.g. to emulate INVEPT.
6498 	 */
6499 	if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
6500 		return;
6501 
6502 	if (!VALID_PAGE(root_hpa))
6503 		return;
6504 
6505 	write_lock(&vcpu->kvm->mmu_lock);
6506 	for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) {
6507 		struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
6508 
6509 		if (sp->unsync) {
6510 			int ret = kvm_sync_spte(vcpu, sp, iterator.index);
6511 
6512 			if (ret < 0)
6513 				mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL);
6514 			if (ret)
6515 				kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep);
6516 		}
6517 
6518 		if (!sp->unsync_children)
6519 			break;
6520 	}
6521 	write_unlock(&vcpu->kvm->mmu_lock);
6522 }
6523 
6524 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
6525 			     u64 addr, unsigned long roots)
6526 {
6527 	int i;
6528 
6529 	WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
6530 
6531 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
6532 	if (mmu != &vcpu->arch.guest_mmu) {
6533 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
6534 		if (is_noncanonical_invlpg_address(addr, vcpu))
6535 			return;
6536 
6537 		kvm_x86_call(flush_tlb_gva)(vcpu, addr);
6538 	}
6539 
6540 	if (!mmu->sync_spte)
6541 		return;
6542 
6543 	if (roots & KVM_MMU_ROOT_CURRENT)
6544 		__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa);
6545 
6546 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6547 		if (roots & KVM_MMU_ROOT_PREVIOUS(i))
6548 			__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
6549 	}
6550 }
6551 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_invalidate_addr);
6552 
6553 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
6554 {
6555 	/*
6556 	 * INVLPG is required to invalidate any global mappings for the VA,
6557 	 * irrespective of PCID.  Blindly sync all roots as it would take
6558 	 * roughly the same amount of work/time to determine whether any of the
6559 	 * previous roots have a global mapping.
6560 	 *
6561 	 * Mappings not reachable via the current or previous cached roots will
6562 	 * be synced when switching to that new cr3, so nothing needs to be
6563 	 * done here for them.
6564 	 */
6565 	kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
6566 	++vcpu->stat.invlpg;
6567 }
6568 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_invlpg);
6569 
6570 
6571 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
6572 {
6573 	struct kvm_mmu *mmu = vcpu->arch.mmu;
6574 	unsigned long roots = 0;
6575 	uint i;
6576 
6577 	if (pcid == kvm_get_active_pcid(vcpu))
6578 		roots |= KVM_MMU_ROOT_CURRENT;
6579 
6580 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6581 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
6582 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd))
6583 			roots |= KVM_MMU_ROOT_PREVIOUS(i);
6584 	}
6585 
6586 	if (roots)
6587 		kvm_mmu_invalidate_addr(vcpu, mmu, gva, roots);
6588 	++vcpu->stat.invlpg;
6589 
6590 	/*
6591 	 * Mappings not reachable via the current cr3 or the prev_roots will be
6592 	 * synced when switching to that cr3, so nothing needs to be done here
6593 	 * for them.
6594 	 */
6595 }
6596 
6597 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
6598 		       int tdp_max_root_level, int tdp_huge_page_level)
6599 {
6600 	tdp_enabled = enable_tdp;
6601 	tdp_root_level = tdp_forced_root_level;
6602 	max_tdp_level = tdp_max_root_level;
6603 
6604 #ifdef CONFIG_X86_64
6605 	tdp_mmu_enabled = tdp_mmu_allowed && tdp_enabled;
6606 #endif
6607 	/*
6608 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
6609 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
6610 	 * the kernel is not.  But, KVM never creates a page size greater than
6611 	 * what is used by the kernel for any given HVA, i.e. the kernel's
6612 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
6613 	 */
6614 	if (tdp_enabled)
6615 		max_huge_page_level = tdp_huge_page_level;
6616 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
6617 		max_huge_page_level = PG_LEVEL_1G;
6618 	else
6619 		max_huge_page_level = PG_LEVEL_2M;
6620 }
6621 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_configure_mmu);
6622 
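/*
 * For illustration, a hedged usage sketch (the parameter names below are
 * illustrative, not the exact vendor-module call sites): vendor code
 * invokes this once during hardware setup, roughly
 *
 *	kvm_configure_mmu(enable_ept, 0, max_ept_level, ept_huge_page_level);
 *
 * with a zero forced root level when the root level may track the guest
 * (VMX), or with a nonzero forced level when host paging dictates it (SVM).
 */
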
6623 static void free_mmu_pages(struct kvm_mmu *mmu)
6624 {
6625 	if (!tdp_enabled && mmu->pae_root)
6626 		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
6627 	free_page((unsigned long)mmu->pae_root);
6628 	free_page((unsigned long)mmu->pml4_root);
6629 	free_page((unsigned long)mmu->pml5_root);
6630 }
6631 
6632 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
6633 {
6634 	struct page *page;
6635 	int i;
6636 
6637 	mmu->root.hpa = INVALID_PAGE;
6638 	mmu->root.pgd = 0;
6639 	mmu->mirror_root_hpa = INVALID_PAGE;
6640 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
6641 		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
6642 
6643 	/* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
6644 	if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
6645 		return 0;
6646 
6647 	/*
6648 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
6649 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
6650 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
6651 	 * x86_64.  Therefore we need to allocate the PDP table in the first
6652 	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
6653 	 * generally doesn't use PAE paging and can skip allocating the PDP
6654 	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
6655 	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
6656 	 * KVM; that horror is handled on-demand by mmu_alloc_special_roots().
6657 	 */
6658 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
6659 		return 0;
6660 
6661 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
6662 	if (!page)
6663 		return -ENOMEM;
6664 
6665 	mmu->pae_root = page_address(page);
6666 
6667 	/*
6668 	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
6669 	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
6670 	 * that KVM's writes and the CPU's reads get along.  Note, this is
6671 	 * only necessary when using shadow paging, as 64-bit NPT can get at
6672 	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
6673 	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
6674 	 */
6675 	if (!tdp_enabled)
6676 		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
6677 	else
6678 		WARN_ON_ONCE(shadow_me_value);
6679 
6680 	for (i = 0; i < 4; ++i)
6681 		mmu->pae_root[i] = INVALID_PAE_ROOT;
6682 
6683 	return 0;
6684 }
6685 
6686 int kvm_mmu_create(struct kvm_vcpu *vcpu)
6687 {
6688 	int ret;
6689 
6690 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
6691 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
6692 
6693 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
6694 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
6695 
6696 	vcpu->arch.mmu_shadow_page_cache.init_value =
6697 		SHADOW_NONPRESENT_VALUE;
6698 	if (!vcpu->arch.mmu_shadow_page_cache.init_value)
6699 		vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
6700 
6701 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
6702 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
6703 
6704 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
6705 	if (ret)
6706 		return ret;
6707 
6708 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
6709 	if (ret)
6710 		goto fail_allocate_root;
6711 
6712 	return ret;
6713  fail_allocate_root:
6714 	free_mmu_pages(&vcpu->arch.guest_mmu);
6715 	return ret;
6716 }
6717 
6718 #define BATCH_ZAP_PAGES	10
6719 static void kvm_zap_obsolete_pages(struct kvm *kvm)
6720 {
6721 	struct kvm_mmu_page *sp, *node;
6722 	int nr_zapped, batch = 0;
6723 	LIST_HEAD(invalid_list);
6724 	bool unstable;
6725 
6726 	lockdep_assert_held(&kvm->slots_lock);
6727 
6728 restart:
6729 	list_for_each_entry_safe_reverse(sp, node,
6730 	      &kvm->arch.active_mmu_pages, link) {
6731 		/*
6732 		 * No obsolete valid page exists before a newly created page
6733 		 * since active_mmu_pages is a FIFO list.
6734 		 */
6735 		if (!is_obsolete_sp(kvm, sp))
6736 			break;
6737 
6738 		/*
6739 		 * Invalid pages should never land back on the list of active
6740 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
6741 		 * infinite loop if the page gets put back on the list (again).
6742 		 */
6743 		if (WARN_ON_ONCE(sp->role.invalid))
6744 			continue;
6745 
6746 		/*
6747 		 * No need to flush the TLB since we're only zapping shadow
6748 		 * pages with an obsolete generation number and all vCPUS have
6749 		 * loaded a new root, i.e. the shadow pages being zapped cannot
6750 		 * be in active use by the guest.
6751 		 */
6752 		if (batch >= BATCH_ZAP_PAGES &&
6753 		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
6754 			batch = 0;
6755 			goto restart;
6756 		}
6757 
6758 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
6759 				&invalid_list, &nr_zapped);
6760 		batch += nr_zapped;
6761 
6762 		if (unstable)
6763 			goto restart;
6764 	}
6765 
6766 	/*
6767 	 * Kick all vCPUs (via remote TLB flush) before freeing the page tables
6768 	 * to ensure KVM is not in the middle of a lockless shadow page table
6769 	 * walk, which may reference the pages.  The remote TLB flush itself is
6770 	 * not required and is simply a convenient way to kick vCPUs as needed.
6771 	 * KVM performs a local TLB flush when allocating a new root (see
6772 	 * kvm_mmu_load()), and the reload in the caller ensures no vCPUs are
6773 	 * running with an obsolete MMU.
6774 	 */
6775 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
6776 }
6777 
6778 /*
6779  * Fast invalidate all shadow pages and use lock-break technique
6780  * to zap obsolete pages.
6781  *
6782  * It's required when a memslot is being deleted or the VM is being
6783  * destroyed; in these cases, we must ensure that the KVM MMU does
6784  * not use any resource of the being-deleted slot, or of any slot,
6785  * after this function returns.
6786  */
6787 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
6788 {
6789 	lockdep_assert_held(&kvm->slots_lock);
6790 
6791 	write_lock(&kvm->mmu_lock);
6792 	trace_kvm_mmu_zap_all_fast(kvm);
6793 
6794 	/*
6795 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
6796 	 * held for the entire duration of zapping obsolete pages, it's
6797 	 * impossible for there to be multiple invalid generations associated
6798 	 * with *valid* shadow pages at any given time, i.e. there is exactly
6799 	 * one valid generation and (at most) one invalid generation.
6800 	 */
6801 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
6802 
6803 	/*
6804 	 * In order to ensure all vCPUs drop their soon-to-be invalid roots,
6805 	 * invalidating TDP MMU roots must be done while holding mmu_lock for
6806 	 * write and in the same critical section as making the reload request,
6807 	 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
6808 	 */
6809 	if (tdp_mmu_enabled) {
6810 		/*
6811 		 * External page tables don't support fast zapping, therefore
6812 		 * their mirrors must be invalidated separately by the caller.
6813 		 */
6814 		kvm_tdp_mmu_invalidate_roots(kvm, KVM_DIRECT_ROOTS);
6815 	}
6816 
6817 	/*
6818 	 * Notify all vcpus to reload their shadow page tables and flush TLBs.
6819 	 * All vcpus will then switch to a new shadow page table with the new
6820 	 * mmu_valid_gen.
6821 	 *
6822 	 * Note: this must be done under the protection of mmu_lock; otherwise
6823 	 * a vcpu could purge a shadow page but miss the TLB flush.
6824 	 */
6825 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
6826 
6827 	kvm_zap_obsolete_pages(kvm);
6828 
6829 	write_unlock(&kvm->mmu_lock);
6830 
6831 	/*
6832 	 * Zap the invalidated TDP MMU roots; all SPTEs must be dropped before
6833 	 * returning to the caller.  E.g. if the zap is in response to a memslot
6834 	 * deletion, mmu_notifier callbacks will be unable to reach the SPTEs
6835 	 * associated with the deleted memslot once the update completes;
6836 	 * deferring the zap until the final reference to the root is put would
6837 	 * lead to use-after-free.
6838 	 */
6839 	if (tdp_mmu_enabled)
6840 		kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
6841 }
6842 
6843 int kvm_mmu_init_vm(struct kvm *kvm)
6844 {
6845 	int r, i;
6846 
6847 	kvm->arch.shadow_mmio_value = shadow_mmio_value;
6848 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6849 	for (i = 0; i < KVM_NR_MMU_TYPES; ++i)
6850 		INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages[i].pages);
6851 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
6852 
6853 	if (tdp_mmu_enabled) {
6854 		kvm_mmu_init_tdp_mmu(kvm);
6855 	} else {
6856 		r = kvm_mmu_alloc_page_hash(kvm);
6857 		if (r)
6858 			return r;
6859 	}
6860 
6861 	kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
6862 	kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
6863 
6864 	kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
6865 
6866 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
6867 	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
6868 	return 0;
6869 }
6870 
6871 static void mmu_free_vm_memory_caches(struct kvm *kvm)
6872 {
6873 	kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache);
6874 	kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
6875 	kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
6876 }
6877 
6878 void kvm_mmu_uninit_vm(struct kvm *kvm)
6879 {
6880 	kvfree(kvm->arch.mmu_page_hash);
6881 
6882 	if (tdp_mmu_enabled)
6883 		kvm_mmu_uninit_tdp_mmu(kvm);
6884 
6885 	mmu_free_vm_memory_caches(kvm);
6886 }
6887 
6888 static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6889 {
6890 	const struct kvm_memory_slot *memslot;
6891 	struct kvm_memslots *slots;
6892 	struct kvm_memslot_iter iter;
6893 	bool flush = false;
6894 	gfn_t start, end;
6895 	int i;
6896 
6897 	if (!kvm_memslots_have_rmaps(kvm))
6898 		return flush;
6899 
6900 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
6901 		slots = __kvm_memslots(kvm, i);
6902 
6903 		kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
6904 			memslot = iter.slot;
6905 			start = max(gfn_start, memslot->base_gfn);
6906 			end = min(gfn_end, memslot->base_gfn + memslot->npages);
6907 			if (WARN_ON_ONCE(start >= end))
6908 				continue;
6909 
6910 			flush = __kvm_rmap_zap_gfn_range(kvm, memslot, start,
6911 							 end, true, flush);
6912 		}
6913 	}
6914 
6915 	return flush;
6916 }
6917 
6918 /*
6919  * Invalidate (zap) SPTEs that cover GFNs from gfn_start up to, but not
6920  * including, gfn_end.
6921  */
6922 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6923 {
6924 	bool flush;
6925 
6926 	if (WARN_ON_ONCE(gfn_end <= gfn_start))
6927 		return;
6928 
6929 	write_lock(&kvm->mmu_lock);
6930 
6931 	kvm_mmu_invalidate_begin(kvm);
6932 
6933 	kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
6934 
6935 	flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
6936 
6937 	if (tdp_mmu_enabled)
6938 		flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
6939 
6940 	if (flush)
6941 		kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
6942 
6943 	kvm_mmu_invalidate_end(kvm);
6944 
6945 	write_unlock(&kvm->mmu_lock);
6946 }
6947 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_zap_gfn_range);
6948 
6949 static bool slot_rmap_write_protect(struct kvm *kvm,
6950 				    struct kvm_rmap_head *rmap_head,
6951 				    const struct kvm_memory_slot *slot)
6952 {
6953 	return rmap_write_protect(rmap_head, false);
6954 }
6955 
6956 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
6957 				      const struct kvm_memory_slot *memslot,
6958 				      int start_level)
6959 {
6960 	if (kvm_memslots_have_rmaps(kvm)) {
6961 		write_lock(&kvm->mmu_lock);
6962 		walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
6963 				start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
6964 		write_unlock(&kvm->mmu_lock);
6965 	}
6966 
6967 	if (tdp_mmu_enabled) {
6968 		read_lock(&kvm->mmu_lock);
6969 		kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
6970 		read_unlock(&kvm->mmu_lock);
6971 	}
6972 }
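/*
 * Note the locking asymmetry above: the shadow MMU's rmaps may only be
 * walked with mmu_lock held for write, whereas the TDP MMU can
 * write-protect SPTEs with mmu_lock held for read because it updates
 * SPTEs atomically (see tdp_mmu.c).
 */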
6973 
6974 static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
6975 {
6976 	return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
6977 }
6978 
6979 static bool need_topup_split_caches_or_resched(struct kvm *kvm)
6980 {
6981 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
6982 		return true;
6983 
6984 	/*
6985 	 * In the worst case, SPLIT_DESC_CACHE_MIN_NR_OBJECTS descriptors are needed
6986 	 * to split a single huge page. Calculating how many are actually needed
6987 	 * is possible but not worth the complexity.
6988 	 */
6989 	return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
6990 	       need_topup(&kvm->arch.split_page_header_cache, 1) ||
6991 	       need_topup(&kvm->arch.split_shadow_page_cache, 1);
6992 }
6993 
6994 static int topup_split_caches(struct kvm *kvm)
6995 {
6996 	/*
6997 	 * Allocating rmap list entries when splitting huge pages for nested
6998 	 * MMUs is uncommon as KVM needs to use a list if and only if there is
6999 	 * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be
7000 	 * aliased by multiple L2 gfns and/or from multiple nested roots with
7001 	 * different roles.  Aliasing gfns when using TDP is atypical for VMMs;
7002 	 * a few gfns are often aliased during boot, e.g. when remapping BIOS,
7003 	 * but aliasing rarely occurs post-boot or for many gfns.  If there is
7004 	 * only one rmap entry, rmap->val points directly at that one entry and
7005 	 * doesn't need to allocate a list.  Buffer the cache by the default
7006 	 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
7007 	 * encounters an aliased gfn or two.
7008 	 */
7009 	const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS +
7010 			     KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
7011 	int r;
7012 
7013 	lockdep_assert_held(&kvm->slots_lock);
7014 
7015 	r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
7016 					 SPLIT_DESC_CACHE_MIN_NR_OBJECTS);
7017 	if (r)
7018 		return r;
7019 
7020 	r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
7021 	if (r)
7022 		return r;
7023 
7024 	return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
7025 }
7026 
7027 static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
7028 {
7029 	struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
7030 	struct shadow_page_caches caches = {};
7031 	union kvm_mmu_page_role role;
7032 	unsigned int access;
7033 	gfn_t gfn;
7034 
7035 	gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
7036 	access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
7037 
7038 	/*
7039 	 * Note, huge page splitting always uses direct shadow pages, regardless
7040 	 * of whether the huge page itself is mapped by a direct or indirect
7041 	 * shadow page, since the huge page region itself is being directly
7042 	 * mapped with smaller pages.
7043 	 */
7044 	role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
7045 
7046 	/* Direct SPs do not require a shadowed_info_cache. */
7047 	caches.page_header_cache = &kvm->arch.split_page_header_cache;
7048 	caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache;
7049 
7050 	/* Safe to pass NULL for vCPU since requesting a direct SP. */
7051 	return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
7052 }
7053 
7054 static void shadow_mmu_split_huge_page(struct kvm *kvm,
7055 				       const struct kvm_memory_slot *slot,
7056 				       u64 *huge_sptep)
7058 {
7059 	struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
7060 	u64 huge_spte = READ_ONCE(*huge_sptep);
7061 	struct kvm_mmu_page *sp;
7062 	bool flush = false;
7063 	u64 *sptep, spte;
7064 	gfn_t gfn;
7065 	int index;
7066 
7067 	sp = shadow_mmu_get_sp_for_split(kvm, huge_sptep);
7068 
7069 	for (index = 0; index < SPTE_ENT_PER_PAGE; index++) {
7070 		sptep = &sp->spt[index];
7071 		gfn = kvm_mmu_page_get_gfn(sp, index);
7072 
7073 		/*
7074 		 * The SP may already have populated SPTEs, e.g. if this huge
7075 		 * page is aliased by multiple sptes with the same access
7076 		 * permissions. These entries are guaranteed to map the same
7077 		 * gfn-to-pfn translation since the SP is direct, so no need to
7078 		 * modify them.
7079 		 *
7080 		 * However, if a given SPTE points to a lower level page table,
7081 		 * that lower level page table may only be partially populated.
7082 		 * Installing such SPTEs would effectively unmap a portion of the
7083 		 * huge page. Unmapping guest memory always requires a TLB flush
7084 		 * since a subsequent operation on the unmapped regions would
7085 		 * fail to detect the need to flush.
7086 		 */
7087 		if (is_shadow_present_pte(*sptep)) {
7088 			flush |= !is_last_spte(*sptep, sp->role.level);
7089 			continue;
7090 		}
7091 
7092 		spte = make_small_spte(kvm, huge_spte, sp->role, index);
7093 		mmu_spte_set(sptep, spte);
7094 		__rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
7095 	}
7096 
7097 	__link_shadow_page(kvm, cache, huge_sptep, sp, flush);
7098 }
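/*
 * For example, splitting a 2MiB huge SPTE installs up to SPTE_ENT_PER_PAGE
 * (512) 4KiB SPTEs in the new SP, while splitting a 1GiB SPTE installs 512
 * 2MiB SPTEs that are themselves split on the next lower-level pass (see
 * kvm_shadow_mmu_try_split_huge_pages()).
 */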
7099 
7100 static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
7101 					  const struct kvm_memory_slot *slot,
7102 					  u64 *huge_sptep)
7103 {
7104 	struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
7105 	int level, r = 0;
7106 	gfn_t gfn;
7107 	u64 spte;
7108 
7109 	/* Grab information for the tracepoint before dropping the MMU lock. */
7110 	gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
7111 	level = huge_sp->role.level;
7112 	spte = *huge_sptep;
7113 
7114 	if (kvm_mmu_available_pages(kvm) <= KVM_MIN_FREE_MMU_PAGES) {
7115 		r = -ENOSPC;
7116 		goto out;
7117 	}
7118 
7119 	if (need_topup_split_caches_or_resched(kvm)) {
7120 		write_unlock(&kvm->mmu_lock);
7121 		cond_resched();
7122 		/*
7123 		 * If the topup succeeds, return -EAGAIN to indicate that the
7124 		 * rmap iterator should be restarted because the MMU lock was
7125 		 * dropped.
7126 		 */
7127 		r = topup_split_caches(kvm) ?: -EAGAIN;
7128 		write_lock(&kvm->mmu_lock);
7129 		goto out;
7130 	}
7131 
7132 	shadow_mmu_split_huge_page(kvm, slot, huge_sptep);
7133 
7134 out:
7135 	trace_kvm_mmu_split_huge_page(gfn, spte, level, r);
7136 	return r;
7137 }
7138 
7139 static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm,
7140 					    struct kvm_rmap_head *rmap_head,
7141 					    const struct kvm_memory_slot *slot)
7142 {
7143 	struct rmap_iterator iter;
7144 	struct kvm_mmu_page *sp;
7145 	u64 *huge_sptep;
7146 	int r;
7147 
7148 restart:
7149 	for_each_rmap_spte(rmap_head, &iter, huge_sptep) {
7150 		sp = sptep_to_sp(huge_sptep);
7151 
7152 		/* TDP MMU is enabled, so rmap only contains nested MMU SPs. */
7153 		if (WARN_ON_ONCE(!sp->role.guest_mode))
7154 			continue;
7155 
7156 		/* The rmaps should never contain non-leaf SPTEs. */
7157 		if (WARN_ON_ONCE(!is_large_pte(*huge_sptep)))
7158 			continue;
7159 
7160 		/* SPs with level > PG_LEVEL_4K should never be unsync. */
7161 		if (WARN_ON_ONCE(sp->unsync))
7162 			continue;
7163 
7164 		/* Don't bother splitting huge pages on invalid SPs. */
7165 		if (sp->role.invalid)
7166 			continue;
7167 
7168 		r = shadow_mmu_try_split_huge_page(kvm, slot, huge_sptep);
7169 
7170 		/*
7171 		 * The split succeeded or needs to be retried because the MMU
7172 		 * lock was dropped. Either way, restart the iterator to get it
7173 		 * back into a consistent state.
7174 		 */
7175 		if (!r || r == -EAGAIN)
7176 			goto restart;
7177 
7178 		/* The split failed and shouldn't be retried (e.g. -ENOMEM). */
7179 		break;
7180 	}
7181 
7182 	return false;
7183 }
7184 
7185 static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
7186 						const struct kvm_memory_slot *slot,
7187 						gfn_t start, gfn_t end,
7188 						int target_level)
7189 {
7190 	int level;
7191 
7192 	/*
7193 	 * Split huge pages starting with KVM_MAX_HUGEPAGE_LEVEL and working
7194 	 * down to the target level. This ensures pages are recursively split
7195 	 * all the way to the target level. There's no need to split pages
7196 	 * already at the target level.
7197 	 */
7198 	for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
7199 		__walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
7200 				  level, level, start, end - 1, true, true, false);
7201 }
7202 
7203 /* Must be called with the mmu_lock held in write-mode. */
7204 void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
7205 				   const struct kvm_memory_slot *memslot,
7206 				   u64 start, u64 end,
7207 				   int target_level)
7208 {
7209 	if (!tdp_mmu_enabled)
7210 		return;
7211 
7212 	if (kvm_memslots_have_rmaps(kvm))
7213 		kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
7214 
7215 	kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, false);
7216 
7217 	/*
7218 	 * A TLB flush is unnecessary at this point for the same reasons as in
7219 	 * kvm_mmu_slot_try_split_huge_pages().
7220 	 */
7221 }
7222 
7223 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
7224 					const struct kvm_memory_slot *memslot,
7225 					int target_level)
7226 {
7227 	u64 start = memslot->base_gfn;
7228 	u64 end = start + memslot->npages;
7229 
7230 	if (!tdp_mmu_enabled)
7231 		return;
7232 
7233 	if (kvm_memslots_have_rmaps(kvm)) {
7234 		write_lock(&kvm->mmu_lock);
7235 		kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
7236 		write_unlock(&kvm->mmu_lock);
7237 	}
7238 
7239 	read_lock(&kvm->mmu_lock);
7240 	kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true);
7241 	read_unlock(&kvm->mmu_lock);
7242 
7243 	/*
7244 	 * No TLB flush is necessary here. KVM will flush TLBs after
7245 	 * write-protecting and/or clearing dirty on the newly split SPTEs to
7246 	 * ensure that guest writes are reflected in the dirty log before the
7247 	 * ioctl to enable dirty logging on this memslot completes. Since the
7248 	 * split SPTEs retain the write and dirty bits of the huge SPTE, it is
7249 	 * safe for KVM to decide if a TLB flush is necessary based on the split
7250 	 * SPTEs.
7251 	 */
7252 }
7253 
7254 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
7255 					 struct kvm_rmap_head *rmap_head,
7256 					 const struct kvm_memory_slot *slot)
7257 {
7258 	u64 *sptep;
7259 	struct rmap_iterator iter;
7260 	int need_tlb_flush = 0;
7261 	struct kvm_mmu_page *sp;
7262 
7263 restart:
7264 	for_each_rmap_spte(rmap_head, &iter, sptep) {
7265 		sp = sptep_to_sp(sptep);
7266 
7267 		/*
7268 		 * Huge page mappings cannot be created for indirect shadow pages,
7269 		 * which are found on the last rmap (level = 1) when not using
7270 		 * TDP; such shadow pages are kept in sync with the guest page
7271 		 * table, and the guest page table uses 4K mappings if the
7272 		 * indirect SP has level = 1.
7273 		 */
7274 		if (sp->role.direct &&
7275 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, NULL, slot, sp->gfn)) {
7276 			kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
7277 
7278 			if (kvm_available_flush_remote_tlbs_range())
7279 				kvm_flush_remote_tlbs_sptep(kvm, sptep);
7280 			else
7281 				need_tlb_flush = 1;
7282 
7283 			goto restart;
7284 		}
7285 	}
7286 
7287 	return need_tlb_flush;
7288 }
7289 
7290 static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
7291 					   const struct kvm_memory_slot *slot)
7292 {
7293 	/*
7294 	 * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap
7295 	 * pages that are already mapped at the maximum hugepage level.
7296 	 */
7297 	if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
7298 			    PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
7299 		kvm_flush_remote_tlbs_memslot(kvm, slot);
7300 }
7301 
7302 void kvm_mmu_recover_huge_pages(struct kvm *kvm,
7303 				const struct kvm_memory_slot *slot)
7304 {
7305 	if (kvm_memslots_have_rmaps(kvm)) {
7306 		write_lock(&kvm->mmu_lock);
7307 		kvm_rmap_zap_collapsible_sptes(kvm, slot);
7308 		write_unlock(&kvm->mmu_lock);
7309 	}
7310 
7311 	if (tdp_mmu_enabled) {
7312 		read_lock(&kvm->mmu_lock);
7313 		kvm_tdp_mmu_recover_huge_pages(kvm, slot);
7314 		read_unlock(&kvm->mmu_lock);
7315 	}
7316 }
7317 
7318 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
7319 				   const struct kvm_memory_slot *memslot)
7320 {
7321 	if (kvm_memslots_have_rmaps(kvm)) {
7322 		write_lock(&kvm->mmu_lock);
7323 		/*
7324 		 * Clear dirty bits only on 4k SPTEs since the legacy MMU only
7325 		 * supports dirty logging at 4k granularity.
7326 		 */
7327 		walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false);
7328 		write_unlock(&kvm->mmu_lock);
7329 	}
7330 
7331 	if (tdp_mmu_enabled) {
7332 		read_lock(&kvm->mmu_lock);
7333 		kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
7334 		read_unlock(&kvm->mmu_lock);
7335 	}
7336 
7337 	/*
7338 	 * The caller will flush the TLBs after this function returns.
7339 	 *
7340 	 * It's also safe to flush TLBs outside of mmu_lock here, as this
7341 	 * function is currently only used for dirty logging, in which case
7342 	 * flushing TLBs outside of mmu_lock still guarantees that no dirty
7343 	 * pages will be lost in dirty_bitmap.
7344 	 */
7345 }
7346 
7347 static void kvm_mmu_zap_all(struct kvm *kvm)
7348 {
7349 	struct kvm_mmu_page *sp, *node;
7350 	LIST_HEAD(invalid_list);
7351 	int ign;
7352 
7353 	write_lock(&kvm->mmu_lock);
7354 restart:
7355 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
7356 		if (WARN_ON_ONCE(sp->role.invalid))
7357 			continue;
7358 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
7359 			goto restart;
7360 		if (cond_resched_rwlock_write(&kvm->mmu_lock))
7361 			goto restart;
7362 	}
7363 
7364 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
7365 
7366 	if (tdp_mmu_enabled)
7367 		kvm_tdp_mmu_zap_all(kvm);
7368 
7369 	write_unlock(&kvm->mmu_lock);
7370 }
7371 
7372 void kvm_arch_flush_shadow_all(struct kvm *kvm)
7373 {
7374 	kvm_mmu_zap_all(kvm);
7375 }
7376 
7377 static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm,
7378 						struct kvm_memory_slot *slot,
7379 						bool flush)
7380 {
7381 	LIST_HEAD(invalid_list);
7382 	unsigned long i;
7383 
7384 	if (list_empty(&kvm->arch.active_mmu_pages))
7385 		goto out_flush;
7386 
7387 	/*
7388 	 * Since accounting information is stored in struct kvm_arch_memory_slot,
7389 	 * all MMU pages that are shadowing guest PTEs must be zapped before the
7390 	 * memslot is deleted, as freeing such pages after the memslot is freed
7391 	 * will result in use-after-free, e.g. in unaccount_shadowed().
7392 	 */
7393 	for (i = 0; i < slot->npages; i++) {
7394 		struct kvm_mmu_page *sp;
7395 		gfn_t gfn = slot->base_gfn + i;
7396 
7397 		for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn)
7398 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7399 
7400 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7401 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7402 			flush = false;
7403 			cond_resched_rwlock_write(&kvm->mmu_lock);
7404 		}
7405 	}
7406 
7407 out_flush:
7408 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7409 }
7410 
7411 static void kvm_mmu_zap_memslot(struct kvm *kvm,
7412 				struct kvm_memory_slot *slot)
7413 {
7414 	struct kvm_gfn_range range = {
7415 		.slot = slot,
7416 		.start = slot->base_gfn,
7417 		.end = slot->base_gfn + slot->npages,
7418 		.may_block = true,
7419 		.attr_filter = KVM_FILTER_PRIVATE | KVM_FILTER_SHARED,
7420 	};
7421 	bool flush;
7422 
7423 	write_lock(&kvm->mmu_lock);
7424 	flush = kvm_unmap_gfn_range(kvm, &range);
7425 	kvm_mmu_zap_memslot_pages_and_flush(kvm, slot, flush);
7426 	write_unlock(&kvm->mmu_lock);
7427 }
7428 
7429 static inline bool kvm_memslot_flush_zap_all(struct kvm *kvm)
7430 {
7431 	return kvm->arch.vm_type == KVM_X86_DEFAULT_VM &&
7432 	       kvm_check_has_quirk(kvm, KVM_X86_QUIRK_SLOT_ZAP_ALL);
7433 }
7434 
7435 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
7436 				   struct kvm_memory_slot *slot)
7437 {
7438 	if (kvm_memslot_flush_zap_all(kvm))
7439 		kvm_mmu_zap_all_fast(kvm);
7440 	else
7441 		kvm_mmu_zap_memslot(kvm, slot);
7442 }
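/*
 * Summary of the check above: only a KVM_X86_DEFAULT_VM with the
 * KVM_X86_QUIRK_SLOT_ZAP_ALL quirk still enabled takes the historical
 * path of zapping *all* shadow pages on memslot deletion; all other VM
 * types, and VMs with the quirk disabled, zap only the SPTEs covering
 * the deleted slot's gfn range.
 */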
7443 
7444 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
7445 {
7446 	WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
7447 
7448 	if (!enable_mmio_caching)
7449 		return;
7450 
7451 	gen &= MMIO_SPTE_GEN_MASK;
7452 
7453 	/*
7454 	 * Generation numbers are incremented in multiples of the number of
7455 	 * address spaces in order to provide unique generations across all
7456 	 * address spaces.  Strip what is effectively the address space
7457 	 * modifier prior to checking for a wrap of the MMIO generation so
7458 	 * that a wrap in any address space is detected.
7459 	 */
7460 	gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1);
7461 
7462 	/*
7463 	 * The very rare case: if the MMIO generation number has wrapped,
7464 	 * zap all shadow pages.
7465 	 */
7466 	if (unlikely(gen == 0)) {
7467 		kvm_debug_ratelimited("zapping shadow pages for mmio generation wraparound\n");
7468 		kvm_mmu_zap_all_fast(kvm);
7469 	}
7470 }
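/*
 * Worked example: with two memslot address spaces (i.e. when SMM is
 * supported), the low bit of the generation is effectively the address
 * space modifier, so the masking above folds generations 0 and 1 onto
 * the same counter and a wrap is detected no matter which address space
 * wraps first.
 */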
7471 
7472 static void mmu_destroy_caches(void)
7473 {
7474 	kmem_cache_destroy(pte_list_desc_cache);
7475 	kmem_cache_destroy(mmu_page_header_cache);
7476 }
7477 
7478 static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
7479 {
7480 	/*
7481 	 * The NX recovery thread is spawned on-demand at the first KVM_RUN and
7482 	 * thus may not be valid even though the VM is globally visible.  If
7483 	 * so, do nothing, as such a VM can't have any possible NX huge pages.
7484 	 */
7485 	struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
7486 
7487 	if (nx_thread)
7488 		vhost_task_wake(nx_thread);
7489 }
7490 
7491 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
7492 {
7493 	if (nx_hugepage_mitigation_hard_disabled)
7494 		return sysfs_emit(buffer, "never\n");
7495 
7496 	return param_get_bool(buffer, kp);
7497 }
7498 
7499 static bool get_nx_auto_mode(void)
7500 {
7501 	/* Return true when CPU has the bug, and mitigations are ON */
7502 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
7503 }
7504 
7505 static void __set_nx_huge_pages(bool val)
7506 {
7507 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
7508 }
7509 
7510 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
7511 {
7512 	bool old_val = nx_huge_pages;
7513 	bool new_val;
7514 
7515 	if (nx_hugepage_mitigation_hard_disabled)
7516 		return -EPERM;
7517 
7518 	/* In "auto" mode deploy workaround only if CPU has the bug. */
7519 	if (sysfs_streq(val, "off")) {
7520 		new_val = 0;
7521 	} else if (sysfs_streq(val, "force")) {
7522 		new_val = 1;
7523 	} else if (sysfs_streq(val, "auto")) {
7524 		new_val = get_nx_auto_mode();
7525 	} else if (sysfs_streq(val, "never")) {
7526 		new_val = 0;
7527 
7528 		mutex_lock(&kvm_lock);
7529 		if (!list_empty(&vm_list)) {
7530 			mutex_unlock(&kvm_lock);
7531 			return -EBUSY;
7532 		}
7533 		nx_hugepage_mitigation_hard_disabled = true;
7534 		mutex_unlock(&kvm_lock);
7535 	} else if (kstrtobool(val, &new_val) < 0) {
7536 		return -EINVAL;
7537 	}
7538 
7539 	__set_nx_huge_pages(new_val);
7540 
7541 	if (new_val != old_val) {
7542 		struct kvm *kvm;
7543 
7544 		mutex_lock(&kvm_lock);
7545 
7546 		list_for_each_entry(kvm, &vm_list, vm_list) {
7547 			mutex_lock(&kvm->slots_lock);
7548 			kvm_mmu_zap_all_fast(kvm);
7549 			mutex_unlock(&kvm->slots_lock);
7550 
7551 			kvm_wake_nx_recovery_thread(kvm);
7552 		}
7553 		mutex_unlock(&kvm_lock);
7554 	}
7555 
7556 	return 0;
7557 }
7558 
7559 /*
7560  * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
7561  * its default value of -1 is technically undefined behavior for a boolean.
7562  * Forward the module init call to SPTE code so that it too can handle module
7563  * params that need to be resolved/snapshot.
7564  */
7565 void __init kvm_mmu_x86_module_init(void)
7566 {
7567 	if (nx_huge_pages == -1)
7568 		__set_nx_huge_pages(get_nx_auto_mode());
7569 
7570 	/*
7571 	 * Snapshot userspace's desire to enable the TDP MMU. Whether or not the
7572 	 * TDP MMU is actually enabled is determined in kvm_configure_mmu()
7573 	 * when the vendor module is loaded.
7574 	 */
7575 	tdp_mmu_allowed = tdp_mmu_enabled;
7576 
7577 	kvm_mmu_spte_module_init();
7578 }
7579 
7580 /*
7581  * The bulk of the MMU initialization is deferred until the vendor module is
7582  * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
7583  * to be reset when a potentially different vendor module is loaded.
7584  */
7585 int kvm_mmu_vendor_module_init(void)
7586 {
7587 	int ret = -ENOMEM;
7588 
7589 	/*
7590 	 * MMU roles use union aliasing which is, generally speaking,
7591 	 * undefined behavior. However, we supposedly know how compilers behave
7592 	 * and the current status quo is unlikely to change. The assertions
7593 	 * below are supposed to let us know if the assumption becomes false.
7594 	 */
7595 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
7596 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
7597 	BUILD_BUG_ON(sizeof(union kvm_cpu_role) != sizeof(u64));
7598 
7599 	kvm_mmu_reset_all_pte_masks();
7600 
7601 	pte_list_desc_cache = KMEM_CACHE(pte_list_desc, SLAB_ACCOUNT);
7602 	if (!pte_list_desc_cache)
7603 		goto out;
7604 
7605 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
7606 						  sizeof(struct kvm_mmu_page),
7607 						  0, SLAB_ACCOUNT, NULL);
7608 	if (!mmu_page_header_cache)
7609 		goto out;
7610 
7611 	return 0;
7612 
7613 out:
7614 	mmu_destroy_caches();
7615 	return ret;
7616 }
7617 
7618 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
7619 {
7620 	kvm_mmu_unload(vcpu);
7621 	if (tdp_mmu_enabled) {
7622 		read_lock(&vcpu->kvm->mmu_lock);
7623 		mmu_free_root_page(vcpu->kvm, &vcpu->arch.mmu->mirror_root_hpa,
7624 				   NULL);
7625 		read_unlock(&vcpu->kvm->mmu_lock);
7626 	}
7627 	free_mmu_pages(&vcpu->arch.root_mmu);
7628 	free_mmu_pages(&vcpu->arch.guest_mmu);
7629 	mmu_free_memory_caches(vcpu);
7630 }
7631 
7632 void kvm_mmu_vendor_module_exit(void)
7633 {
7634 	mmu_destroy_caches();
7635 }
7636 
7637 /*
7638  * Calculate the effective recovery period, accounting for '0' meaning "let KVM
7639  * select a halving time of 1 hour".  Returns true if recovery is enabled.
7640  */
7641 static bool calc_nx_huge_pages_recovery_period(uint *period)
7642 {
7643 	/*
7644 	 * Use READ_ONCE to get the params; this may be called outside of the
7645 	 * param setters, e.g. by the kthread to compute its next timeout.
7646 	 */
7647 	bool enabled = READ_ONCE(nx_huge_pages);
7648 	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7649 
7650 	if (!enabled || !ratio)
7651 		return false;
7652 
7653 	*period = READ_ONCE(nx_huge_pages_recovery_period_ms);
7654 	if (!*period) {
7655 		/* Make sure the period is not less than one second.  */
7656 		ratio = min(ratio, 3600u);
7657 		*period = 60 * 60 * 1000 / ratio;
7658 	}
7659 	return true;
7660 }
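/*
 * Worked example: with the default nx_huge_pages_recovery_ratio of 60 and
 * nx_huge_pages_recovery_period_ms of 0, the computed period is
 * 60 * 60 * 1000 / 60 = 60000ms, i.e. recovery runs once per minute.  The
 * min(ratio, 3600u) clamp bounds the auto-selected period at
 * 60 * 60 * 1000 / 3600 = 1000ms, i.e. at least one second.
 */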
7661 
7662 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
7663 {
7664 	bool was_recovery_enabled, is_recovery_enabled;
7665 	uint old_period, new_period;
7666 	int err;
7667 
7668 	if (nx_hugepage_mitigation_hard_disabled)
7669 		return -EPERM;
7670 
7671 	was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
7672 
7673 	err = param_set_uint(val, kp);
7674 	if (err)
7675 		return err;
7676 
7677 	is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
7678 
7679 	if (is_recovery_enabled &&
7680 	    (!was_recovery_enabled || old_period > new_period)) {
7681 		struct kvm *kvm;
7682 
7683 		mutex_lock(&kvm_lock);
7684 
7685 		list_for_each_entry(kvm, &vm_list, vm_list)
7686 			kvm_wake_nx_recovery_thread(kvm);
7687 
7688 		mutex_unlock(&kvm_lock);
7689 	}
7690 
7691 	return err;
7692 }
7693 
7694 static unsigned long nx_huge_pages_to_zap(struct kvm *kvm,
7695 					  enum kvm_mmu_type mmu_type)
7696 {
7697 	unsigned long pages = READ_ONCE(kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages);
7698 	unsigned int ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7699 
7700 	return ratio ? DIV_ROUND_UP(pages, ratio) : 0;
7701 }
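/*
 * E.g. with the default recovery_ratio of 60, a list of 1000 possible NX
 * huge pages yields DIV_ROUND_UP(1000, 60) = 17 pages to zap per recovery
 * period, i.e. roughly 1/60th of the list per wakeup.
 */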
7702 
7703 static bool kvm_mmu_sp_dirty_logging_enabled(struct kvm *kvm,
7704 					     struct kvm_mmu_page *sp)
7705 {
7706 	struct kvm_memory_slot *slot;
7707 
7708 	/*
7709 	 * Skip the memslot lookup if dirty tracking can't possibly be enabled,
7710 	 * as memslot lookups are relatively expensive.
7711 	 *
7712 	 * If a memslot update is in progress, reading an incorrect value of
7713 	 * kvm->nr_memslots_dirty_logging is not a problem: if it is becoming
7714 	 * zero, KVM will do an unnecessary memslot lookup; if it is becoming
7715 	 * nonzero, the page will be zapped unnecessarily.  Either way, this
7716 	 * only affects efficiency in racy situations, and not correctness.
7717 	 */
7718 	if (!atomic_read(&kvm->nr_memslots_dirty_logging))
7719 		return false;
7720 
7721 	slot = __gfn_to_memslot(kvm_memslots_for_spte_role(kvm, sp->role), sp->gfn);
7722 	if (WARN_ON_ONCE(!slot))
7723 		return false;
7724 
7725 	return kvm_slot_dirty_track_enabled(slot);
7726 }
7727 
7728 static void kvm_recover_nx_huge_pages(struct kvm *kvm,
7729 				      const enum kvm_mmu_type mmu_type)
7730 {
7731 #ifdef CONFIG_X86_64
7732 	const bool is_tdp_mmu = mmu_type == KVM_TDP_MMU;
7733 	spinlock_t *tdp_mmu_pages_lock = &kvm->arch.tdp_mmu_pages_lock;
7734 #else
7735 	const bool is_tdp_mmu = false;
7736 	spinlock_t *tdp_mmu_pages_lock = NULL;
7737 #endif
7738 	unsigned long to_zap = nx_huge_pages_to_zap(kvm, mmu_type);
7739 	struct list_head *nx_huge_pages;
7740 	struct kvm_mmu_page *sp;
7741 	LIST_HEAD(invalid_list);
7742 	bool flush = false;
7743 	int rcu_idx;
7744 
7745 	nx_huge_pages = &kvm->arch.possible_nx_huge_pages[mmu_type].pages;
7746 
7747 	rcu_idx = srcu_read_lock(&kvm->srcu);
7748 	if (is_tdp_mmu)
7749 		read_lock(&kvm->mmu_lock);
7750 	else
7751 		write_lock(&kvm->mmu_lock);
7752 
7753 	/*
7754 	 * Zapping TDP MMU shadow pages, including the remote TLB flush, must
7755 	 * be done under RCU protection, because the pages are freed via RCU
7756 	 * callback.
7757 	 */
7758 	rcu_read_lock();
7759 
7760 	for ( ; to_zap; --to_zap) {
7761 		if (is_tdp_mmu)
7762 			spin_lock(tdp_mmu_pages_lock);
7763 
7764 		if (list_empty(nx_huge_pages)) {
7765 			if (is_tdp_mmu)
7766 				spin_unlock(tdp_mmu_pages_lock);
7767 			break;
7768 		}
7769 
7770 		/*
7771 		 * A separate list is used instead of active_mmu_pages because
7772 		 * the number of shadow pages that can be replaced with an
7773 		 * NX huge page is expected to be relatively small compared to
7774 		 * the total number of shadow pages, and because the TDP MMU
7775 		 * doesn't use active_mmu_pages at all.
7776 		 */
7777 		sp = list_first_entry(nx_huge_pages,
7778 				      struct kvm_mmu_page,
7779 				      possible_nx_huge_page_link);
7780 		WARN_ON_ONCE(!sp->nx_huge_page_disallowed);
7781 		WARN_ON_ONCE(!sp->role.direct);
7782 
7783 		unaccount_nx_huge_page(kvm, sp);
7784 
7785 		if (is_tdp_mmu)
7786 			spin_unlock(tdp_mmu_pages_lock);
7787 
7788 		/*
7789 		 * Do not attempt to recover any NX Huge Pages that are being
7790 		 * dirty tracked, as they would just be faulted back in as 4KiB
7791 		 * pages. The NX Huge Pages in this slot will be recovered,
7792 		 * along with all the other huge pages in the slot, when dirty
7793 		 * logging is disabled.
7794 		 */
7795 		if (!kvm_mmu_sp_dirty_logging_enabled(kvm, sp)) {
7796 			if (is_tdp_mmu)
7797 				flush |= kvm_tdp_mmu_zap_possible_nx_huge_page(kvm, sp);
7798 			else
7799 				kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7801 		}
7802 
7803 		WARN_ON_ONCE(sp->nx_huge_page_disallowed);
7804 
7805 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7806 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7807 			rcu_read_unlock();
7808 
7809 			if (is_tdp_mmu)
7810 				cond_resched_rwlock_read(&kvm->mmu_lock);
7811 			else
7812 				cond_resched_rwlock_write(&kvm->mmu_lock);
7813 
7814 			flush = false;
7815 			rcu_read_lock();
7816 		}
7817 	}
7818 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7819 
7820 	rcu_read_unlock();
7821 
7822 	if (is_tdp_mmu)
7823 		read_unlock(&kvm->mmu_lock);
7824 	else
7825 		write_unlock(&kvm->mmu_lock);
7826 	srcu_read_unlock(&kvm->srcu, rcu_idx);
7827 }
7828 
7829 static void kvm_nx_huge_page_recovery_worker_kill(void *data)
7830 {
7831 }
7832 
7833 static bool kvm_nx_huge_page_recovery_worker(void *data)
7834 {
7835 	struct kvm *kvm = data;
7836 	long remaining_time;
7837 	bool enabled;
7838 	uint period;
7839 	int i;
7840 
7841 	enabled = calc_nx_huge_pages_recovery_period(&period);
7842 	if (!enabled)
7843 		return false;
7844 
7845 	remaining_time = kvm->arch.nx_huge_page_last + msecs_to_jiffies(period)
7846 		- get_jiffies_64();
7847 	if (remaining_time > 0) {
7848 		schedule_timeout(remaining_time);
7849 		/* check for signals and come back */
7850 		return true;
7851 	}
7852 
7853 	__set_current_state(TASK_RUNNING);
7854 	for (i = 0; i < KVM_NR_MMU_TYPES; ++i)
7855 		kvm_recover_nx_huge_pages(kvm, i);
7856 	kvm->arch.nx_huge_page_last = get_jiffies_64();
7857 	return true;
7858 }
7859 
7860 static int kvm_mmu_start_lpage_recovery(struct once *once)
7861 {
7862 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
7863 	struct kvm *kvm = container_of(ka, struct kvm, arch);
7864 	struct vhost_task *nx_thread;
7865 
7866 	kvm->arch.nx_huge_page_last = get_jiffies_64();
7867 	nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
7868 				      kvm_nx_huge_page_recovery_worker_kill,
7869 				      kvm, "kvm-nx-lpage-recovery");
7870 
7871 	if (IS_ERR(nx_thread))
7872 		return PTR_ERR(nx_thread);
7873 
7874 	vhost_task_start(nx_thread);
7875 
7876 	/* Make the task visible only once it is fully started. */
7877 	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
7878 	return 0;
7879 }
7880 
7881 int kvm_mmu_post_init_vm(struct kvm *kvm)
7882 {
7883 	if (nx_hugepage_mitigation_hard_disabled)
7884 		return 0;
7885 
7886 	return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
7887 }
7888 
7889 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
7890 {
7891 	if (kvm->arch.nx_huge_page_recovery_thread)
7892 		vhost_task_stop(kvm->arch.nx_huge_page_recovery_thread);
7893 }
7894 
7895 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
7896 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7897 				int level)
7898 {
7899 	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
7900 }
7901 
7902 static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7903 				 int level)
7904 {
7905 	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
7906 }
7907 
7908 static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7909 			       int level)
7910 {
7911 	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
7912 }
7913 
7914 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
7915 					struct kvm_gfn_range *range)
7916 {
7917 	struct kvm_memory_slot *slot = range->slot;
7918 	int level;
7919 
7920 	/*
7921 	 * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
7922 	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
7923 	 * can simply ignore such slots.  But if userspace is making memory
7924 	 * PRIVATE, then KVM must prevent the guest from accessing the memory
7925 	 * as shared.  And if userspace is making memory SHARED and this point
7926 	 * is reached, then at least one page within the range was previously
7927 	 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
7928 	 * Zapping SPTEs in this case ensures KVM will reassess whether or not
7929 	 * a hugepage can be used for affected ranges.
7930 	 */
7931 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7932 		return false;
7933 
7934 	if (WARN_ON_ONCE(range->end <= range->start))
7935 		return false;
7936 
7937 	/*
7938 	 * If the head and tail pages of the range currently allow a hugepage,
7939 	 * i.e. reside fully in the slot and don't have mixed attributes, then
7940 	 * add each corresponding hugepage range to the ongoing invalidation,
7941 	 * e.g. to prevent KVM from creating a hugepage in response to a fault
7942 	 * for a gfn whose attributes aren't changing.  Note, only the range
7943 	 * of gfns whose attributes are being modified needs to be explicitly
7944 	 * unmapped, as that will unmap any existing hugepages.
7945 	 */
7946 	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7947 		gfn_t start = gfn_round_for_level(range->start, level);
7948 		gfn_t end = gfn_round_for_level(range->end - 1, level);
7949 		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7950 
7951 		if ((start != range->start || start + nr_pages > range->end) &&
7952 		    start >= slot->base_gfn &&
7953 		    start + nr_pages <= slot->base_gfn + slot->npages &&
7954 		    !hugepage_test_mixed(slot, start, level))
7955 			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
7956 
7957 		if (end == start)
7958 			continue;
7959 
7960 		if ((end + nr_pages) > range->end &&
7961 		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
7962 		    !hugepage_test_mixed(slot, end, level))
7963 			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
7964 	}
7965 
7966 	/* Unmap the old attribute page. */
7967 	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
7968 		range->attr_filter = KVM_FILTER_SHARED;
7969 	else
7970 		range->attr_filter = KVM_FILTER_PRIVATE;
7971 
7972 	return kvm_unmap_gfn_range(kvm, range);
7973 }
7974 
7977 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
7978 			       gfn_t gfn, int level, unsigned long attrs)
7979 {
7980 	const unsigned long start = gfn;
7981 	const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
7982 
7983 	if (level == PG_LEVEL_2M)
7984 		return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
7985 
7986 	for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
7987 		if (hugepage_test_mixed(slot, gfn, level - 1) ||
7988 		    attrs != kvm_get_memory_attributes(kvm, gfn))
7989 			return false;
7990 	}
7991 	return true;
7992 }
7993 
7994 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
7995 					 struct kvm_gfn_range *range)
7996 {
7997 	unsigned long attrs = range->arg.attributes;
7998 	struct kvm_memory_slot *slot = range->slot;
7999 	int level;
8000 
8001 	lockdep_assert_held_write(&kvm->mmu_lock);
8002 	lockdep_assert_held(&kvm->slots_lock);
8003 
8004 	/*
8005 	 * Calculate which ranges can be mapped with hugepages even if the slot
8006 	 * can't map memory PRIVATE.  KVM mustn't create a SHARED hugepage over
8007 	 * a range that has PRIVATE GFNs, and conversely converting a range to
8008 	 * SHARED may now allow hugepages.
8009 	 */
8010 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
8011 		return false;
8012 
8013 	/*
8014 	 * The sequence matters here: upper levels consume the result of the
8015 	 * lower levels' scanning.
8016 	 */
8017 	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
8018 		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
8019 		gfn_t gfn = gfn_round_for_level(range->start, level);
8020 
8021 		/* Process the head page if it straddles the range. */
8022 		if (gfn != range->start || gfn + nr_pages > range->end) {
8023 			/*
8024 			 * Skip mixed tracking if the aligned gfn isn't covered
8025 			 * by the memslot; KVM can't use a hugepage due to the
8026 			 * misaligned address regardless of memory attributes.
8027 			 */
8028 			if (gfn >= slot->base_gfn &&
8029 			    gfn + nr_pages <= slot->base_gfn + slot->npages) {
8030 				if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
8031 					hugepage_clear_mixed(slot, gfn, level);
8032 				else
8033 					hugepage_set_mixed(slot, gfn, level);
8034 			}
8035 			gfn += nr_pages;
8036 		}
8037 
8038 		/*
8039 		 * Pages entirely covered by the range are guaranteed to have
8040 		 * only the attributes which were just set.
8041 		 */
8042 		for ( ; gfn + nr_pages <= range->end; gfn += nr_pages)
8043 			hugepage_clear_mixed(slot, gfn, level);
8044 
8045 		/*
8046 		 * Process the last tail page if it straddles the range and is
8047 		 * contained by the memslot.  Like the head page, KVM can't
8048 		 * create a hugepage if the slot size is misaligned.
8049 		 */
8050 		if (gfn < range->end &&
8051 		    (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
8052 			if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
8053 				hugepage_clear_mixed(slot, gfn, level);
8054 			else
8055 				hugepage_set_mixed(slot, gfn, level);
8056 		}
8057 	}
8058 	return false;
8059 }
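/*
 * Worked example (hypothetical gfns) for the head/tail handling above, at
 * the 2MiB level (nr_pages = 512 = 0x200): for the range [0x201, 0x6f0),
 * the head page rounds down to 0x200 and straddles the start, so
 * [0x200, 0x400) gets the mixed-attributes check; [0x400, 0x600) is fully
 * covered and is simply cleared; the tail page [0x600, 0x800) straddles
 * the end and gets the mixed check, provided it lies within the memslot.
 */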
8060 
8061 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
8062 					    struct kvm_memory_slot *slot)
8063 {
8064 	int level;
8065 
8066 	if (!kvm_arch_has_private_mem(kvm))
8067 		return;
8068 
8069 	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
8070 		/*
8071 		 * Don't bother tracking mixed attributes for pages that can't
8072 		 * be huge due to alignment, i.e. process only pages that are
8073 		 * entirely contained by the memslot.
8074 		 */
8075 		gfn_t end = gfn_round_for_level(slot->base_gfn + slot->npages, level);
8076 		gfn_t start = gfn_round_for_level(slot->base_gfn, level);
8077 		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
8078 		gfn_t gfn;
8079 
8080 		if (start < slot->base_gfn)
8081 			start += nr_pages;
8082 
8083 		/*
8084 		 * Unlike setting attributes, every potential hugepage needs to
8085 		 * be manually checked as the attributes may already be mixed.
8086 		 */
8087 		for (gfn = start; gfn < end; gfn += nr_pages) {
8088 			unsigned long attrs = kvm_get_memory_attributes(kvm, gfn);
8089 
8090 			if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
8091 				hugepage_clear_mixed(slot, gfn, level);
8092 			else
8093 				hugepage_set_mixed(slot, gfn, level);
8094 		}
8095 	}
8096 }
8097 #endif
8098