xref: /linux/arch/x86/kvm/mmu/mmu.c (revision e1914add2799225a87502051415fc5c32aeb02ae)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * MMU support
9  *
10  * Copyright (C) 2006 Qumranet, Inc.
11  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12  *
13  * Authors:
14  *   Yaniv Kamay  <yaniv@qumranet.com>
15  *   Avi Kivity   <avi@qumranet.com>
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include "irq.h"
20 #include "ioapic.h"
21 #include "mmu.h"
22 #include "mmu_internal.h"
23 #include "tdp_mmu.h"
24 #include "x86.h"
25 #include "kvm_cache_regs.h"
26 #include "smm.h"
27 #include "kvm_emulate.h"
28 #include "page_track.h"
29 #include "cpuid.h"
30 #include "spte.h"
31 
32 #include <linux/kvm_host.h>
33 #include <linux/types.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/highmem.h>
37 #include <linux/moduleparam.h>
38 #include <linux/export.h>
39 #include <linux/swap.h>
40 #include <linux/hugetlb.h>
41 #include <linux/compiler.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/sched/signal.h>
45 #include <linux/uaccess.h>
46 #include <linux/hash.h>
47 #include <linux/kern_levels.h>
48 #include <linux/kstrtox.h>
49 #include <linux/kthread.h>
50 #include <linux/wordpart.h>
51 
52 #include <asm/page.h>
53 #include <asm/memtype.h>
54 #include <asm/cmpxchg.h>
55 #include <asm/io.h>
56 #include <asm/set_memory.h>
57 #include <asm/spec-ctrl.h>
58 #include <asm/vmx.h>
59 
60 #include "trace.h"
61 
62 static bool nx_hugepage_mitigation_hard_disabled;
63 
64 int __read_mostly nx_huge_pages = -1;
65 static uint __read_mostly nx_huge_pages_recovery_period_ms;
66 #ifdef CONFIG_PREEMPT_RT
67 /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
68 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
69 #else
70 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
71 #endif
72 
73 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
74 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
75 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
76 
77 static const struct kernel_param_ops nx_huge_pages_ops = {
78 	.set = set_nx_huge_pages,
79 	.get = get_nx_huge_pages,
80 };
81 
82 static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
83 	.set = set_nx_huge_pages_recovery_param,
84 	.get = param_get_uint,
85 };
86 
87 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
88 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
89 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops,
90 		&nx_huge_pages_recovery_ratio, 0644);
91 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
92 module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops,
93 		&nx_huge_pages_recovery_period_ms, 0644);
94 __MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint");
95 
96 static bool __read_mostly force_flush_and_sync_on_reuse;
97 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
98 
99 /*
100  * When setting this variable to true it enables Two-Dimensional-Paging
101  * where the hardware walks 2 page tables:
102  * 1. the guest-virtual to guest-physical
103  * 2. while doing 1. it walks guest-physical to host-physical
104  * If the hardware supports that we don't need to do shadow paging.
105  */
106 bool tdp_enabled = false;
107 
108 static bool __ro_after_init tdp_mmu_allowed;
109 
110 #ifdef CONFIG_X86_64
111 bool __read_mostly tdp_mmu_enabled = true;
112 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
113 EXPORT_SYMBOL_FOR_KVM_INTERNAL(tdp_mmu_enabled);
114 #endif
115 
116 static int max_huge_page_level __read_mostly;
117 static int tdp_root_level __read_mostly;
118 static int max_tdp_level __read_mostly;
119 
120 #define PTE_PREFETCH_NUM		8
121 
122 #include <trace/events/kvm.h>
123 
124 /* make pte_list_desc fit well in cache lines */
125 #define PTE_LIST_EXT 14
126 
127 /*
128  * struct pte_list_desc is the core data structure used to implement a custom
129  * list for tracking a set of related SPTEs, e.g. all the SPTEs that map a
130  * given GFN when used in the context of rmaps.  Using a custom list allows KVM
131  * to optimize for the common case where many GFNs will have at most a handful
132  * of SPTEs pointing at them, i.e. allows packing multiple SPTEs into a small
133  * memory footprint, which in turn improves runtime performance by exploiting
134  * cache locality.
135  *
136  * A list is comprised of one or more pte_list_desc objects (descriptors).
137  * Each individual descriptor stores up to PTE_LIST_EXT SPTEs.  If a descriptor
138  * is full and a new SPTEs needs to be added, a new descriptor is allocated and
139  * becomes the head of the list.  This means that by definitions, all tail
140  * descriptors are full.
141  *
142  * Note, the meta data fields are deliberately placed at the start of the
143  * structure to optimize the cacheline layout; accessing the descriptor will
144  * touch only a single cacheline so long as @spte_count<=6 (or if only the
145  * descriptors metadata is accessed).
146  */
147 struct pte_list_desc {
148 	struct pte_list_desc *more;
149 	/* The number of PTEs stored in _this_ descriptor. */
150 	u32 spte_count;
151 	/* The number of PTEs stored in all tails of this descriptor. */
152 	u32 tail_count;
153 	u64 *sptes[PTE_LIST_EXT];
154 };
155 
156 struct kvm_shadow_walk_iterator {
157 	u64 addr;
158 	hpa_t shadow_addr;
159 	u64 *sptep;
160 	int level;
161 	unsigned index;
162 };
163 
164 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
165 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
166 					 (_root), (_addr));                \
167 	     shadow_walk_okay(&(_walker));			           \
168 	     shadow_walk_next(&(_walker)))
169 
170 #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
171 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
172 	     shadow_walk_okay(&(_walker));			\
173 	     shadow_walk_next(&(_walker)))
174 
175 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
176 	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
177 	     shadow_walk_okay(&(_walker)) &&				\
178 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
179 	     __shadow_walk_next(&(_walker), spte))
180 
181 static struct kmem_cache *pte_list_desc_cache;
182 struct kmem_cache *mmu_page_header_cache;
183 
184 static void mmu_spte_set(u64 *sptep, u64 spte);
185 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
186 			    u64 *spte, struct list_head *invalid_list);
187 
188 struct kvm_mmu_role_regs {
189 	const unsigned long cr0;
190 	const unsigned long cr4;
191 	const u64 efer;
192 };
193 
194 #define CREATE_TRACE_POINTS
195 #include "mmutrace.h"
196 
197 /*
198  * Yes, lot's of underscores.  They're a hint that you probably shouldn't be
199  * reading from the role_regs.  Once the root_role is constructed, it becomes
200  * the single source of truth for the MMU's state.
201  */
202 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
203 static inline bool __maybe_unused					\
204 ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs)		\
205 {									\
206 	return !!(regs->reg & flag);					\
207 }
208 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
209 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
210 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
211 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
212 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
213 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
214 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
215 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
216 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
217 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
218 
219 /*
220  * The MMU itself (with a valid role) is the single source of truth for the
221  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
222  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
223  * and the vCPU may be incorrect/irrelevant.
224  */
225 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
226 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
227 {								\
228 	return !!(mmu->cpu_role. base_or_ext . reg##_##name);	\
229 }
230 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
231 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
232 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
233 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
234 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
235 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
236 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
237 BUILD_MMU_ROLE_ACCESSOR(ext,  efer, lma);
238 
is_cr0_pg(struct kvm_mmu * mmu)239 static inline bool is_cr0_pg(struct kvm_mmu *mmu)
240 {
241         return mmu->cpu_role.base.level > 0;
242 }
243 
is_cr4_pae(struct kvm_mmu * mmu)244 static inline bool is_cr4_pae(struct kvm_mmu *mmu)
245 {
246         return !mmu->cpu_role.base.has_4_byte_gpte;
247 }
248 
vcpu_to_role_regs(struct kvm_vcpu * vcpu)249 static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
250 {
251 	struct kvm_mmu_role_regs regs = {
252 		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
253 		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
254 		.efer = vcpu->arch.efer,
255 	};
256 
257 	return regs;
258 }
259 
get_guest_cr3(struct kvm_vcpu * vcpu)260 static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
261 {
262 	return kvm_read_cr3(vcpu);
263 }
264 
kvm_mmu_get_guest_pgd(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu)265 static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
266 						  struct kvm_mmu *mmu)
267 {
268 	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
269 		return kvm_read_cr3(vcpu);
270 
271 	return mmu->get_guest_pgd(vcpu);
272 }
273 
kvm_available_flush_remote_tlbs_range(void)274 static inline bool kvm_available_flush_remote_tlbs_range(void)
275 {
276 #if IS_ENABLED(CONFIG_HYPERV)
277 	return kvm_x86_ops.flush_remote_tlbs_range;
278 #else
279 	return false;
280 #endif
281 }
282 
283 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
284 
285 /* Flush the range of guest memory mapped by the given SPTE. */
kvm_flush_remote_tlbs_sptep(struct kvm * kvm,u64 * sptep)286 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
287 {
288 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
289 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
290 
291 	kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
292 }
293 
mark_mmio_spte(struct kvm_vcpu * vcpu,u64 * sptep,u64 gfn,unsigned int access)294 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
295 			   unsigned int access)
296 {
297 	u64 spte = make_mmio_spte(vcpu, gfn, access);
298 
299 	trace_mark_mmio_spte(sptep, gfn, spte);
300 	mmu_spte_set(sptep, spte);
301 }
302 
get_mmio_spte_gfn(u64 spte)303 static gfn_t get_mmio_spte_gfn(u64 spte)
304 {
305 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
306 
307 	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
308 	       & shadow_nonpresent_or_rsvd_mask;
309 
310 	return gpa >> PAGE_SHIFT;
311 }
312 
get_mmio_spte_access(u64 spte)313 static unsigned get_mmio_spte_access(u64 spte)
314 {
315 	return spte & shadow_mmio_access_mask;
316 }
317 
check_mmio_spte(struct kvm_vcpu * vcpu,u64 spte)318 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
319 {
320 	u64 kvm_gen, spte_gen, gen;
321 
322 	gen = kvm_vcpu_memslots(vcpu)->generation;
323 	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
324 		return false;
325 
326 	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
327 	spte_gen = get_mmio_spte_generation(spte);
328 
329 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
330 	return likely(kvm_gen == spte_gen);
331 }
332 
is_cpuid_PSE36(void)333 static int is_cpuid_PSE36(void)
334 {
335 	return 1;
336 }
337 
338 #ifdef CONFIG_X86_64
__set_spte(u64 * sptep,u64 spte)339 static void __set_spte(u64 *sptep, u64 spte)
340 {
341 	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
342 	WRITE_ONCE(*sptep, spte);
343 }
344 
__update_clear_spte_fast(u64 * sptep,u64 spte)345 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
346 {
347 	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
348 	WRITE_ONCE(*sptep, spte);
349 }
350 
__update_clear_spte_slow(u64 * sptep,u64 spte)351 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
352 {
353 	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
354 	return xchg(sptep, spte);
355 }
356 
__get_spte_lockless(u64 * sptep)357 static u64 __get_spte_lockless(u64 *sptep)
358 {
359 	return READ_ONCE(*sptep);
360 }
361 #else
362 union split_spte {
363 	struct {
364 		u32 spte_low;
365 		u32 spte_high;
366 	};
367 	u64 spte;
368 };
369 
count_spte_clear(u64 * sptep,u64 spte)370 static void count_spte_clear(u64 *sptep, u64 spte)
371 {
372 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
373 
374 	if (is_shadow_present_pte(spte))
375 		return;
376 
377 	/* Ensure the spte is completely set before we increase the count */
378 	smp_wmb();
379 	sp->clear_spte_count++;
380 }
381 
__set_spte(u64 * sptep,u64 spte)382 static void __set_spte(u64 *sptep, u64 spte)
383 {
384 	union split_spte *ssptep, sspte;
385 
386 	ssptep = (union split_spte *)sptep;
387 	sspte = (union split_spte)spte;
388 
389 	ssptep->spte_high = sspte.spte_high;
390 
391 	/*
392 	 * If we map the spte from nonpresent to present, We should store
393 	 * the high bits firstly, then set present bit, so cpu can not
394 	 * fetch this spte while we are setting the spte.
395 	 */
396 	smp_wmb();
397 
398 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
399 }
400 
__update_clear_spte_fast(u64 * sptep,u64 spte)401 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
402 {
403 	union split_spte *ssptep, sspte;
404 
405 	ssptep = (union split_spte *)sptep;
406 	sspte = (union split_spte)spte;
407 
408 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
409 
410 	/*
411 	 * If we map the spte from present to nonpresent, we should clear
412 	 * present bit firstly to avoid vcpu fetch the old high bits.
413 	 */
414 	smp_wmb();
415 
416 	ssptep->spte_high = sspte.spte_high;
417 	count_spte_clear(sptep, spte);
418 }
419 
__update_clear_spte_slow(u64 * sptep,u64 spte)420 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
421 {
422 	union split_spte *ssptep, sspte, orig;
423 
424 	ssptep = (union split_spte *)sptep;
425 	sspte = (union split_spte)spte;
426 
427 	/* xchg acts as a barrier before the setting of the high bits */
428 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
429 	orig.spte_high = ssptep->spte_high;
430 	ssptep->spte_high = sspte.spte_high;
431 	count_spte_clear(sptep, spte);
432 
433 	return orig.spte;
434 }
435 
436 /*
437  * The idea using the light way get the spte on x86_32 guest is from
438  * gup_get_pte (mm/gup.c).
439  *
440  * An spte tlb flush may be pending, because they are coalesced and
441  * we are running out of the MMU lock.  Therefore
442  * we need to protect against in-progress updates of the spte.
443  *
444  * Reading the spte while an update is in progress may get the old value
445  * for the high part of the spte.  The race is fine for a present->non-present
446  * change (because the high part of the spte is ignored for non-present spte),
447  * but for a present->present change we must reread the spte.
448  *
449  * All such changes are done in two steps (present->non-present and
450  * non-present->present), hence it is enough to count the number of
451  * present->non-present updates: if it changed while reading the spte,
452  * we might have hit the race.  This is done using clear_spte_count.
453  */
__get_spte_lockless(u64 * sptep)454 static u64 __get_spte_lockless(u64 *sptep)
455 {
456 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
457 	union split_spte spte, *orig = (union split_spte *)sptep;
458 	int count;
459 
460 retry:
461 	count = sp->clear_spte_count;
462 	smp_rmb();
463 
464 	spte.spte_low = orig->spte_low;
465 	smp_rmb();
466 
467 	spte.spte_high = orig->spte_high;
468 	smp_rmb();
469 
470 	if (unlikely(spte.spte_low != orig->spte_low ||
471 	      count != sp->clear_spte_count))
472 		goto retry;
473 
474 	return spte.spte;
475 }
476 #endif
477 
478 /* Rules for using mmu_spte_set:
479  * Set the sptep from nonpresent to present.
480  * Note: the sptep being assigned *must* be either not present
481  * or in a state where the hardware will not attempt to update
482  * the spte.
483  */
mmu_spte_set(u64 * sptep,u64 new_spte)484 static void mmu_spte_set(u64 *sptep, u64 new_spte)
485 {
486 	WARN_ON_ONCE(is_shadow_present_pte(*sptep));
487 	__set_spte(sptep, new_spte);
488 }
489 
490 /* Rules for using mmu_spte_update:
491  * Update the state bits, it means the mapped pfn is not changed.
492  *
493  * Returns true if the TLB needs to be flushed
494  */
mmu_spte_update(u64 * sptep,u64 new_spte)495 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
496 {
497 	u64 old_spte = *sptep;
498 
499 	WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
500 	check_spte_writable_invariants(new_spte);
501 
502 	if (!is_shadow_present_pte(old_spte)) {
503 		mmu_spte_set(sptep, new_spte);
504 		return false;
505 	}
506 
507 	if (!spte_needs_atomic_update(old_spte))
508 		__update_clear_spte_fast(sptep, new_spte);
509 	else
510 		old_spte = __update_clear_spte_slow(sptep, new_spte);
511 
512 	WARN_ON_ONCE(!is_shadow_present_pte(old_spte) ||
513 		     spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
514 
515 	return leaf_spte_change_needs_tlb_flush(old_spte, new_spte);
516 }
517 
518 /*
519  * Rules for using mmu_spte_clear_track_bits:
520  * It sets the sptep from present to nonpresent, and track the
521  * state bits, it is used to clear the last level sptep.
522  * Returns the old PTE.
523  */
mmu_spte_clear_track_bits(struct kvm * kvm,u64 * sptep)524 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
525 {
526 	u64 old_spte = *sptep;
527 	int level = sptep_to_sp(sptep)->role.level;
528 
529 	if (!is_shadow_present_pte(old_spte) ||
530 	    !spte_needs_atomic_update(old_spte))
531 		__update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
532 	else
533 		old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
534 
535 	if (!is_shadow_present_pte(old_spte))
536 		return old_spte;
537 
538 	kvm_update_page_stats(kvm, level, -1);
539 	return old_spte;
540 }
541 
542 /*
543  * Rules for using mmu_spte_clear_no_track:
544  * Directly clear spte without caring the state bits of sptep,
545  * it is used to set the upper level spte.
546  */
mmu_spte_clear_no_track(u64 * sptep)547 static void mmu_spte_clear_no_track(u64 *sptep)
548 {
549 	__update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
550 }
551 
mmu_spte_get_lockless(u64 * sptep)552 static u64 mmu_spte_get_lockless(u64 *sptep)
553 {
554 	return __get_spte_lockless(sptep);
555 }
556 
is_tdp_mmu_active(struct kvm_vcpu * vcpu)557 static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
558 {
559 	return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
560 }
561 
walk_shadow_page_lockless_begin(struct kvm_vcpu * vcpu)562 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
563 {
564 	if (is_tdp_mmu_active(vcpu)) {
565 		kvm_tdp_mmu_walk_lockless_begin();
566 	} else {
567 		/*
568 		 * Prevent page table teardown by making any free-er wait during
569 		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
570 		 */
571 		local_irq_disable();
572 
573 		/*
574 		 * Make sure a following spte read is not reordered ahead of the write
575 		 * to vcpu->mode.
576 		 */
577 		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
578 	}
579 }
580 
walk_shadow_page_lockless_end(struct kvm_vcpu * vcpu)581 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
582 {
583 	if (is_tdp_mmu_active(vcpu)) {
584 		kvm_tdp_mmu_walk_lockless_end();
585 	} else {
586 		/*
587 		 * Make sure the write to vcpu->mode is not reordered in front of
588 		 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
589 		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
590 		 */
591 		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
592 		local_irq_enable();
593 	}
594 }
595 
mmu_topup_memory_caches(struct kvm_vcpu * vcpu,bool maybe_indirect)596 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
597 {
598 	int r;
599 
600 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
601 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
602 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
603 	if (r)
604 		return r;
605 	if (kvm_has_mirrored_tdp(vcpu->kvm)) {
606 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_external_spt_cache,
607 					       PT64_ROOT_MAX_LEVEL);
608 		if (r)
609 			return r;
610 	}
611 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
612 				       PT64_ROOT_MAX_LEVEL);
613 	if (r)
614 		return r;
615 	if (maybe_indirect) {
616 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
617 					       PT64_ROOT_MAX_LEVEL);
618 		if (r)
619 			return r;
620 	}
621 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
622 					  PT64_ROOT_MAX_LEVEL);
623 }
624 
mmu_free_memory_caches(struct kvm_vcpu * vcpu)625 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
626 {
627 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
628 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
629 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
630 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_external_spt_cache);
631 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
632 }
633 
mmu_free_pte_list_desc(struct pte_list_desc * pte_list_desc)634 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
635 {
636 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
637 }
638 
639 static bool sp_has_gptes(struct kvm_mmu_page *sp);
640 
kvm_mmu_page_get_gfn(struct kvm_mmu_page * sp,int index)641 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
642 {
643 	if (sp->role.passthrough)
644 		return sp->gfn;
645 
646 	if (sp->shadowed_translation)
647 		return sp->shadowed_translation[index] >> PAGE_SHIFT;
648 
649 	return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
650 }
651 
652 /*
653  * For leaf SPTEs, fetch the *guest* access permissions being shadowed. Note
654  * that the SPTE itself may have a more constrained access permissions that
655  * what the guest enforces. For example, a guest may create an executable
656  * huge PTE but KVM may disallow execution to mitigate iTLB multihit.
657  */
kvm_mmu_page_get_access(struct kvm_mmu_page * sp,int index)658 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
659 {
660 	if (sp->shadowed_translation)
661 		return sp->shadowed_translation[index] & ACC_ALL;
662 
663 	/*
664 	 * For direct MMUs (e.g. TDP or non-paging guests) or passthrough SPs,
665 	 * KVM is not shadowing any guest page tables, so the "guest access
666 	 * permissions" are just ACC_ALL.
667 	 *
668 	 * For direct SPs in indirect MMUs (shadow paging), i.e. when KVM
669 	 * is shadowing a guest huge page with small pages, the guest access
670 	 * permissions being shadowed are the access permissions of the huge
671 	 * page.
672 	 *
673 	 * In both cases, sp->role.access contains the correct access bits.
674 	 */
675 	return sp->role.access;
676 }
677 
kvm_mmu_page_set_translation(struct kvm_mmu_page * sp,int index,gfn_t gfn,unsigned int access)678 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
679 					 gfn_t gfn, unsigned int access)
680 {
681 	if (sp->shadowed_translation) {
682 		sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
683 		return;
684 	}
685 
686 	WARN_ONCE(access != kvm_mmu_page_get_access(sp, index),
687 	          "access mismatch under %s page %llx (expected %u, got %u)\n",
688 	          sp->role.passthrough ? "passthrough" : "direct",
689 	          sp->gfn, kvm_mmu_page_get_access(sp, index), access);
690 
691 	WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index),
692 	          "gfn mismatch under %s page %llx (expected %llx, got %llx)\n",
693 	          sp->role.passthrough ? "passthrough" : "direct",
694 	          sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);
695 }
696 
kvm_mmu_page_set_access(struct kvm_mmu_page * sp,int index,unsigned int access)697 static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
698 				    unsigned int access)
699 {
700 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, index);
701 
702 	kvm_mmu_page_set_translation(sp, index, gfn, access);
703 }
704 
705 /*
706  * Return the pointer to the large page information for a given gfn,
707  * handling slots that are not large page aligned.
708  */
lpage_info_slot(gfn_t gfn,const struct kvm_memory_slot * slot,int level)709 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
710 		const struct kvm_memory_slot *slot, int level)
711 {
712 	unsigned long idx;
713 
714 	idx = gfn_to_index(gfn, slot->base_gfn, level);
715 	return &slot->arch.lpage_info[level - 2][idx];
716 }
717 
718 /*
719  * The most significant bit in disallow_lpage tracks whether or not memory
720  * attributes are mixed, i.e. not identical for all gfns at the current level.
721  * The lower order bits are used to refcount other cases where a hugepage is
722  * disallowed, e.g. if KVM has shadow a page table at the gfn.
723  */
724 #define KVM_LPAGE_MIXED_FLAG	BIT(31)
725 
update_gfn_disallow_lpage_count(const struct kvm_memory_slot * slot,gfn_t gfn,int count)726 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
727 					    gfn_t gfn, int count)
728 {
729 	struct kvm_lpage_info *linfo;
730 	int old, i;
731 
732 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
733 		linfo = lpage_info_slot(gfn, slot, i);
734 
735 		old = linfo->disallow_lpage;
736 		linfo->disallow_lpage += count;
737 		WARN_ON_ONCE((old ^ linfo->disallow_lpage) & KVM_LPAGE_MIXED_FLAG);
738 	}
739 }
740 
kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot * slot,gfn_t gfn)741 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
742 {
743 	update_gfn_disallow_lpage_count(slot, gfn, 1);
744 }
745 
kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot * slot,gfn_t gfn)746 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
747 {
748 	update_gfn_disallow_lpage_count(slot, gfn, -1);
749 }
750 
account_shadowed(struct kvm * kvm,struct kvm_mmu_page * sp)751 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
752 {
753 	struct kvm_memslots *slots;
754 	struct kvm_memory_slot *slot;
755 	gfn_t gfn;
756 
757 	kvm->arch.indirect_shadow_pages++;
758 	/*
759 	 * Ensure indirect_shadow_pages is elevated prior to re-reading guest
760 	 * child PTEs in FNAME(gpte_changed), i.e. guarantee either in-flight
761 	 * emulated writes are visible before re-reading guest PTEs, or that
762 	 * an emulated write will see the elevated count and acquire mmu_lock
763 	 * to update SPTEs.  Pairs with the smp_mb() in kvm_mmu_track_write().
764 	 */
765 	smp_mb();
766 
767 	gfn = sp->gfn;
768 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
769 	slot = __gfn_to_memslot(slots, gfn);
770 
771 	/* the non-leaf shadow pages are keeping readonly. */
772 	if (sp->role.level > PG_LEVEL_4K)
773 		return __kvm_write_track_add_gfn(kvm, slot, gfn);
774 
775 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
776 
777 	if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
778 		kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);
779 }
780 
track_possible_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp,enum kvm_mmu_type mmu_type)781 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
782 				 enum kvm_mmu_type mmu_type)
783 {
784 	/*
785 	 * If it's possible to replace the shadow page with an NX huge page,
786 	 * i.e. if the shadow page is the only thing currently preventing KVM
787 	 * from using a huge page, add the shadow page to the list of "to be
788 	 * zapped for NX recovery" pages.  Note, the shadow page can already be
789 	 * on the list if KVM is reusing an existing shadow page, i.e. if KVM
790 	 * links a shadow page at multiple points.
791 	 */
792 	if (!list_empty(&sp->possible_nx_huge_page_link))
793 		return;
794 
795 	++kvm->stat.nx_lpage_splits;
796 	++kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages;
797 	list_add_tail(&sp->possible_nx_huge_page_link,
798 		      &kvm->arch.possible_nx_huge_pages[mmu_type].pages);
799 }
800 
account_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp,bool nx_huge_page_possible)801 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
802 				 bool nx_huge_page_possible)
803 {
804 	sp->nx_huge_page_disallowed = true;
805 
806 	if (nx_huge_page_possible)
807 		track_possible_nx_huge_page(kvm, sp, KVM_SHADOW_MMU);
808 }
809 
unaccount_shadowed(struct kvm * kvm,struct kvm_mmu_page * sp)810 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
811 {
812 	struct kvm_memslots *slots;
813 	struct kvm_memory_slot *slot;
814 	gfn_t gfn;
815 
816 	kvm->arch.indirect_shadow_pages--;
817 	gfn = sp->gfn;
818 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
819 	slot = __gfn_to_memslot(slots, gfn);
820 	if (sp->role.level > PG_LEVEL_4K)
821 		return __kvm_write_track_remove_gfn(kvm, slot, gfn);
822 
823 	kvm_mmu_gfn_allow_lpage(slot, gfn);
824 }
825 
untrack_possible_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp,enum kvm_mmu_type mmu_type)826 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
827 				   enum kvm_mmu_type mmu_type)
828 {
829 	if (list_empty(&sp->possible_nx_huge_page_link))
830 		return;
831 
832 	--kvm->stat.nx_lpage_splits;
833 	--kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages;
834 	list_del_init(&sp->possible_nx_huge_page_link);
835 }
836 
unaccount_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp)837 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
838 {
839 	sp->nx_huge_page_disallowed = false;
840 
841 	untrack_possible_nx_huge_page(kvm, sp, KVM_SHADOW_MMU);
842 }
843 
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu * vcpu,gfn_t gfn,bool no_dirty_log)844 static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
845 							   gfn_t gfn,
846 							   bool no_dirty_log)
847 {
848 	struct kvm_memory_slot *slot;
849 
850 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
851 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
852 		return NULL;
853 	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
854 		return NULL;
855 
856 	return slot;
857 }
858 
859 /*
860  * About rmap_head encoding:
861  *
862  * If the bit zero of rmap_head->val is clear, then it points to the only spte
863  * in this rmap chain. Otherwise, (rmap_head->val & ~3) points to a struct
864  * pte_list_desc containing more mappings.
865  */
866 #define KVM_RMAP_MANY	BIT(0)
867 
868 /*
869  * rmaps and PTE lists are mostly protected by mmu_lock (the shadow MMU always
870  * operates with mmu_lock held for write), but rmaps can be walked without
871  * holding mmu_lock so long as the caller can tolerate SPTEs in the rmap chain
872  * being zapped/dropped _while the rmap is locked_.
873  *
874  * Other than the KVM_RMAP_LOCKED flag, modifications to rmap entries must be
875  * done while holding mmu_lock for write.  This allows a task walking rmaps
876  * without holding mmu_lock to concurrently walk the same entries as a task
877  * that is holding mmu_lock but _not_ the rmap lock.  Neither task will modify
878  * the rmaps, thus the walks are stable.
879  *
880  * As alluded to above, SPTEs in rmaps are _not_ protected by KVM_RMAP_LOCKED,
881  * only the rmap chains themselves are protected.  E.g. holding an rmap's lock
882  * ensures all "struct pte_list_desc" fields are stable.
883  */
884 #define KVM_RMAP_LOCKED	BIT(1)
885 
__kvm_rmap_lock(struct kvm_rmap_head * rmap_head)886 static unsigned long __kvm_rmap_lock(struct kvm_rmap_head *rmap_head)
887 {
888 	unsigned long old_val, new_val;
889 
890 	lockdep_assert_preemption_disabled();
891 
892 	/*
893 	 * Elide the lock if the rmap is empty, as lockless walkers (read-only
894 	 * mode) don't need to (and can't) walk an empty rmap, nor can they add
895 	 * entries to the rmap.  I.e. the only paths that process empty rmaps
896 	 * do so while holding mmu_lock for write, and are mutually exclusive.
897 	 */
898 	old_val = atomic_long_read(&rmap_head->val);
899 	if (!old_val)
900 		return 0;
901 
902 	do {
903 		/*
904 		 * If the rmap is locked, wait for it to be unlocked before
905 		 * trying acquire the lock, e.g. to avoid bouncing the cache
906 		 * line.
907 		 */
908 		while (old_val & KVM_RMAP_LOCKED) {
909 			cpu_relax();
910 			old_val = atomic_long_read(&rmap_head->val);
911 		}
912 
913 		/*
914 		 * Recheck for an empty rmap, it may have been purged by the
915 		 * task that held the lock.
916 		 */
917 		if (!old_val)
918 			return 0;
919 
920 		new_val = old_val | KVM_RMAP_LOCKED;
921 	/*
922 	 * Use try_cmpxchg_acquire() to prevent reads and writes to the rmap
923 	 * from being reordered outside of the critical section created by
924 	 * __kvm_rmap_lock().
925 	 *
926 	 * Pairs with the atomic_long_set_release() in kvm_rmap_unlock().
927 	 *
928 	 * For the !old_val case, no ordering is needed, as there is no rmap
929 	 * to walk.
930 	 */
931 	} while (!atomic_long_try_cmpxchg_acquire(&rmap_head->val, &old_val, new_val));
932 
933 	/*
934 	 * Return the old value, i.e. _without_ the LOCKED bit set.  It's
935 	 * impossible for the return value to be 0 (see above), i.e. the read-
936 	 * only unlock flow can't get a false positive and fail to unlock.
937 	 */
938 	return old_val;
939 }
940 
kvm_rmap_lock(struct kvm * kvm,struct kvm_rmap_head * rmap_head)941 static unsigned long kvm_rmap_lock(struct kvm *kvm,
942 				   struct kvm_rmap_head *rmap_head)
943 {
944 	lockdep_assert_held_write(&kvm->mmu_lock);
945 
946 	return __kvm_rmap_lock(rmap_head);
947 }
948 
__kvm_rmap_unlock(struct kvm_rmap_head * rmap_head,unsigned long val)949 static void __kvm_rmap_unlock(struct kvm_rmap_head *rmap_head,
950 			      unsigned long val)
951 {
952 	KVM_MMU_WARN_ON(val & KVM_RMAP_LOCKED);
953 	/*
954 	 * Ensure that all accesses to the rmap have completed before unlocking
955 	 * the rmap.
956 	 *
957 	 * Pairs with the atomic_long_try_cmpxchg_acquire() in __kvm_rmap_lock().
958 	 */
959 	atomic_long_set_release(&rmap_head->val, val);
960 }
961 
kvm_rmap_unlock(struct kvm * kvm,struct kvm_rmap_head * rmap_head,unsigned long new_val)962 static void kvm_rmap_unlock(struct kvm *kvm,
963 			    struct kvm_rmap_head *rmap_head,
964 			    unsigned long new_val)
965 {
966 	lockdep_assert_held_write(&kvm->mmu_lock);
967 
968 	__kvm_rmap_unlock(rmap_head, new_val);
969 }
970 
kvm_rmap_get(struct kvm_rmap_head * rmap_head)971 static unsigned long kvm_rmap_get(struct kvm_rmap_head *rmap_head)
972 {
973 	return atomic_long_read(&rmap_head->val) & ~KVM_RMAP_LOCKED;
974 }
975 
976 /*
977  * If mmu_lock isn't held, rmaps can only be locked in read-only mode.  The
978  * actual locking is the same, but the caller is disallowed from modifying the
979  * rmap, and so the unlock flow is a nop if the rmap is/was empty.
980  */
kvm_rmap_lock_readonly(struct kvm_rmap_head * rmap_head)981 static unsigned long kvm_rmap_lock_readonly(struct kvm_rmap_head *rmap_head)
982 {
983 	unsigned long rmap_val;
984 
985 	preempt_disable();
986 	rmap_val = __kvm_rmap_lock(rmap_head);
987 
988 	if (!rmap_val)
989 		preempt_enable();
990 
991 	return rmap_val;
992 }
993 
kvm_rmap_unlock_readonly(struct kvm_rmap_head * rmap_head,unsigned long old_val)994 static void kvm_rmap_unlock_readonly(struct kvm_rmap_head *rmap_head,
995 				     unsigned long old_val)
996 {
997 	if (!old_val)
998 		return;
999 
1000 	KVM_MMU_WARN_ON(old_val != kvm_rmap_get(rmap_head));
1001 
1002 	__kvm_rmap_unlock(rmap_head, old_val);
1003 	preempt_enable();
1004 }
1005 
1006 /*
1007  * Returns the number of pointers in the rmap chain, not counting the new one.
1008  */
pte_list_add(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,u64 * spte,struct kvm_rmap_head * rmap_head)1009 static int pte_list_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1010 			u64 *spte, struct kvm_rmap_head *rmap_head)
1011 {
1012 	unsigned long old_val, new_val;
1013 	struct pte_list_desc *desc;
1014 	int count = 0;
1015 
1016 	old_val = kvm_rmap_lock(kvm, rmap_head);
1017 
1018 	if (!old_val) {
1019 		new_val = (unsigned long)spte;
1020 	} else if (!(old_val & KVM_RMAP_MANY)) {
1021 		desc = kvm_mmu_memory_cache_alloc(cache);
1022 		desc->sptes[0] = (u64 *)old_val;
1023 		desc->sptes[1] = spte;
1024 		desc->spte_count = 2;
1025 		desc->tail_count = 0;
1026 		new_val = (unsigned long)desc | KVM_RMAP_MANY;
1027 		++count;
1028 	} else {
1029 		desc = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY);
1030 		count = desc->tail_count + desc->spte_count;
1031 
1032 		/*
1033 		 * If the previous head is full, allocate a new head descriptor
1034 		 * as tail descriptors are always kept full.
1035 		 */
1036 		if (desc->spte_count == PTE_LIST_EXT) {
1037 			desc = kvm_mmu_memory_cache_alloc(cache);
1038 			desc->more = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY);
1039 			desc->spte_count = 0;
1040 			desc->tail_count = count;
1041 			new_val = (unsigned long)desc | KVM_RMAP_MANY;
1042 		} else {
1043 			new_val = old_val;
1044 		}
1045 		desc->sptes[desc->spte_count++] = spte;
1046 	}
1047 
1048 	kvm_rmap_unlock(kvm, rmap_head, new_val);
1049 
1050 	return count;
1051 }
1052 
pte_list_desc_remove_entry(struct kvm * kvm,unsigned long * rmap_val,struct pte_list_desc * desc,int i)1053 static void pte_list_desc_remove_entry(struct kvm *kvm, unsigned long *rmap_val,
1054 				       struct pte_list_desc *desc, int i)
1055 {
1056 	struct pte_list_desc *head_desc = (struct pte_list_desc *)(*rmap_val & ~KVM_RMAP_MANY);
1057 	int j = head_desc->spte_count - 1;
1058 
1059 	/*
1060 	 * The head descriptor should never be empty.  A new head is added only
1061 	 * when adding an entry and the previous head is full, and heads are
1062 	 * removed (this flow) when they become empty.
1063 	 */
1064 	KVM_BUG_ON_DATA_CORRUPTION(j < 0, kvm);
1065 
1066 	/*
1067 	 * Replace the to-be-freed SPTE with the last valid entry from the head
1068 	 * descriptor to ensure that tail descriptors are full at all times.
1069 	 * Note, this also means that tail_count is stable for each descriptor.
1070 	 */
1071 	desc->sptes[i] = head_desc->sptes[j];
1072 	head_desc->sptes[j] = NULL;
1073 	head_desc->spte_count--;
1074 	if (head_desc->spte_count)
1075 		return;
1076 
1077 	/*
1078 	 * The head descriptor is empty.  If there are no tail descriptors,
1079 	 * nullify the rmap head to mark the list as empty, else point the rmap
1080 	 * head at the next descriptor, i.e. the new head.
1081 	 */
1082 	if (!head_desc->more)
1083 		*rmap_val = 0;
1084 	else
1085 		*rmap_val = (unsigned long)head_desc->more | KVM_RMAP_MANY;
1086 	mmu_free_pte_list_desc(head_desc);
1087 }
1088 
pte_list_remove(struct kvm * kvm,u64 * spte,struct kvm_rmap_head * rmap_head)1089 static void pte_list_remove(struct kvm *kvm, u64 *spte,
1090 			    struct kvm_rmap_head *rmap_head)
1091 {
1092 	struct pte_list_desc *desc;
1093 	unsigned long rmap_val;
1094 	int i;
1095 
1096 	rmap_val = kvm_rmap_lock(kvm, rmap_head);
1097 	if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_val, kvm))
1098 		goto out;
1099 
1100 	if (!(rmap_val & KVM_RMAP_MANY)) {
1101 		if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_val != spte, kvm))
1102 			goto out;
1103 
1104 		rmap_val = 0;
1105 	} else {
1106 		desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1107 		while (desc) {
1108 			for (i = 0; i < desc->spte_count; ++i) {
1109 				if (desc->sptes[i] == spte) {
1110 					pte_list_desc_remove_entry(kvm, &rmap_val,
1111 								   desc, i);
1112 					goto out;
1113 				}
1114 			}
1115 			desc = desc->more;
1116 		}
1117 
1118 		KVM_BUG_ON_DATA_CORRUPTION(true, kvm);
1119 	}
1120 
1121 out:
1122 	kvm_rmap_unlock(kvm, rmap_head, rmap_val);
1123 }
1124 
kvm_zap_one_rmap_spte(struct kvm * kvm,struct kvm_rmap_head * rmap_head,u64 * sptep)1125 static void kvm_zap_one_rmap_spte(struct kvm *kvm,
1126 				  struct kvm_rmap_head *rmap_head, u64 *sptep)
1127 {
1128 	mmu_spte_clear_track_bits(kvm, sptep);
1129 	pte_list_remove(kvm, sptep, rmap_head);
1130 }
1131 
1132 /* Return true if at least one SPTE was zapped, false otherwise */
kvm_zap_all_rmap_sptes(struct kvm * kvm,struct kvm_rmap_head * rmap_head)1133 static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
1134 				   struct kvm_rmap_head *rmap_head)
1135 {
1136 	struct pte_list_desc *desc, *next;
1137 	unsigned long rmap_val;
1138 	int i;
1139 
1140 	rmap_val = kvm_rmap_lock(kvm, rmap_head);
1141 	if (!rmap_val)
1142 		return false;
1143 
1144 	if (!(rmap_val & KVM_RMAP_MANY)) {
1145 		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_val);
1146 		goto out;
1147 	}
1148 
1149 	desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1150 
1151 	for (; desc; desc = next) {
1152 		for (i = 0; i < desc->spte_count; i++)
1153 			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
1154 		next = desc->more;
1155 		mmu_free_pte_list_desc(desc);
1156 	}
1157 out:
1158 	/* rmap_head is meaningless now, remember to reset it */
1159 	kvm_rmap_unlock(kvm, rmap_head, 0);
1160 	return true;
1161 }
1162 
pte_list_count(struct kvm_rmap_head * rmap_head)1163 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
1164 {
1165 	unsigned long rmap_val = kvm_rmap_get(rmap_head);
1166 	struct pte_list_desc *desc;
1167 
1168 	if (!rmap_val)
1169 		return 0;
1170 	else if (!(rmap_val & KVM_RMAP_MANY))
1171 		return 1;
1172 
1173 	desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1174 	return desc->tail_count + desc->spte_count;
1175 }
1176 
gfn_to_rmap(gfn_t gfn,int level,const struct kvm_memory_slot * slot)1177 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1178 					 const struct kvm_memory_slot *slot)
1179 {
1180 	unsigned long idx;
1181 
1182 	idx = gfn_to_index(gfn, slot->base_gfn, level);
1183 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1184 }
1185 
rmap_remove(struct kvm * kvm,u64 * spte)1186 static void rmap_remove(struct kvm *kvm, u64 *spte)
1187 {
1188 	struct kvm_memslots *slots;
1189 	struct kvm_memory_slot *slot;
1190 	struct kvm_mmu_page *sp;
1191 	gfn_t gfn;
1192 	struct kvm_rmap_head *rmap_head;
1193 
1194 	sp = sptep_to_sp(spte);
1195 	gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
1196 
1197 	/*
1198 	 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
1199 	 * so we have to determine which memslots to use based on context
1200 	 * information in sp->role.
1201 	 */
1202 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
1203 
1204 	slot = __gfn_to_memslot(slots, gfn);
1205 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1206 
1207 	pte_list_remove(kvm, spte, rmap_head);
1208 }
1209 
1210 /*
1211  * Used by the following functions to iterate through the sptes linked by a
1212  * rmap.  All fields are private and not assumed to be used outside.
1213  */
1214 struct rmap_iterator {
1215 	/* private fields */
1216 	struct rmap_head *head;
1217 	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
1218 	int pos;			/* index of the sptep */
1219 };
1220 
1221 /*
1222  * Iteration must be started by this function.  This should also be used after
1223  * removing/dropping sptes from the rmap link because in such cases the
1224  * information in the iterator may not be valid.
1225  *
1226  * Returns sptep if found, NULL otherwise.
1227  */
rmap_get_first(struct kvm_rmap_head * rmap_head,struct rmap_iterator * iter)1228 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1229 			   struct rmap_iterator *iter)
1230 {
1231 	unsigned long rmap_val = kvm_rmap_get(rmap_head);
1232 
1233 	if (!rmap_val)
1234 		return NULL;
1235 
1236 	if (!(rmap_val & KVM_RMAP_MANY)) {
1237 		iter->desc = NULL;
1238 		return (u64 *)rmap_val;
1239 	}
1240 
1241 	iter->desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1242 	iter->pos = 0;
1243 	return iter->desc->sptes[iter->pos];
1244 }
1245 
1246 /*
1247  * Must be used with a valid iterator: e.g. after rmap_get_first().
1248  *
1249  * Returns sptep if found, NULL otherwise.
1250  */
rmap_get_next(struct rmap_iterator * iter)1251 static u64 *rmap_get_next(struct rmap_iterator *iter)
1252 {
1253 	if (iter->desc) {
1254 		if (iter->pos < PTE_LIST_EXT - 1) {
1255 			++iter->pos;
1256 			if (iter->desc->sptes[iter->pos])
1257 				return iter->desc->sptes[iter->pos];
1258 		}
1259 
1260 		iter->desc = iter->desc->more;
1261 
1262 		if (iter->desc) {
1263 			iter->pos = 0;
1264 			/* desc->sptes[0] cannot be NULL */
1265 			return iter->desc->sptes[iter->pos];
1266 		}
1267 	}
1268 
1269 	return NULL;
1270 }
1271 
1272 #define __for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)	\
1273 	for (_sptep_ = rmap_get_first(_rmap_head_, _iter_);	\
1274 	     _sptep_; _sptep_ = rmap_get_next(_iter_))
1275 
1276 #define for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)			\
1277 	__for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)			\
1278 		if (!WARN_ON_ONCE(!is_shadow_present_pte(*(_sptep_))))	\
1279 
1280 #define for_each_rmap_spte_lockless(_rmap_head_, _iter_, _sptep_, _spte_)	\
1281 	__for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)			\
1282 		if (is_shadow_present_pte(_spte_ = mmu_spte_get_lockless(sptep)))
1283 
drop_spte(struct kvm * kvm,u64 * sptep)1284 static void drop_spte(struct kvm *kvm, u64 *sptep)
1285 {
1286 	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1287 
1288 	if (is_shadow_present_pte(old_spte))
1289 		rmap_remove(kvm, sptep);
1290 }
1291 
1292 /*
1293  * Write-protect on the specified @sptep, @pt_protect indicates whether
1294  * spte write-protection is caused by protecting shadow page table.
1295  *
1296  * Note: write protection is difference between dirty logging and spte
1297  * protection:
1298  * - for dirty logging, the spte can be set to writable at anytime if
1299  *   its dirty bitmap is properly set.
1300  * - for spte protection, the spte can be writable only after unsync-ing
1301  *   shadow page.
1302  *
1303  * Return true if tlb need be flushed.
1304  */
spte_write_protect(u64 * sptep,bool pt_protect)1305 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1306 {
1307 	u64 spte = *sptep;
1308 
1309 	if (!is_writable_pte(spte) &&
1310 	    !(pt_protect && is_mmu_writable_spte(spte)))
1311 		return false;
1312 
1313 	if (pt_protect)
1314 		spte &= ~shadow_mmu_writable_mask;
1315 	spte = spte & ~PT_WRITABLE_MASK;
1316 
1317 	return mmu_spte_update(sptep, spte);
1318 }
1319 
rmap_write_protect(struct kvm_rmap_head * rmap_head,bool pt_protect)1320 static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
1321 			       bool pt_protect)
1322 {
1323 	u64 *sptep;
1324 	struct rmap_iterator iter;
1325 	bool flush = false;
1326 
1327 	for_each_rmap_spte(rmap_head, &iter, sptep)
1328 		flush |= spte_write_protect(sptep, pt_protect);
1329 
1330 	return flush;
1331 }
1332 
spte_clear_dirty(u64 * sptep)1333 static bool spte_clear_dirty(u64 *sptep)
1334 {
1335 	u64 spte = *sptep;
1336 
1337 	KVM_MMU_WARN_ON(!spte_ad_enabled(spte));
1338 	spte &= ~shadow_dirty_mask;
1339 	return mmu_spte_update(sptep, spte);
1340 }
1341 
1342 /*
1343  * Gets the GFN ready for another round of dirty logging by clearing the
1344  *	- D bit on ad-enabled SPTEs, and
1345  *	- W bit on ad-disabled SPTEs.
1346  * Returns true iff any D or W bits were cleared.
1347  */
__rmap_clear_dirty(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot)1348 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1349 			       const struct kvm_memory_slot *slot)
1350 {
1351 	u64 *sptep;
1352 	struct rmap_iterator iter;
1353 	bool flush = false;
1354 
1355 	for_each_rmap_spte(rmap_head, &iter, sptep) {
1356 		if (spte_ad_need_write_protect(*sptep))
1357 			flush |= test_and_clear_bit(PT_WRITABLE_SHIFT,
1358 						    (unsigned long *)sptep);
1359 		else
1360 			flush |= spte_clear_dirty(sptep);
1361 	}
1362 
1363 	return flush;
1364 }
1365 
kvm_mmu_write_protect_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask)1366 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1367 				     struct kvm_memory_slot *slot,
1368 				     gfn_t gfn_offset, unsigned long mask)
1369 {
1370 	struct kvm_rmap_head *rmap_head;
1371 
1372 	if (tdp_mmu_enabled)
1373 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1374 				slot->base_gfn + gfn_offset, mask, true);
1375 
1376 	if (!kvm_memslots_have_rmaps(kvm))
1377 		return;
1378 
1379 	while (mask) {
1380 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1381 					PG_LEVEL_4K, slot);
1382 		rmap_write_protect(rmap_head, false);
1383 
1384 		/* clear the first set bit */
1385 		mask &= mask - 1;
1386 	}
1387 }
1388 
kvm_mmu_clear_dirty_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask)1389 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1390 					 struct kvm_memory_slot *slot,
1391 					 gfn_t gfn_offset, unsigned long mask)
1392 {
1393 	struct kvm_rmap_head *rmap_head;
1394 
1395 	if (tdp_mmu_enabled)
1396 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1397 				slot->base_gfn + gfn_offset, mask, false);
1398 
1399 	if (!kvm_memslots_have_rmaps(kvm))
1400 		return;
1401 
1402 	while (mask) {
1403 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1404 					PG_LEVEL_4K, slot);
1405 		__rmap_clear_dirty(kvm, rmap_head, slot);
1406 
1407 		/* clear the first set bit */
1408 		mask &= mask - 1;
1409 	}
1410 }
1411 
kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask)1412 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1413 				struct kvm_memory_slot *slot,
1414 				gfn_t gfn_offset, unsigned long mask)
1415 {
1416 	/*
1417 	 * If the slot was assumed to be "initially all dirty", write-protect
1418 	 * huge pages to ensure they are split to 4KiB on the first write (KVM
1419 	 * dirty logs at 4KiB granularity). If eager page splitting is enabled,
1420 	 * immediately try to split huge pages, e.g. so that vCPUs don't get
1421 	 * saddled with the cost of splitting.
1422 	 *
1423 	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
1424 	 * of memslot has no such restriction, so the range can cross two large
1425 	 * pages.
1426 	 */
1427 	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1428 		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1429 		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1430 
1431 		if (READ_ONCE(eager_page_split))
1432 			kvm_mmu_try_split_huge_pages(kvm, slot, start, end + 1, PG_LEVEL_4K);
1433 
1434 		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1435 
1436 		/* Cross two large pages? */
1437 		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1438 		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1439 			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1440 						       PG_LEVEL_2M);
1441 	}
1442 
1443 	/*
1444 	 * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in
1445 	 * mask.  If PML is enabled and the GFN doesn't need to be write-
1446 	 * protected for other reasons, e.g. shadow paging, clear the Dirty bit.
1447 	 * Otherwise clear the Writable bit.
1448 	 *
1449 	 * Note that kvm_mmu_clear_dirty_pt_masked() is called whenever PML is
1450 	 * enabled but it chooses between clearing the Dirty bit and Writeable
1451 	 * bit based on the context.
1452 	 */
1453 	if (kvm->arch.cpu_dirty_log_size)
1454 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1455 	else
1456 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1457 }
1458 
kvm_cpu_dirty_log_size(struct kvm * kvm)1459 int kvm_cpu_dirty_log_size(struct kvm *kvm)
1460 {
1461 	return kvm->arch.cpu_dirty_log_size;
1462 }
1463 
kvm_mmu_slot_gfn_write_protect(struct kvm * kvm,struct kvm_memory_slot * slot,u64 gfn,int min_level)1464 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1465 				    struct kvm_memory_slot *slot, u64 gfn,
1466 				    int min_level)
1467 {
1468 	struct kvm_rmap_head *rmap_head;
1469 	int i;
1470 	bool write_protected = false;
1471 
1472 	if (kvm_memslots_have_rmaps(kvm)) {
1473 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1474 			rmap_head = gfn_to_rmap(gfn, i, slot);
1475 			write_protected |= rmap_write_protect(rmap_head, true);
1476 		}
1477 	}
1478 
1479 	if (tdp_mmu_enabled)
1480 		write_protected |=
1481 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1482 
1483 	return write_protected;
1484 }
1485 
kvm_vcpu_write_protect_gfn(struct kvm_vcpu * vcpu,u64 gfn)1486 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
1487 {
1488 	struct kvm_memory_slot *slot;
1489 
1490 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1491 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1492 }
1493 
kvm_zap_rmap(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot)1494 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1495 			 const struct kvm_memory_slot *slot)
1496 {
1497 	return kvm_zap_all_rmap_sptes(kvm, rmap_head);
1498 }
1499 
1500 struct slot_rmap_walk_iterator {
1501 	/* input fields. */
1502 	const struct kvm_memory_slot *slot;
1503 	gfn_t start_gfn;
1504 	gfn_t end_gfn;
1505 	int start_level;
1506 	int end_level;
1507 
1508 	/* output fields. */
1509 	gfn_t gfn;
1510 	struct kvm_rmap_head *rmap;
1511 	int level;
1512 
1513 	/* private field. */
1514 	struct kvm_rmap_head *end_rmap;
1515 };
1516 
rmap_walk_init_level(struct slot_rmap_walk_iterator * iterator,int level)1517 static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
1518 				 int level)
1519 {
1520 	iterator->level = level;
1521 	iterator->gfn = iterator->start_gfn;
1522 	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1523 	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1524 }
1525 
slot_rmap_walk_init(struct slot_rmap_walk_iterator * iterator,const struct kvm_memory_slot * slot,int start_level,int end_level,gfn_t start_gfn,gfn_t end_gfn)1526 static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1527 				const struct kvm_memory_slot *slot,
1528 				int start_level, int end_level,
1529 				gfn_t start_gfn, gfn_t end_gfn)
1530 {
1531 	iterator->slot = slot;
1532 	iterator->start_level = start_level;
1533 	iterator->end_level = end_level;
1534 	iterator->start_gfn = start_gfn;
1535 	iterator->end_gfn = end_gfn;
1536 
1537 	rmap_walk_init_level(iterator, iterator->start_level);
1538 }
1539 
slot_rmap_walk_okay(struct slot_rmap_walk_iterator * iterator)1540 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1541 {
1542 	return !!iterator->rmap;
1543 }
1544 
slot_rmap_walk_next(struct slot_rmap_walk_iterator * iterator)1545 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1546 {
1547 	while (++iterator->rmap <= iterator->end_rmap) {
1548 		iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level);
1549 
1550 		if (atomic_long_read(&iterator->rmap->val))
1551 			return;
1552 	}
1553 
1554 	if (++iterator->level > iterator->end_level) {
1555 		iterator->rmap = NULL;
1556 		return;
1557 	}
1558 
1559 	rmap_walk_init_level(iterator, iterator->level);
1560 }
1561 
1562 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
1563 	   _start_gfn, _end_gfn, _iter_)				\
1564 	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
1565 				 _end_level_, _start_gfn, _end_gfn);	\
1566 	     slot_rmap_walk_okay(_iter_);				\
1567 	     slot_rmap_walk_next(_iter_))
1568 
1569 /* The return value indicates if tlb flush on all vcpus is needed. */
1570 typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
1571 				    struct kvm_rmap_head *rmap_head,
1572 				    const struct kvm_memory_slot *slot);
1573 
__walk_slot_rmaps(struct kvm * kvm,const struct kvm_memory_slot * slot,slot_rmaps_handler fn,int start_level,int end_level,gfn_t start_gfn,gfn_t end_gfn,bool can_yield,bool flush_on_yield,bool flush)1574 static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
1575 					      const struct kvm_memory_slot *slot,
1576 					      slot_rmaps_handler fn,
1577 					      int start_level, int end_level,
1578 					      gfn_t start_gfn, gfn_t end_gfn,
1579 					      bool can_yield, bool flush_on_yield,
1580 					      bool flush)
1581 {
1582 	struct slot_rmap_walk_iterator iterator;
1583 
1584 	lockdep_assert_held_write(&kvm->mmu_lock);
1585 
1586 	for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
1587 			end_gfn, &iterator) {
1588 		if (iterator.rmap)
1589 			flush |= fn(kvm, iterator.rmap, slot);
1590 
1591 		if (!can_yield)
1592 			continue;
1593 
1594 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
1595 			if (flush && flush_on_yield) {
1596 				kvm_flush_remote_tlbs_range(kvm, start_gfn,
1597 							    iterator.gfn - start_gfn + 1);
1598 				flush = false;
1599 			}
1600 			cond_resched_rwlock_write(&kvm->mmu_lock);
1601 		}
1602 	}
1603 
1604 	return flush;
1605 }
1606 
walk_slot_rmaps(struct kvm * kvm,const struct kvm_memory_slot * slot,slot_rmaps_handler fn,int start_level,int end_level,bool flush_on_yield)1607 static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
1608 					    const struct kvm_memory_slot *slot,
1609 					    slot_rmaps_handler fn,
1610 					    int start_level, int end_level,
1611 					    bool flush_on_yield)
1612 {
1613 	return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
1614 				 slot->base_gfn, slot->base_gfn + slot->npages - 1,
1615 				 true, flush_on_yield, false);
1616 }
1617 
walk_slot_rmaps_4k(struct kvm * kvm,const struct kvm_memory_slot * slot,slot_rmaps_handler fn,bool flush_on_yield)1618 static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
1619 					       const struct kvm_memory_slot *slot,
1620 					       slot_rmaps_handler fn,
1621 					       bool flush_on_yield)
1622 {
1623 	return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
1624 }
1625 
__kvm_rmap_zap_gfn_range(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t start,gfn_t end,bool can_yield,bool flush)1626 static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm,
1627 				     const struct kvm_memory_slot *slot,
1628 				     gfn_t start, gfn_t end, bool can_yield,
1629 				     bool flush)
1630 {
1631 	return __walk_slot_rmaps(kvm, slot, kvm_zap_rmap,
1632 				 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1633 				 start, end - 1, can_yield, true, flush);
1634 }
1635 
kvm_unmap_gfn_range(struct kvm * kvm,struct kvm_gfn_range * range)1636 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1637 {
1638 	bool flush = false;
1639 
1640 	/*
1641 	 * To prevent races with vCPUs faulting in a gfn using stale data,
1642 	 * zapping a gfn range must be protected by mmu_invalidate_in_progress
1643 	 * (and mmu_invalidate_seq).  The only exception is memslot deletion;
1644 	 * in that case, SRCU synchronization ensures that SPTEs are zapped
1645 	 * after all vCPUs have unlocked SRCU, guaranteeing that vCPUs see the
1646 	 * invalid slot.
1647 	 */
1648 	lockdep_assert_once(kvm->mmu_invalidate_in_progress ||
1649 			    lockdep_is_held(&kvm->slots_lock));
1650 
1651 	if (kvm_memslots_have_rmaps(kvm))
1652 		flush = __kvm_rmap_zap_gfn_range(kvm, range->slot,
1653 						 range->start, range->end,
1654 						 range->may_block, flush);
1655 
1656 	if (tdp_mmu_enabled)
1657 		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1658 
1659 	if (kvm_x86_ops.set_apic_access_page_addr &&
1660 	    range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
1661 		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
1662 
1663 	return flush;
1664 }
1665 
1666 #define RMAP_RECYCLE_THRESHOLD 1000
1667 
__rmap_add(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,const struct kvm_memory_slot * slot,u64 * spte,gfn_t gfn,unsigned int access)1668 static void __rmap_add(struct kvm *kvm,
1669 		       struct kvm_mmu_memory_cache *cache,
1670 		       const struct kvm_memory_slot *slot,
1671 		       u64 *spte, gfn_t gfn, unsigned int access)
1672 {
1673 	struct kvm_mmu_page *sp;
1674 	struct kvm_rmap_head *rmap_head;
1675 	int rmap_count;
1676 
1677 	sp = sptep_to_sp(spte);
1678 	kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1679 	kvm_update_page_stats(kvm, sp->role.level, 1);
1680 
1681 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1682 	rmap_count = pte_list_add(kvm, cache, spte, rmap_head);
1683 
1684 	if (rmap_count > kvm->stat.max_mmu_rmap_size)
1685 		kvm->stat.max_mmu_rmap_size = rmap_count;
1686 	if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
1687 		kvm_zap_all_rmap_sptes(kvm, rmap_head);
1688 		kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1689 	}
1690 }
1691 
rmap_add(struct kvm_vcpu * vcpu,const struct kvm_memory_slot * slot,u64 * spte,gfn_t gfn,unsigned int access)1692 static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
1693 		     u64 *spte, gfn_t gfn, unsigned int access)
1694 {
1695 	struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
1696 
1697 	__rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1698 }
1699 
kvm_rmap_age_gfn_range(struct kvm * kvm,struct kvm_gfn_range * range,bool test_only)1700 static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
1701 				   struct kvm_gfn_range *range,
1702 				   bool test_only)
1703 {
1704 	struct kvm_rmap_head *rmap_head;
1705 	struct rmap_iterator iter;
1706 	unsigned long rmap_val;
1707 	bool young = false;
1708 	u64 *sptep;
1709 	gfn_t gfn;
1710 	int level;
1711 	u64 spte;
1712 
1713 	for (level = PG_LEVEL_4K; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
1714 		for (gfn = range->start; gfn < range->end;
1715 		     gfn += KVM_PAGES_PER_HPAGE(level)) {
1716 			rmap_head = gfn_to_rmap(gfn, level, range->slot);
1717 			rmap_val = kvm_rmap_lock_readonly(rmap_head);
1718 
1719 			for_each_rmap_spte_lockless(rmap_head, &iter, sptep, spte) {
1720 				if (!is_accessed_spte(spte))
1721 					continue;
1722 
1723 				if (test_only) {
1724 					kvm_rmap_unlock_readonly(rmap_head, rmap_val);
1725 					return true;
1726 				}
1727 
1728 				if (spte_ad_enabled(spte))
1729 					clear_bit((ffs(shadow_accessed_mask) - 1),
1730 						  (unsigned long *)sptep);
1731 				else
1732 					/*
1733 					 * If the following cmpxchg fails, the
1734 					 * spte is being concurrently modified
1735 					 * and should most likely stay young.
1736 					 */
1737 					cmpxchg64(sptep, spte,
1738 					      mark_spte_for_access_track(spte));
1739 				young = true;
1740 			}
1741 
1742 			kvm_rmap_unlock_readonly(rmap_head, rmap_val);
1743 		}
1744 	}
1745 	return young;
1746 }
1747 
kvm_may_have_shadow_mmu_sptes(struct kvm * kvm)1748 static bool kvm_may_have_shadow_mmu_sptes(struct kvm *kvm)
1749 {
1750 	return !tdp_mmu_enabled || READ_ONCE(kvm->arch.indirect_shadow_pages);
1751 }
1752 
kvm_age_gfn(struct kvm * kvm,struct kvm_gfn_range * range)1753 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1754 {
1755 	bool young = false;
1756 
1757 	if (tdp_mmu_enabled)
1758 		young = kvm_tdp_mmu_age_gfn_range(kvm, range);
1759 
1760 	if (kvm_may_have_shadow_mmu_sptes(kvm))
1761 		young |= kvm_rmap_age_gfn_range(kvm, range, false);
1762 
1763 	return young;
1764 }
1765 
kvm_test_age_gfn(struct kvm * kvm,struct kvm_gfn_range * range)1766 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1767 {
1768 	bool young = false;
1769 
1770 	if (tdp_mmu_enabled)
1771 		young = kvm_tdp_mmu_test_age_gfn(kvm, range);
1772 
1773 	if (young)
1774 		return young;
1775 
1776 	if (kvm_may_have_shadow_mmu_sptes(kvm))
1777 		young |= kvm_rmap_age_gfn_range(kvm, range, true);
1778 
1779 	return young;
1780 }
1781 
kvm_mmu_check_sptes_at_free(struct kvm_mmu_page * sp)1782 static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
1783 {
1784 #ifdef CONFIG_KVM_PROVE_MMU
1785 	int i;
1786 
1787 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
1788 		if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i])))
1789 			pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free",
1790 					   sp->spt[i], &sp->spt[i],
1791 					   kvm_mmu_page_get_gfn(sp, i));
1792 	}
1793 #endif
1794 }
1795 
kvm_account_mmu_page(struct kvm * kvm,struct kvm_mmu_page * sp)1796 static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1797 {
1798 	kvm->arch.n_used_mmu_pages++;
1799 	kvm_account_pgtable_pages((void *)sp->spt, +1);
1800 }
1801 
kvm_unaccount_mmu_page(struct kvm * kvm,struct kvm_mmu_page * sp)1802 static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1803 {
1804 	kvm->arch.n_used_mmu_pages--;
1805 	kvm_account_pgtable_pages((void *)sp->spt, -1);
1806 }
1807 
kvm_mmu_free_shadow_page(struct kvm_mmu_page * sp)1808 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
1809 {
1810 	kvm_mmu_check_sptes_at_free(sp);
1811 
1812 	hlist_del(&sp->hash_link);
1813 	list_del(&sp->link);
1814 	free_page((unsigned long)sp->spt);
1815 	free_page((unsigned long)sp->shadowed_translation);
1816 	kmem_cache_free(mmu_page_header_cache, sp);
1817 }
1818 
kvm_page_table_hashfn(gfn_t gfn)1819 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1820 {
1821 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1822 }
1823 
mmu_page_add_parent_pte(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,struct kvm_mmu_page * sp,u64 * parent_pte)1824 static void mmu_page_add_parent_pte(struct kvm *kvm,
1825 				    struct kvm_mmu_memory_cache *cache,
1826 				    struct kvm_mmu_page *sp, u64 *parent_pte)
1827 {
1828 	if (!parent_pte)
1829 		return;
1830 
1831 	pte_list_add(kvm, cache, parent_pte, &sp->parent_ptes);
1832 }
1833 
mmu_page_remove_parent_pte(struct kvm * kvm,struct kvm_mmu_page * sp,u64 * parent_pte)1834 static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1835 				       u64 *parent_pte)
1836 {
1837 	pte_list_remove(kvm, parent_pte, &sp->parent_ptes);
1838 }
1839 
drop_parent_pte(struct kvm * kvm,struct kvm_mmu_page * sp,u64 * parent_pte)1840 static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1841 			    u64 *parent_pte)
1842 {
1843 	mmu_page_remove_parent_pte(kvm, sp, parent_pte);
1844 	mmu_spte_clear_no_track(parent_pte);
1845 }
1846 
1847 static void mark_unsync(u64 *spte);
kvm_mmu_mark_parents_unsync(struct kvm_mmu_page * sp)1848 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1849 {
1850 	u64 *sptep;
1851 	struct rmap_iterator iter;
1852 
1853 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1854 		mark_unsync(sptep);
1855 	}
1856 }
1857 
mark_unsync(u64 * spte)1858 static void mark_unsync(u64 *spte)
1859 {
1860 	struct kvm_mmu_page *sp;
1861 
1862 	sp = sptep_to_sp(spte);
1863 	if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap))
1864 		return;
1865 	if (sp->unsync_children++)
1866 		return;
1867 	kvm_mmu_mark_parents_unsync(sp);
1868 }
1869 
1870 #define KVM_PAGE_ARRAY_NR 16
1871 
1872 struct kvm_mmu_pages {
1873 	struct mmu_page_and_offset {
1874 		struct kvm_mmu_page *sp;
1875 		unsigned int idx;
1876 	} page[KVM_PAGE_ARRAY_NR];
1877 	unsigned int nr;
1878 };
1879 
mmu_pages_add(struct kvm_mmu_pages * pvec,struct kvm_mmu_page * sp,int idx)1880 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1881 			 int idx)
1882 {
1883 	int i;
1884 
1885 	if (sp->unsync)
1886 		for (i=0; i < pvec->nr; i++)
1887 			if (pvec->page[i].sp == sp)
1888 				return 0;
1889 
1890 	pvec->page[pvec->nr].sp = sp;
1891 	pvec->page[pvec->nr].idx = idx;
1892 	pvec->nr++;
1893 	return (pvec->nr == KVM_PAGE_ARRAY_NR);
1894 }
1895 
clear_unsync_child_bit(struct kvm_mmu_page * sp,int idx)1896 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1897 {
1898 	--sp->unsync_children;
1899 	WARN_ON_ONCE((int)sp->unsync_children < 0);
1900 	__clear_bit(idx, sp->unsync_child_bitmap);
1901 }
1902 
__mmu_unsync_walk(struct kvm_mmu_page * sp,struct kvm_mmu_pages * pvec)1903 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1904 			   struct kvm_mmu_pages *pvec)
1905 {
1906 	int i, ret, nr_unsync_leaf = 0;
1907 
1908 	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1909 		struct kvm_mmu_page *child;
1910 		u64 ent = sp->spt[i];
1911 
1912 		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1913 			clear_unsync_child_bit(sp, i);
1914 			continue;
1915 		}
1916 
1917 		child = spte_to_child_sp(ent);
1918 
1919 		if (child->unsync_children) {
1920 			if (mmu_pages_add(pvec, child, i))
1921 				return -ENOSPC;
1922 
1923 			ret = __mmu_unsync_walk(child, pvec);
1924 			if (!ret) {
1925 				clear_unsync_child_bit(sp, i);
1926 				continue;
1927 			} else if (ret > 0) {
1928 				nr_unsync_leaf += ret;
1929 			} else
1930 				return ret;
1931 		} else if (child->unsync) {
1932 			nr_unsync_leaf++;
1933 			if (mmu_pages_add(pvec, child, i))
1934 				return -ENOSPC;
1935 		} else
1936 			clear_unsync_child_bit(sp, i);
1937 	}
1938 
1939 	return nr_unsync_leaf;
1940 }
1941 
1942 #define INVALID_INDEX (-1)
1943 
mmu_unsync_walk(struct kvm_mmu_page * sp,struct kvm_mmu_pages * pvec)1944 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1945 			   struct kvm_mmu_pages *pvec)
1946 {
1947 	pvec->nr = 0;
1948 	if (!sp->unsync_children)
1949 		return 0;
1950 
1951 	mmu_pages_add(pvec, sp, INVALID_INDEX);
1952 	return __mmu_unsync_walk(sp, pvec);
1953 }
1954 
kvm_unlink_unsync_page(struct kvm * kvm,struct kvm_mmu_page * sp)1955 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1956 {
1957 	WARN_ON_ONCE(!sp->unsync);
1958 	trace_kvm_mmu_sync_page(sp);
1959 	sp->unsync = 0;
1960 	--kvm->stat.mmu_unsync;
1961 }
1962 
1963 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1964 				     struct list_head *invalid_list);
1965 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1966 				    struct list_head *invalid_list);
1967 
sp_has_gptes(struct kvm_mmu_page * sp)1968 static bool sp_has_gptes(struct kvm_mmu_page *sp)
1969 {
1970 	if (sp->role.direct)
1971 		return false;
1972 
1973 	if (sp->role.passthrough)
1974 		return false;
1975 
1976 	return true;
1977 }
1978 
1979 static __ro_after_init HLIST_HEAD(empty_page_hash);
1980 
kvm_get_mmu_page_hash(struct kvm * kvm,gfn_t gfn)1981 static struct hlist_head *kvm_get_mmu_page_hash(struct kvm *kvm, gfn_t gfn)
1982 {
1983 	/*
1984 	 * Ensure the load of the hash table pointer itself is ordered before
1985 	 * loads to walk the table.  The pointer is set at runtime outside of
1986 	 * mmu_lock when the TDP MMU is enabled, i.e. when the hash table of
1987 	 * shadow pages becomes necessary only when KVM needs to shadow L1's
1988 	 * TDP for an L2 guest.  Pairs with the smp_store_release() in
1989 	 * kvm_mmu_alloc_page_hash().
1990 	 */
1991 	struct hlist_head *page_hash = smp_load_acquire(&kvm->arch.mmu_page_hash);
1992 
1993 	lockdep_assert_held(&kvm->mmu_lock);
1994 
1995 	if (!page_hash)
1996 		return &empty_page_hash;
1997 
1998 	return &page_hash[kvm_page_table_hashfn(gfn)];
1999 }
2000 
2001 #define for_each_valid_sp(_kvm, _sp, _list)				\
2002 	hlist_for_each_entry(_sp, _list, hash_link)			\
2003 		if (is_obsolete_sp((_kvm), (_sp))) {			\
2004 		} else
2005 
2006 #define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)		\
2007 	for_each_valid_sp(_kvm, _sp, kvm_get_mmu_page_hash(_kvm, _gfn))	\
2008 		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
2009 
kvm_sync_page_check(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp)2010 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2011 {
2012 	union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
2013 
2014 	/*
2015 	 * Ignore various flags when verifying that it's safe to sync a shadow
2016 	 * page using the current MMU context.
2017 	 *
2018 	 *  - level: not part of the overall MMU role and will never match as the MMU's
2019 	 *           level tracks the root level
2020 	 *  - access: updated based on the new guest PTE
2021 	 *  - quadrant: not part of the overall MMU role (similar to level)
2022 	 */
2023 	const union kvm_mmu_page_role sync_role_ign = {
2024 		.level = 0xf,
2025 		.access = 0x7,
2026 		.quadrant = 0x3,
2027 		.passthrough = 0x1,
2028 	};
2029 
2030 	/*
2031 	 * Direct pages can never be unsync, and KVM should never attempt to
2032 	 * sync a shadow page for a different MMU context, e.g. if the role
2033 	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
2034 	 * reserved bits checks will be wrong, etc...
2035 	 */
2036 	if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
2037 			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
2038 		return false;
2039 
2040 	return true;
2041 }
2042 
kvm_sync_spte(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp,int i)2043 static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
2044 {
2045 	/* sp->spt[i] has initial value of shadow page table allocation */
2046 	if (sp->spt[i] == SHADOW_NONPRESENT_VALUE)
2047 		return 0;
2048 
2049 	return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
2050 }
2051 
__kvm_sync_page(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp)2052 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2053 {
2054 	int flush = 0;
2055 	int i;
2056 
2057 	if (!kvm_sync_page_check(vcpu, sp))
2058 		return -1;
2059 
2060 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
2061 		int ret = kvm_sync_spte(vcpu, sp, i);
2062 
2063 		if (ret < -1)
2064 			return -1;
2065 		flush |= ret;
2066 	}
2067 
2068 	/*
2069 	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
2070 	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
2071 	 * unmap or dirty logging event doesn't fail to flush.  The guest is
2072 	 * responsible for flushing the TLB to ensure any changes in protection
2073 	 * bits are recognized, i.e. until the guest flushes or page faults on
2074 	 * a relevant address, KVM is architecturally allowed to let vCPUs use
2075 	 * cached translations with the old protection bits.
2076 	 */
2077 	return flush;
2078 }
2079 
kvm_sync_page(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp,struct list_head * invalid_list)2080 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2081 			 struct list_head *invalid_list)
2082 {
2083 	int ret = __kvm_sync_page(vcpu, sp);
2084 
2085 	if (ret < 0)
2086 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2087 	return ret;
2088 }
2089 
kvm_mmu_remote_flush_or_zap(struct kvm * kvm,struct list_head * invalid_list,bool remote_flush)2090 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
2091 					struct list_head *invalid_list,
2092 					bool remote_flush)
2093 {
2094 	if (!remote_flush && list_empty(invalid_list))
2095 		return false;
2096 
2097 	if (!list_empty(invalid_list))
2098 		kvm_mmu_commit_zap_page(kvm, invalid_list);
2099 	else
2100 		kvm_flush_remote_tlbs(kvm);
2101 	return true;
2102 }
2103 
is_obsolete_sp(struct kvm * kvm,struct kvm_mmu_page * sp)2104 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
2105 {
2106 	if (sp->role.invalid)
2107 		return true;
2108 
2109 	/* TDP MMU pages do not use the MMU generation. */
2110 	return !is_tdp_mmu_page(sp) &&
2111 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2112 }
2113 
2114 struct mmu_page_path {
2115 	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
2116 	unsigned int idx[PT64_ROOT_MAX_LEVEL];
2117 };
2118 
2119 #define for_each_sp(pvec, sp, parents, i)			\
2120 		for (i = mmu_pages_first(&pvec, &parents);	\
2121 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
2122 			i = mmu_pages_next(&pvec, &parents, i))
2123 
mmu_pages_next(struct kvm_mmu_pages * pvec,struct mmu_page_path * parents,int i)2124 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
2125 			  struct mmu_page_path *parents,
2126 			  int i)
2127 {
2128 	int n;
2129 
2130 	for (n = i+1; n < pvec->nr; n++) {
2131 		struct kvm_mmu_page *sp = pvec->page[n].sp;
2132 		unsigned idx = pvec->page[n].idx;
2133 		int level = sp->role.level;
2134 
2135 		parents->idx[level-1] = idx;
2136 		if (level == PG_LEVEL_4K)
2137 			break;
2138 
2139 		parents->parent[level-2] = sp;
2140 	}
2141 
2142 	return n;
2143 }
2144 
mmu_pages_first(struct kvm_mmu_pages * pvec,struct mmu_page_path * parents)2145 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
2146 			   struct mmu_page_path *parents)
2147 {
2148 	struct kvm_mmu_page *sp;
2149 	int level;
2150 
2151 	if (pvec->nr == 0)
2152 		return 0;
2153 
2154 	WARN_ON_ONCE(pvec->page[0].idx != INVALID_INDEX);
2155 
2156 	sp = pvec->page[0].sp;
2157 	level = sp->role.level;
2158 	WARN_ON_ONCE(level == PG_LEVEL_4K);
2159 
2160 	parents->parent[level-2] = sp;
2161 
2162 	/* Also set up a sentinel.  Further entries in pvec are all
2163 	 * children of sp, so this element is never overwritten.
2164 	 */
2165 	parents->parent[level-1] = NULL;
2166 	return mmu_pages_next(pvec, parents, 0);
2167 }
2168 
mmu_pages_clear_parents(struct mmu_page_path * parents)2169 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2170 {
2171 	struct kvm_mmu_page *sp;
2172 	unsigned int level = 0;
2173 
2174 	do {
2175 		unsigned int idx = parents->idx[level];
2176 		sp = parents->parent[level];
2177 		if (!sp)
2178 			return;
2179 
2180 		WARN_ON_ONCE(idx == INVALID_INDEX);
2181 		clear_unsync_child_bit(sp, idx);
2182 		level++;
2183 	} while (!sp->unsync_children);
2184 }
2185 
mmu_sync_children(struct kvm_vcpu * vcpu,struct kvm_mmu_page * parent,bool can_yield)2186 static int mmu_sync_children(struct kvm_vcpu *vcpu,
2187 			     struct kvm_mmu_page *parent, bool can_yield)
2188 {
2189 	int i;
2190 	struct kvm_mmu_page *sp;
2191 	struct mmu_page_path parents;
2192 	struct kvm_mmu_pages pages;
2193 	LIST_HEAD(invalid_list);
2194 	bool flush = false;
2195 
2196 	while (mmu_unsync_walk(parent, &pages)) {
2197 		bool protected = false;
2198 
2199 		for_each_sp(pages, sp, parents, i)
2200 			protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
2201 
2202 		if (protected) {
2203 			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
2204 			flush = false;
2205 		}
2206 
2207 		for_each_sp(pages, sp, parents, i) {
2208 			kvm_unlink_unsync_page(vcpu->kvm, sp);
2209 			flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
2210 			mmu_pages_clear_parents(&parents);
2211 		}
2212 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2213 			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2214 			if (!can_yield) {
2215 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2216 				return -EINTR;
2217 			}
2218 
2219 			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2220 			flush = false;
2221 		}
2222 	}
2223 
2224 	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2225 	return 0;
2226 }
2227 
__clear_sp_write_flooding_count(struct kvm_mmu_page * sp)2228 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2229 {
2230 	atomic_set(&sp->write_flooding_count,  0);
2231 }
2232 
clear_sp_write_flooding_count(u64 * spte)2233 static void clear_sp_write_flooding_count(u64 *spte)
2234 {
2235 	__clear_sp_write_flooding_count(sptep_to_sp(spte));
2236 }
2237 
2238 /*
2239  * The vCPU is required when finding indirect shadow pages; the shadow
2240  * page may already exist and syncing it needs the vCPU pointer in
2241  * order to read guest page tables.  Direct shadow pages are never
2242  * unsync, thus @vcpu can be NULL if @role.direct is true.
2243  */
kvm_mmu_find_shadow_page(struct kvm * kvm,struct kvm_vcpu * vcpu,gfn_t gfn,struct hlist_head * sp_list,union kvm_mmu_page_role role)2244 static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
2245 						     struct kvm_vcpu *vcpu,
2246 						     gfn_t gfn,
2247 						     struct hlist_head *sp_list,
2248 						     union kvm_mmu_page_role role)
2249 {
2250 	struct kvm_mmu_page *sp;
2251 	int ret;
2252 	int collisions = 0;
2253 	LIST_HEAD(invalid_list);
2254 
2255 	for_each_valid_sp(kvm, sp, sp_list) {
2256 		if (sp->gfn != gfn) {
2257 			collisions++;
2258 			continue;
2259 		}
2260 
2261 		if (sp->role.word != role.word) {
2262 			/*
2263 			 * If the guest is creating an upper-level page, zap
2264 			 * unsync pages for the same gfn.  While it's possible
2265 			 * the guest is using recursive page tables, in all
2266 			 * likelihood the guest has stopped using the unsync
2267 			 * page and is installing a completely unrelated page.
2268 			 * Unsync pages must not be left as is, because the new
2269 			 * upper-level page will be write-protected.
2270 			 */
2271 			if (role.level > PG_LEVEL_4K && sp->unsync)
2272 				kvm_mmu_prepare_zap_page(kvm, sp,
2273 							 &invalid_list);
2274 			continue;
2275 		}
2276 
2277 		/* unsync and write-flooding only apply to indirect SPs. */
2278 		if (sp->role.direct)
2279 			goto out;
2280 
2281 		if (sp->unsync) {
2282 			if (KVM_BUG_ON(!vcpu, kvm))
2283 				break;
2284 
2285 			/*
2286 			 * The page is good, but is stale.  kvm_sync_page does
2287 			 * get the latest guest state, but (unlike mmu_unsync_children)
2288 			 * it doesn't write-protect the page or mark it synchronized!
2289 			 * This way the validity of the mapping is ensured, but the
2290 			 * overhead of write protection is not incurred until the
2291 			 * guest invalidates the TLB mapping.  This allows multiple
2292 			 * SPs for a single gfn to be unsync.
2293 			 *
2294 			 * If the sync fails, the page is zapped.  If so, break
2295 			 * in order to rebuild it.
2296 			 */
2297 			ret = kvm_sync_page(vcpu, sp, &invalid_list);
2298 			if (ret < 0)
2299 				break;
2300 
2301 			WARN_ON_ONCE(!list_empty(&invalid_list));
2302 			if (ret > 0)
2303 				kvm_flush_remote_tlbs(kvm);
2304 		}
2305 
2306 		__clear_sp_write_flooding_count(sp);
2307 
2308 		goto out;
2309 	}
2310 
2311 	sp = NULL;
2312 	++kvm->stat.mmu_cache_miss;
2313 
2314 out:
2315 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2316 
2317 	if (collisions > kvm->stat.max_mmu_page_hash_collisions)
2318 		kvm->stat.max_mmu_page_hash_collisions = collisions;
2319 	return sp;
2320 }
2321 
2322 /* Caches used when allocating a new shadow page. */
2323 struct shadow_page_caches {
2324 	struct kvm_mmu_memory_cache *page_header_cache;
2325 	struct kvm_mmu_memory_cache *shadow_page_cache;
2326 	struct kvm_mmu_memory_cache *shadowed_info_cache;
2327 };
2328 
kvm_mmu_alloc_shadow_page(struct kvm * kvm,struct shadow_page_caches * caches,gfn_t gfn,struct hlist_head * sp_list,union kvm_mmu_page_role role)2329 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
2330 						      struct shadow_page_caches *caches,
2331 						      gfn_t gfn,
2332 						      struct hlist_head *sp_list,
2333 						      union kvm_mmu_page_role role)
2334 {
2335 	struct kvm_mmu_page *sp;
2336 
2337 	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
2338 	sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
2339 	if (!role.direct && role.level <= KVM_MAX_HUGEPAGE_LEVEL)
2340 		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
2341 
2342 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
2343 
2344 	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
2345 
2346 	/*
2347 	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
2348 	 * depends on valid pages being added to the head of the list.  See
2349 	 * comments in kvm_zap_obsolete_pages().
2350 	 */
2351 	sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
2352 	list_add(&sp->link, &kvm->arch.active_mmu_pages);
2353 	kvm_account_mmu_page(kvm, sp);
2354 
2355 	sp->gfn = gfn;
2356 	sp->role = role;
2357 	hlist_add_head(&sp->hash_link, sp_list);
2358 	if (sp_has_gptes(sp))
2359 		account_shadowed(kvm, sp);
2360 
2361 	return sp;
2362 }
2363 
2364 /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
__kvm_mmu_get_shadow_page(struct kvm * kvm,struct kvm_vcpu * vcpu,struct shadow_page_caches * caches,gfn_t gfn,union kvm_mmu_page_role role)2365 static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
2366 						      struct kvm_vcpu *vcpu,
2367 						      struct shadow_page_caches *caches,
2368 						      gfn_t gfn,
2369 						      union kvm_mmu_page_role role)
2370 {
2371 	struct hlist_head *sp_list;
2372 	struct kvm_mmu_page *sp;
2373 	bool created = false;
2374 
2375 	/*
2376 	 * No need for memory barriers, unlike in kvm_get_mmu_page_hash(), as
2377 	 * mmu_page_hash must be set prior to creating the first shadow root,
2378 	 * i.e. reaching this point is fully serialized by slots_arch_lock.
2379 	 */
2380 	BUG_ON(!kvm->arch.mmu_page_hash);
2381 	sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2382 
2383 	sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2384 	if (!sp) {
2385 		created = true;
2386 		sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2387 	}
2388 
2389 	trace_kvm_mmu_get_page(sp, created);
2390 	return sp;
2391 }
2392 
kvm_mmu_get_shadow_page(struct kvm_vcpu * vcpu,gfn_t gfn,union kvm_mmu_page_role role)2393 static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
2394 						    gfn_t gfn,
2395 						    union kvm_mmu_page_role role)
2396 {
2397 	struct shadow_page_caches caches = {
2398 		.page_header_cache = &vcpu->arch.mmu_page_header_cache,
2399 		.shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
2400 		.shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
2401 	};
2402 
2403 	return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
2404 }
2405 
kvm_mmu_child_role(u64 * sptep,bool direct,unsigned int access)2406 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct,
2407 						  unsigned int access)
2408 {
2409 	struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);
2410 	union kvm_mmu_page_role role;
2411 
2412 	role = parent_sp->role;
2413 	role.level--;
2414 	role.access = access;
2415 	role.direct = direct;
2416 	role.passthrough = 0;
2417 
2418 	/*
2419 	 * If the guest has 4-byte PTEs then that means it's using 32-bit,
2420 	 * 2-level, non-PAE paging. KVM shadows such guests with PAE paging
2421 	 * (i.e. 8-byte PTEs). The difference in PTE size means that KVM must
2422 	 * shadow each guest page table with multiple shadow page tables, which
2423 	 * requires extra bookkeeping in the role.
2424 	 *
2425 	 * Specifically, to shadow the guest's page directory (which covers a
2426 	 * 4GiB address space), KVM uses 4 PAE page directories, each mapping
2427 	 * 1GiB of the address space. @role.quadrant encodes which quarter of
2428 	 * the address space each maps.
2429 	 *
2430 	 * To shadow the guest's page tables (which each map a 4MiB region), KVM
2431 	 * uses 2 PAE page tables, each mapping a 2MiB region. For these,
2432 	 * @role.quadrant encodes which half of the region they map.
2433 	 *
2434 	 * Concretely, a 4-byte PDE consumes bits 31:22, while an 8-byte PDE
2435 	 * consumes bits 29:21.  To consume bits 31:30, KVM's uses 4 shadow
2436 	 * PDPTEs; those 4 PAE page directories are pre-allocated and their
2437 	 * quadrant is assigned in mmu_alloc_root().   A 4-byte PTE consumes
2438 	 * bits 21:12, while an 8-byte PTE consumes bits 20:12.  To consume
2439 	 * bit 21 in the PTE (the child here), KVM propagates that bit to the
2440 	 * quadrant, i.e. sets quadrant to '0' or '1'.  The parent 8-byte PDE
2441 	 * covers bit 21 (see above), thus the quadrant is calculated from the
2442 	 * _least_ significant bit of the PDE index.
2443 	 */
2444 	if (role.has_4_byte_gpte) {
2445 		WARN_ON_ONCE(role.level != PG_LEVEL_4K);
2446 		role.quadrant = spte_index(sptep) & 1;
2447 	}
2448 
2449 	return role;
2450 }
2451 
kvm_mmu_get_child_sp(struct kvm_vcpu * vcpu,u64 * sptep,gfn_t gfn,bool direct,unsigned int access)2452 static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
2453 						 u64 *sptep, gfn_t gfn,
2454 						 bool direct, unsigned int access)
2455 {
2456 	union kvm_mmu_page_role role;
2457 
2458 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep) &&
2459 	    spte_to_child_sp(*sptep) && spte_to_child_sp(*sptep)->gfn == gfn)
2460 		return ERR_PTR(-EEXIST);
2461 
2462 	role = kvm_mmu_child_role(sptep, direct, access);
2463 	return kvm_mmu_get_shadow_page(vcpu, gfn, role);
2464 }
2465 
shadow_walk_init_using_root(struct kvm_shadow_walk_iterator * iterator,struct kvm_vcpu * vcpu,hpa_t root,u64 addr)2466 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2467 					struct kvm_vcpu *vcpu, hpa_t root,
2468 					u64 addr)
2469 {
2470 	iterator->addr = addr;
2471 	iterator->shadow_addr = root;
2472 	iterator->level = vcpu->arch.mmu->root_role.level;
2473 
2474 	if (iterator->level >= PT64_ROOT_4LEVEL &&
2475 	    vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
2476 	    !vcpu->arch.mmu->root_role.direct)
2477 		iterator->level = PT32E_ROOT_LEVEL;
2478 
2479 	if (iterator->level == PT32E_ROOT_LEVEL) {
2480 		/*
2481 		 * prev_root is currently only used for 64-bit hosts. So only
2482 		 * the active root_hpa is valid here.
2483 		 */
2484 		BUG_ON(root != vcpu->arch.mmu->root.hpa);
2485 
2486 		iterator->shadow_addr
2487 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2488 		iterator->shadow_addr &= SPTE_BASE_ADDR_MASK;
2489 		--iterator->level;
2490 		if (!iterator->shadow_addr)
2491 			iterator->level = 0;
2492 	}
2493 }
2494 
shadow_walk_init(struct kvm_shadow_walk_iterator * iterator,struct kvm_vcpu * vcpu,u64 addr)2495 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2496 			     struct kvm_vcpu *vcpu, u64 addr)
2497 {
2498 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2499 				    addr);
2500 }
2501 
shadow_walk_okay(struct kvm_shadow_walk_iterator * iterator)2502 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2503 {
2504 	if (iterator->level < PG_LEVEL_4K)
2505 		return false;
2506 
2507 	iterator->index = SPTE_INDEX(iterator->addr, iterator->level);
2508 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2509 	return true;
2510 }
2511 
__shadow_walk_next(struct kvm_shadow_walk_iterator * iterator,u64 spte)2512 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2513 			       u64 spte)
2514 {
2515 	if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2516 		iterator->level = 0;
2517 		return;
2518 	}
2519 
2520 	iterator->shadow_addr = spte & SPTE_BASE_ADDR_MASK;
2521 	--iterator->level;
2522 }
2523 
shadow_walk_next(struct kvm_shadow_walk_iterator * iterator)2524 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2525 {
2526 	__shadow_walk_next(iterator, *iterator->sptep);
2527 }
2528 
2529 /*
2530  * Note: while normally KVM uses a "bool flush" return value to let
2531  * the caller batch flushes, __link_shadow_page() flushes immediately
2532  * before populating the parent PTE with the new shadow page.  The
2533  * typical callers, direct_map() and FNAME(fetch)(), are not going
2534  * to zap more than one huge SPTE anyway.
2535  *
2536  * The only exception, where @flush can be false, is when a huge SPTE
2537  * is replaced with a shadow page SPTE with a fully populated page table,
2538  * which can happen from shadow_mmu_split_huge_page().  In this case,
2539  * no memory is unmapped across the change to the page tables and no
2540  * immediate flush is needed for correctness.
2541  *
2542  * Even in that case, calls to kvm_mmu_commit_zap_page() are not
2543  * batched.  Doing so would require adding an invalid_list argument
2544  * all the way down to __walk_slot_rmaps().
2545  */
__link_shadow_page(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,u64 * sptep,struct kvm_mmu_page * sp,bool flush)2546 static void __link_shadow_page(struct kvm *kvm,
2547 			       struct kvm_mmu_memory_cache *cache, u64 *sptep,
2548 			       struct kvm_mmu_page *sp, bool flush)
2549 {
2550 	u64 spte;
2551 
2552 	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2553 
2554 	if (is_shadow_present_pte(*sptep)) {
2555 		struct kvm_mmu_page *parent_sp;
2556 		LIST_HEAD(invalid_list);
2557 
2558 		parent_sp = sptep_to_sp(sptep);
2559 		WARN_ON_ONCE(parent_sp->role.level == PG_LEVEL_4K);
2560 
2561 		if (mmu_page_zap_pte(kvm, parent_sp, sptep, &invalid_list))
2562 			kvm_mmu_commit_zap_page(kvm, &invalid_list);
2563 		else if (flush)
2564 			kvm_flush_remote_tlbs_sptep(kvm, sptep);
2565 	}
2566 
2567 	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2568 
2569 	mmu_spte_set(sptep, spte);
2570 
2571 	mmu_page_add_parent_pte(kvm, cache, sp, sptep);
2572 
2573 	/*
2574 	 * The non-direct sub-pagetable must be updated before linking.  For
2575 	 * L1 sp, the pagetable is updated via kvm_sync_page() in
2576 	 * kvm_mmu_find_shadow_page() without write-protecting the gfn,
2577 	 * so sp->unsync can be true or false.  For higher level non-direct
2578 	 * sp, the pagetable is updated/synced via mmu_sync_children() in
2579 	 * FNAME(fetch)(), so sp->unsync_children can only be false.
2580 	 * WARN_ON_ONCE() if anything happens unexpectedly.
2581 	 */
2582 	if (WARN_ON_ONCE(sp->unsync_children) || sp->unsync)
2583 		mark_unsync(sptep);
2584 }
2585 
link_shadow_page(struct kvm_vcpu * vcpu,u64 * sptep,struct kvm_mmu_page * sp)2586 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2587 			     struct kvm_mmu_page *sp)
2588 {
2589 	__link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true);
2590 }
2591 
validate_direct_spte(struct kvm_vcpu * vcpu,u64 * sptep,unsigned direct_access)2592 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2593 				   unsigned direct_access)
2594 {
2595 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2596 		struct kvm_mmu_page *child;
2597 
2598 		/*
2599 		 * For the direct sp, if the guest pte's dirty bit
2600 		 * changed form clean to dirty, it will corrupt the
2601 		 * sp's access: allow writable in the read-only sp,
2602 		 * so we should update the spte at this point to get
2603 		 * a new sp with the correct access.
2604 		 */
2605 		child = spte_to_child_sp(*sptep);
2606 		if (child->role.access == direct_access)
2607 			return;
2608 
2609 		drop_parent_pte(vcpu->kvm, child, sptep);
2610 		kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
2611 	}
2612 }
2613 
2614 /* Returns the number of zapped non-leaf child shadow pages. */
mmu_page_zap_pte(struct kvm * kvm,struct kvm_mmu_page * sp,u64 * spte,struct list_head * invalid_list)2615 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2616 			    u64 *spte, struct list_head *invalid_list)
2617 {
2618 	u64 pte;
2619 	struct kvm_mmu_page *child;
2620 
2621 	pte = *spte;
2622 	if (is_shadow_present_pte(pte)) {
2623 		if (is_last_spte(pte, sp->role.level)) {
2624 			drop_spte(kvm, spte);
2625 		} else {
2626 			child = spte_to_child_sp(pte);
2627 			drop_parent_pte(kvm, child, spte);
2628 
2629 			/*
2630 			 * Recursively zap nested TDP SPs, parentless SPs are
2631 			 * unlikely to be used again in the near future.  This
2632 			 * avoids retaining a large number of stale nested SPs.
2633 			 */
2634 			if (tdp_enabled && invalid_list &&
2635 			    child->role.guest_mode &&
2636 			    !atomic_long_read(&child->parent_ptes.val))
2637 				return kvm_mmu_prepare_zap_page(kvm, child,
2638 								invalid_list);
2639 		}
2640 	} else if (is_mmio_spte(kvm, pte)) {
2641 		mmu_spte_clear_no_track(spte);
2642 	}
2643 	return 0;
2644 }
2645 
kvm_mmu_page_unlink_children(struct kvm * kvm,struct kvm_mmu_page * sp,struct list_head * invalid_list)2646 static int kvm_mmu_page_unlink_children(struct kvm *kvm,
2647 					struct kvm_mmu_page *sp,
2648 					struct list_head *invalid_list)
2649 {
2650 	int zapped = 0;
2651 	unsigned i;
2652 
2653 	for (i = 0; i < SPTE_ENT_PER_PAGE; ++i)
2654 		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
2655 
2656 	return zapped;
2657 }
2658 
kvm_mmu_unlink_parents(struct kvm * kvm,struct kvm_mmu_page * sp)2659 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2660 {
2661 	u64 *sptep;
2662 	struct rmap_iterator iter;
2663 
2664 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2665 		drop_parent_pte(kvm, sp, sptep);
2666 }
2667 
mmu_zap_unsync_children(struct kvm * kvm,struct kvm_mmu_page * parent,struct list_head * invalid_list)2668 static int mmu_zap_unsync_children(struct kvm *kvm,
2669 				   struct kvm_mmu_page *parent,
2670 				   struct list_head *invalid_list)
2671 {
2672 	int i, zapped = 0;
2673 	struct mmu_page_path parents;
2674 	struct kvm_mmu_pages pages;
2675 
2676 	if (parent->role.level == PG_LEVEL_4K)
2677 		return 0;
2678 
2679 	while (mmu_unsync_walk(parent, &pages)) {
2680 		struct kvm_mmu_page *sp;
2681 
2682 		for_each_sp(pages, sp, parents, i) {
2683 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2684 			mmu_pages_clear_parents(&parents);
2685 			zapped++;
2686 		}
2687 	}
2688 
2689 	return zapped;
2690 }
2691 
__kvm_mmu_prepare_zap_page(struct kvm * kvm,struct kvm_mmu_page * sp,struct list_head * invalid_list,int * nr_zapped)2692 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2693 				       struct kvm_mmu_page *sp,
2694 				       struct list_head *invalid_list,
2695 				       int *nr_zapped)
2696 {
2697 	bool list_unstable, zapped_root = false;
2698 
2699 	lockdep_assert_held_write(&kvm->mmu_lock);
2700 	trace_kvm_mmu_prepare_zap_page(sp);
2701 	++kvm->stat.mmu_shadow_zapped;
2702 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2703 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2704 	kvm_mmu_unlink_parents(kvm, sp);
2705 
2706 	/* Zapping children means active_mmu_pages has become unstable. */
2707 	list_unstable = *nr_zapped;
2708 
2709 	if (!sp->role.invalid && sp_has_gptes(sp))
2710 		unaccount_shadowed(kvm, sp);
2711 
2712 	if (sp->unsync)
2713 		kvm_unlink_unsync_page(kvm, sp);
2714 	if (!sp->root_count) {
2715 		/* Count self */
2716 		(*nr_zapped)++;
2717 
2718 		/*
2719 		 * Already invalid pages (previously active roots) are not on
2720 		 * the active page list.  See list_del() in the "else" case of
2721 		 * !sp->root_count.
2722 		 */
2723 		if (sp->role.invalid)
2724 			list_add(&sp->link, invalid_list);
2725 		else
2726 			list_move(&sp->link, invalid_list);
2727 		kvm_unaccount_mmu_page(kvm, sp);
2728 	} else {
2729 		/*
2730 		 * Remove the active root from the active page list, the root
2731 		 * will be explicitly freed when the root_count hits zero.
2732 		 */
2733 		list_del(&sp->link);
2734 
2735 		/*
2736 		 * Obsolete pages cannot be used on any vCPUs, see the comment
2737 		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2738 		 * treats invalid shadow pages as being obsolete.
2739 		 */
2740 		zapped_root = !is_obsolete_sp(kvm, sp);
2741 	}
2742 
2743 	if (sp->nx_huge_page_disallowed)
2744 		unaccount_nx_huge_page(kvm, sp);
2745 
2746 	sp->role.invalid = 1;
2747 
2748 	/*
2749 	 * Make the request to free obsolete roots after marking the root
2750 	 * invalid, otherwise other vCPUs may not see it as invalid.
2751 	 */
2752 	if (zapped_root)
2753 		kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
2754 	return list_unstable;
2755 }
2756 
kvm_mmu_prepare_zap_page(struct kvm * kvm,struct kvm_mmu_page * sp,struct list_head * invalid_list)2757 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2758 				     struct list_head *invalid_list)
2759 {
2760 	int nr_zapped;
2761 
2762 	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2763 	return nr_zapped;
2764 }
2765 
kvm_mmu_commit_zap_page(struct kvm * kvm,struct list_head * invalid_list)2766 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2767 				    struct list_head *invalid_list)
2768 {
2769 	struct kvm_mmu_page *sp, *nsp;
2770 
2771 	if (list_empty(invalid_list))
2772 		return;
2773 
2774 	/*
2775 	 * We need to make sure everyone sees our modifications to
2776 	 * the page tables and see changes to vcpu->mode here. The barrier
2777 	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
2778 	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2779 	 *
2780 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2781 	 * guest mode and/or lockless shadow page table walks.
2782 	 */
2783 	kvm_flush_remote_tlbs(kvm);
2784 
2785 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2786 		WARN_ON_ONCE(!sp->role.invalid || sp->root_count);
2787 		kvm_mmu_free_shadow_page(sp);
2788 	}
2789 }
2790 
kvm_mmu_zap_oldest_mmu_pages(struct kvm * kvm,unsigned long nr_to_zap)2791 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
2792 						  unsigned long nr_to_zap)
2793 {
2794 	unsigned long total_zapped = 0;
2795 	struct kvm_mmu_page *sp, *tmp;
2796 	LIST_HEAD(invalid_list);
2797 	bool unstable;
2798 	int nr_zapped;
2799 
2800 	if (list_empty(&kvm->arch.active_mmu_pages))
2801 		return 0;
2802 
2803 restart:
2804 	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2805 		/*
2806 		 * Don't zap active root pages, the page itself can't be freed
2807 		 * and zapping it will just force vCPUs to realloc and reload.
2808 		 */
2809 		if (sp->root_count)
2810 			continue;
2811 
2812 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
2813 						      &nr_zapped);
2814 		total_zapped += nr_zapped;
2815 		if (total_zapped >= nr_to_zap)
2816 			break;
2817 
2818 		if (unstable)
2819 			goto restart;
2820 	}
2821 
2822 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2823 
2824 	kvm->stat.mmu_recycled += total_zapped;
2825 	return total_zapped;
2826 }
2827 
kvm_mmu_available_pages(struct kvm * kvm)2828 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2829 {
2830 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2831 		return kvm->arch.n_max_mmu_pages -
2832 			kvm->arch.n_used_mmu_pages;
2833 
2834 	return 0;
2835 }
2836 
make_mmu_pages_available(struct kvm_vcpu * vcpu)2837 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2838 {
2839 	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2840 
2841 	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2842 		return 0;
2843 
2844 	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2845 
2846 	/*
2847 	 * Note, this check is intentionally soft, it only guarantees that one
2848 	 * page is available, while the caller may end up allocating as many as
2849 	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
2850 	 * exceeding the (arbitrary by default) limit will not harm the host,
2851 	 * being too aggressive may unnecessarily kill the guest, and getting an
2852 	 * exact count is far more trouble than it's worth, especially in the
2853 	 * page fault paths.
2854 	 */
2855 	if (!kvm_mmu_available_pages(vcpu->kvm))
2856 		return -ENOSPC;
2857 	return 0;
2858 }
2859 
2860 /*
2861  * Changing the number of mmu pages allocated to the vm
2862  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
2863  */
kvm_mmu_change_mmu_pages(struct kvm * kvm,unsigned long goal_nr_mmu_pages)2864 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2865 {
2866 	write_lock(&kvm->mmu_lock);
2867 
2868 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2869 		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2870 						  goal_nr_mmu_pages);
2871 
2872 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2873 	}
2874 
2875 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2876 
2877 	write_unlock(&kvm->mmu_lock);
2878 }
2879 
__kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu * vcpu,gpa_t cr2_or_gpa,bool always_retry)2880 bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
2881 				       bool always_retry)
2882 {
2883 	struct kvm *kvm = vcpu->kvm;
2884 	LIST_HEAD(invalid_list);
2885 	struct kvm_mmu_page *sp;
2886 	gpa_t gpa = cr2_or_gpa;
2887 	bool r = false;
2888 
2889 	/*
2890 	 * Bail early if there aren't any write-protected shadow pages to avoid
2891 	 * unnecessarily taking mmu_lock lock, e.g. if the gfn is write-tracked
2892 	 * by a third party.  Reading indirect_shadow_pages without holding
2893 	 * mmu_lock is safe, as this is purely an optimization, i.e. a false
2894 	 * positive is benign, and a false negative will simply result in KVM
2895 	 * skipping the unprotect+retry path, which is also an optimization.
2896 	 */
2897 	if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
2898 		goto out;
2899 
2900 	if (!vcpu->arch.mmu->root_role.direct) {
2901 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
2902 		if (gpa == INVALID_GPA)
2903 			goto out;
2904 	}
2905 
2906 	write_lock(&kvm->mmu_lock);
2907 	for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa))
2908 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2909 
2910 	/*
2911 	 * Snapshot the result before zapping, as zapping will remove all list
2912 	 * entries, i.e. checking the list later would yield a false negative.
2913 	 */
2914 	r = !list_empty(&invalid_list);
2915 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2916 	write_unlock(&kvm->mmu_lock);
2917 
2918 out:
2919 	if (r || always_retry) {
2920 		vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
2921 		vcpu->arch.last_retry_addr = cr2_or_gpa;
2922 	}
2923 	return r;
2924 }
2925 
kvm_unsync_page(struct kvm * kvm,struct kvm_mmu_page * sp)2926 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
2927 {
2928 	trace_kvm_mmu_unsync_page(sp);
2929 	++kvm->stat.mmu_unsync;
2930 	sp->unsync = 1;
2931 
2932 	kvm_mmu_mark_parents_unsync(sp);
2933 }
2934 
2935 /*
2936  * Attempt to unsync any shadow pages that can be reached by the specified gfn,
2937  * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
2938  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
2939  * be write-protected.
2940  */
mmu_try_to_unsync_pages(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t gfn,bool synchronizing,bool prefetch)2941 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
2942 			    gfn_t gfn, bool synchronizing, bool prefetch)
2943 {
2944 	struct kvm_mmu_page *sp;
2945 	bool locked = false;
2946 
2947 	/*
2948 	 * Force write-protection if the page is being tracked.  Note, the page
2949 	 * track machinery is used to write-protect upper-level shadow pages,
2950 	 * i.e. this guards the role.level == 4K assertion below!
2951 	 */
2952 	if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
2953 		return -EPERM;
2954 
2955 	/*
2956 	 * Only 4KiB mappings can become unsync, and KVM disallows hugepages
2957 	 * when accounting 4KiB shadow pages.  Upper-level gPTEs are always
2958 	 * write-protected (see above), thus if the gfn can be mapped with a
2959 	 * hugepage and isn't write-tracked, it can't have a shadow page.
2960 	 */
2961 	if (!lpage_info_slot(gfn, slot, PG_LEVEL_2M)->disallow_lpage)
2962 		return 0;
2963 
2964 	/*
2965 	 * The page is not write-tracked, mark existing shadow pages unsync
2966 	 * unless KVM is synchronizing an unsync SP.  In that case, KVM must
2967 	 * complete emulation of the guest TLB flush before allowing shadow
2968 	 * pages to become unsync (writable by the guest).
2969 	 */
2970 	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2971 		if (synchronizing)
2972 			return -EPERM;
2973 
2974 		if (sp->unsync)
2975 			continue;
2976 
2977 		if (prefetch)
2978 			return -EEXIST;
2979 
2980 		/*
2981 		 * TDP MMU page faults require an additional spinlock as they
2982 		 * run with mmu_lock held for read, not write, and the unsync
2983 		 * logic is not thread safe.  Take the spinklock regardless of
2984 		 * the MMU type to avoid extra conditionals/parameters, there's
2985 		 * no meaningful penalty if mmu_lock is held for write.
2986 		 */
2987 		if (!locked) {
2988 			locked = true;
2989 			spin_lock(&kvm->arch.mmu_unsync_pages_lock);
2990 
2991 			/*
2992 			 * Recheck after taking the spinlock, a different vCPU
2993 			 * may have since marked the page unsync.  A false
2994 			 * negative on the unprotected check above is not
2995 			 * possible as clearing sp->unsync _must_ hold mmu_lock
2996 			 * for write, i.e. unsync cannot transition from 1->0
2997 			 * while this CPU holds mmu_lock for read (or write).
2998 			 */
2999 			if (READ_ONCE(sp->unsync))
3000 				continue;
3001 		}
3002 
3003 		WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
3004 		kvm_unsync_page(kvm, sp);
3005 	}
3006 	if (locked)
3007 		spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
3008 
3009 	/*
3010 	 * We need to ensure that the marking of unsync pages is visible
3011 	 * before the SPTE is updated to allow writes because
3012 	 * kvm_mmu_sync_roots() checks the unsync flags without holding
3013 	 * the MMU lock and so can race with this. If the SPTE was updated
3014 	 * before the page had been marked as unsync-ed, something like the
3015 	 * following could happen:
3016 	 *
3017 	 * CPU 1                    CPU 2
3018 	 * ---------------------------------------------------------------------
3019 	 * 1.2 Host updates SPTE
3020 	 *     to be writable
3021 	 *                      2.1 Guest writes a GPTE for GVA X.
3022 	 *                          (GPTE being in the guest page table shadowed
3023 	 *                           by the SP from CPU 1.)
3024 	 *                          This reads SPTE during the page table walk.
3025 	 *                          Since SPTE.W is read as 1, there is no
3026 	 *                          fault.
3027 	 *
3028 	 *                      2.2 Guest issues TLB flush.
3029 	 *                          That causes a VM Exit.
3030 	 *
3031 	 *                      2.3 Walking of unsync pages sees sp->unsync is
3032 	 *                          false and skips the page.
3033 	 *
3034 	 *                      2.4 Guest accesses GVA X.
3035 	 *                          Since the mapping in the SP was not updated,
3036 	 *                          so the old mapping for GVA X incorrectly
3037 	 *                          gets used.
3038 	 * 1.1 Host marks SP
3039 	 *     as unsync
3040 	 *     (sp->unsync = true)
3041 	 *
3042 	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
3043 	 * the situation in 2.4 does not arise.  It pairs with the read barrier
3044 	 * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3.
3045 	 */
3046 	smp_wmb();
3047 
3048 	return 0;
3049 }
3050 
mmu_set_spte(struct kvm_vcpu * vcpu,struct kvm_memory_slot * slot,u64 * sptep,unsigned int pte_access,gfn_t gfn,kvm_pfn_t pfn,struct kvm_page_fault * fault)3051 static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
3052 			u64 *sptep, unsigned int pte_access, gfn_t gfn,
3053 			kvm_pfn_t pfn, struct kvm_page_fault *fault)
3054 {
3055 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
3056 	int level = sp->role.level;
3057 	int was_rmapped = 0;
3058 	int ret = RET_PF_FIXED;
3059 	bool flush = false;
3060 	bool wrprot;
3061 	u64 spte;
3062 
3063 	/* Prefetching always gets a writable pfn.  */
3064 	bool host_writable = !fault || fault->map_writable;
3065 	bool prefetch = !fault || fault->prefetch;
3066 	bool write_fault = fault && fault->write;
3067 
3068 	if (is_shadow_present_pte(*sptep)) {
3069 		if (prefetch && is_last_spte(*sptep, level) &&
3070 		    pfn == spte_to_pfn(*sptep))
3071 			return RET_PF_SPURIOUS;
3072 
3073 		/*
3074 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
3075 		 * the parent of the now unreachable PTE.
3076 		 */
3077 		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
3078 			struct kvm_mmu_page *child;
3079 			u64 pte = *sptep;
3080 
3081 			child = spte_to_child_sp(pte);
3082 			drop_parent_pte(vcpu->kvm, child, sptep);
3083 			flush = true;
3084 		} else if (pfn != spte_to_pfn(*sptep)) {
3085 			WARN_ON_ONCE(vcpu->arch.mmu->root_role.direct);
3086 			drop_spte(vcpu->kvm, sptep);
3087 			flush = true;
3088 		} else
3089 			was_rmapped = 1;
3090 	}
3091 
3092 	if (unlikely(is_noslot_pfn(pfn))) {
3093 		vcpu->stat.pf_mmio_spte_created++;
3094 		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
3095 		if (flush)
3096 			kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
3097 		return RET_PF_EMULATE;
3098 	}
3099 
3100 	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
3101 			   false, host_writable, &spte);
3102 
3103 	if (*sptep == spte) {
3104 		ret = RET_PF_SPURIOUS;
3105 	} else {
3106 		flush |= mmu_spte_update(sptep, spte);
3107 		trace_kvm_mmu_set_spte(level, gfn, sptep);
3108 	}
3109 
3110 	if (wrprot && write_fault)
3111 		ret = RET_PF_WRITE_PROTECTED;
3112 
3113 	if (flush)
3114 		kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
3115 
3116 	if (!was_rmapped) {
3117 		WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
3118 		rmap_add(vcpu, slot, sptep, gfn, pte_access);
3119 	} else {
3120 		/* Already rmapped but the pte_access bits may have changed. */
3121 		kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access);
3122 	}
3123 
3124 	return ret;
3125 }
3126 
kvm_mmu_prefetch_sptes(struct kvm_vcpu * vcpu,gfn_t gfn,u64 * sptep,int nr_pages,unsigned int access)3127 static bool kvm_mmu_prefetch_sptes(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *sptep,
3128 				   int nr_pages, unsigned int access)
3129 {
3130 	struct page *pages[PTE_PREFETCH_NUM];
3131 	struct kvm_memory_slot *slot;
3132 	int i;
3133 
3134 	if (WARN_ON_ONCE(nr_pages > PTE_PREFETCH_NUM))
3135 		return false;
3136 
3137 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
3138 	if (!slot)
3139 		return false;
3140 
3141 	nr_pages = kvm_prefetch_pages(slot, gfn, pages, nr_pages);
3142 	if (nr_pages <= 0)
3143 		return false;
3144 
3145 	for (i = 0; i < nr_pages; i++, gfn++, sptep++) {
3146 		mmu_set_spte(vcpu, slot, sptep, access, gfn,
3147 			     page_to_pfn(pages[i]), NULL);
3148 
3149 		/*
3150 		 * KVM always prefetches writable pages from the primary MMU,
3151 		 * and KVM can make its SPTE writable in the fast page handler,
3152 		 * without notifying the primary MMU.  Mark pages/folios dirty
3153 		 * now to ensure file data is written back if it ends up being
3154 		 * written by the guest.  Because KVM's prefetching GUPs
3155 		 * writable PTEs, the probability of unnecessary writeback is
3156 		 * extremely low.
3157 		 */
3158 		kvm_release_page_dirty(pages[i]);
3159 	}
3160 
3161 	return true;
3162 }
3163 
direct_pte_prefetch_many(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp,u64 * start,u64 * end)3164 static bool direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
3165 				     struct kvm_mmu_page *sp,
3166 				     u64 *start, u64 *end)
3167 {
3168 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
3169 	unsigned int access = sp->role.access;
3170 
3171 	return kvm_mmu_prefetch_sptes(vcpu, gfn, start, end - start, access);
3172 }
3173 
__direct_pte_prefetch(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp,u64 * sptep)3174 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
3175 				  struct kvm_mmu_page *sp, u64 *sptep)
3176 {
3177 	u64 *spte, *start = NULL;
3178 	int i;
3179 
3180 	WARN_ON_ONCE(!sp->role.direct);
3181 
3182 	i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
3183 	spte = sp->spt + i;
3184 
3185 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
3186 		if (is_shadow_present_pte(*spte) || spte == sptep) {
3187 			if (!start)
3188 				continue;
3189 			if (!direct_pte_prefetch_many(vcpu, sp, start, spte))
3190 				return;
3191 
3192 			start = NULL;
3193 		} else if (!start)
3194 			start = spte;
3195 	}
3196 	if (start)
3197 		direct_pte_prefetch_many(vcpu, sp, start, spte);
3198 }
3199 
direct_pte_prefetch(struct kvm_vcpu * vcpu,u64 * sptep)3200 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
3201 {
3202 	struct kvm_mmu_page *sp;
3203 
3204 	sp = sptep_to_sp(sptep);
3205 
3206 	/*
3207 	 * Without accessed bits, there's no way to distinguish between
3208 	 * actually accessed translations and prefetched, so disable pte
3209 	 * prefetch if accessed bits aren't available.
3210 	 */
3211 	if (sp_ad_disabled(sp))
3212 		return;
3213 
3214 	if (sp->role.level > PG_LEVEL_4K)
3215 		return;
3216 
3217 	/*
3218 	 * If addresses are being invalidated, skip prefetching to avoid
3219 	 * accidentally prefetching those addresses.
3220 	 */
3221 	if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
3222 		return;
3223 
3224 	__direct_pte_prefetch(vcpu, sp, sptep);
3225 }
3226 
3227 /*
3228  * Lookup the mapping level for @gfn in the current mm.
3229  *
3230  * WARNING!  Use of host_pfn_mapping_level() requires the caller and the end
3231  * consumer to be tied into KVM's handlers for MMU notifier events!
3232  *
3233  * There are several ways to safely use this helper:
3234  *
3235  * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
3236  *   consuming it.  In this case, mmu_lock doesn't need to be held during the
3237  *   lookup, but it does need to be held while checking the MMU notifier.
3238  *
3239  * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
3240  *   event for the hva.  This can be done by explicit checking the MMU notifier
3241  *   or by ensuring that KVM already has a valid mapping that covers the hva.
3242  *
3243  * - Do not use the result to install new mappings, e.g. use the host mapping
3244  *   level only to decide whether or not to zap an entry.  In this case, it's
3245  *   not required to hold mmu_lock (though it's highly likely the caller will
3246  *   want to hold mmu_lock anyways, e.g. to modify SPTEs).
3247  *
3248  * Note!  The lookup can still race with modifications to host page tables, but
3249  * the above "rules" ensure KVM will not _consume_ the result of the walk if a
3250  * race with the primary MMU occurs.
3251  */
host_pfn_mapping_level(struct kvm * kvm,gfn_t gfn,const struct kvm_memory_slot * slot)3252 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
3253 				  const struct kvm_memory_slot *slot)
3254 {
3255 	int level = PG_LEVEL_4K;
3256 	unsigned long hva;
3257 	unsigned long flags;
3258 	pgd_t pgd;
3259 	p4d_t p4d;
3260 	pud_t pud;
3261 	pmd_t pmd;
3262 
3263 	/*
3264 	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
3265 	 * is not solely for performance, it's also necessary to avoid the
3266 	 * "writable" check in __gfn_to_hva_many(), which will always fail on
3267 	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
3268 	 * page fault steps have already verified the guest isn't writing a
3269 	 * read-only memslot.
3270 	 */
3271 	hva = __gfn_to_hva_memslot(slot, gfn);
3272 
3273 	/*
3274 	 * Disable IRQs to prevent concurrent tear down of host page tables,
3275 	 * e.g. if the primary MMU promotes a P*D to a huge page and then frees
3276 	 * the original page table.
3277 	 */
3278 	local_irq_save(flags);
3279 
3280 	/*
3281 	 * Read each entry once.  As above, a non-leaf entry can be promoted to
3282 	 * a huge page _during_ this walk.  Re-reading the entry could send the
3283 	 * walk into the weeks, e.g. p*d_leaf() returns false (sees the old
3284 	 * value) and then p*d_offset() walks into the target huge page instead
3285 	 * of the old page table (sees the new value).
3286 	 */
3287 	pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
3288 	if (pgd_none(pgd))
3289 		goto out;
3290 
3291 	p4d = READ_ONCE(*p4d_offset(&pgd, hva));
3292 	if (p4d_none(p4d) || !p4d_present(p4d))
3293 		goto out;
3294 
3295 	pud = READ_ONCE(*pud_offset(&p4d, hva));
3296 	if (pud_none(pud) || !pud_present(pud))
3297 		goto out;
3298 
3299 	if (pud_leaf(pud)) {
3300 		level = PG_LEVEL_1G;
3301 		goto out;
3302 	}
3303 
3304 	pmd = READ_ONCE(*pmd_offset(&pud, hva));
3305 	if (pmd_none(pmd) || !pmd_present(pmd))
3306 		goto out;
3307 
3308 	if (pmd_leaf(pmd))
3309 		level = PG_LEVEL_2M;
3310 
3311 out:
3312 	local_irq_restore(flags);
3313 	return level;
3314 }
3315 
kvm_max_level_for_order(int order)3316 static u8 kvm_max_level_for_order(int order)
3317 {
3318 	BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
3319 
3320 	KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
3321 			order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
3322 			order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
3323 
3324 	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
3325 		return PG_LEVEL_1G;
3326 
3327 	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
3328 		return PG_LEVEL_2M;
3329 
3330 	return PG_LEVEL_4K;
3331 }
3332 
kvm_gmem_max_mapping_level(struct kvm * kvm,struct kvm_page_fault * fault,const struct kvm_memory_slot * slot,gfn_t gfn,bool is_private)3333 static u8 kvm_gmem_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault,
3334 				     const struct kvm_memory_slot *slot, gfn_t gfn,
3335 				     bool is_private)
3336 {
3337 	u8 max_level, coco_level;
3338 	kvm_pfn_t pfn;
3339 
3340 	/* For faults, use the gmem information that was resolved earlier. */
3341 	if (fault) {
3342 		pfn = fault->pfn;
3343 		max_level = fault->max_level;
3344 	} else {
3345 		/* TODO: Call into guest_memfd once hugepages are supported. */
3346 		WARN_ONCE(1, "Get pfn+order from guest_memfd");
3347 		pfn = KVM_PFN_ERR_FAULT;
3348 		max_level = PG_LEVEL_4K;
3349 	}
3350 
3351 	if (max_level == PG_LEVEL_4K)
3352 		return max_level;
3353 
3354 	/*
3355 	 * CoCo may influence the max mapping level, e.g. due to RMP or S-EPT
3356 	 * restrictions.  A return of '0' means "no additional restrictions", to
3357 	 * allow for using an optional "ret0" static call.
3358 	 */
3359 	coco_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn, is_private);
3360 	if (coco_level)
3361 		max_level = min(max_level, coco_level);
3362 
3363 	return max_level;
3364 }
3365 
kvm_mmu_max_mapping_level(struct kvm * kvm,struct kvm_page_fault * fault,const struct kvm_memory_slot * slot,gfn_t gfn)3366 int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault,
3367 			      const struct kvm_memory_slot *slot, gfn_t gfn)
3368 {
3369 	struct kvm_lpage_info *linfo;
3370 	int host_level, max_level;
3371 	bool is_private;
3372 
3373 	lockdep_assert_held(&kvm->mmu_lock);
3374 
3375 	if (fault) {
3376 		max_level = fault->max_level;
3377 		is_private = fault->is_private;
3378 	} else {
3379 		max_level = PG_LEVEL_NUM;
3380 		is_private = kvm_mem_is_private(kvm, gfn);
3381 	}
3382 
3383 	max_level = min(max_level, max_huge_page_level);
3384 	for ( ; max_level > PG_LEVEL_4K; max_level--) {
3385 		linfo = lpage_info_slot(gfn, slot, max_level);
3386 		if (!linfo->disallow_lpage)
3387 			break;
3388 	}
3389 
3390 	if (max_level == PG_LEVEL_4K)
3391 		return PG_LEVEL_4K;
3392 
3393 	if (is_private || kvm_memslot_is_gmem_only(slot))
3394 		host_level = kvm_gmem_max_mapping_level(kvm, fault, slot, gfn,
3395 							is_private);
3396 	else
3397 		host_level = host_pfn_mapping_level(kvm, gfn, slot);
3398 	return min(host_level, max_level);
3399 }
3400 
kvm_mmu_hugepage_adjust(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)3401 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3402 {
3403 	struct kvm_memory_slot *slot = fault->slot;
3404 	kvm_pfn_t mask;
3405 
3406 	fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;
3407 
3408 	if (unlikely(fault->max_level == PG_LEVEL_4K))
3409 		return;
3410 
3411 	if (is_error_noslot_pfn(fault->pfn))
3412 		return;
3413 
3414 	if (kvm_slot_dirty_track_enabled(slot))
3415 		return;
3416 
3417 	/*
3418 	 * Enforce the iTLB multihit workaround after capturing the requested
3419 	 * level, which will be used to do precise, accurate accounting.
3420 	 */
3421 	fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, fault,
3422 						     fault->slot, fault->gfn);
3423 	if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
3424 		return;
3425 
3426 	/*
3427 	 * mmu_invalidate_retry() was successful and mmu_lock is held, so
3428 	 * the pmd can't be split from under us.
3429 	 */
3430 	fault->goal_level = fault->req_level;
3431 	mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
3432 	VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
3433 	fault->pfn &= ~mask;
3434 }
3435 
disallowed_hugepage_adjust(struct kvm_page_fault * fault,u64 spte,int cur_level)3436 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
3437 {
3438 	if (cur_level > PG_LEVEL_4K &&
3439 	    cur_level == fault->goal_level &&
3440 	    is_shadow_present_pte(spte) &&
3441 	    !is_large_pte(spte) &&
3442 	    spte_to_child_sp(spte)->nx_huge_page_disallowed) {
3443 		/*
3444 		 * A small SPTE exists for this pfn, but FNAME(fetch),
3445 		 * direct_map(), or kvm_tdp_mmu_map() would like to create a
3446 		 * large PTE instead: just force them to go down another level,
3447 		 * patching back for them into pfn the next 9 bits of the
3448 		 * address.
3449 		 */
3450 		u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
3451 				KVM_PAGES_PER_HPAGE(cur_level - 1);
3452 		fault->pfn |= fault->gfn & page_mask;
3453 		fault->goal_level--;
3454 	}
3455 }
3456 
direct_map(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)3457 static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3458 {
3459 	struct kvm_shadow_walk_iterator it;
3460 	struct kvm_mmu_page *sp;
3461 	int ret;
3462 	gfn_t base_gfn = fault->gfn;
3463 
3464 	kvm_mmu_hugepage_adjust(vcpu, fault);
3465 
3466 	trace_kvm_mmu_spte_requested(fault);
3467 	for_each_shadow_entry(vcpu, fault->addr, it) {
3468 		/*
3469 		 * We cannot overwrite existing page tables with an NX
3470 		 * large page, as the leaf could be executable.
3471 		 */
3472 		if (fault->nx_huge_page_workaround_enabled)
3473 			disallowed_hugepage_adjust(fault, *it.sptep, it.level);
3474 
3475 		base_gfn = gfn_round_for_level(fault->gfn, it.level);
3476 		if (it.level == fault->goal_level)
3477 			break;
3478 
3479 		sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL);
3480 		if (sp == ERR_PTR(-EEXIST))
3481 			continue;
3482 
3483 		link_shadow_page(vcpu, it.sptep, sp);
3484 		if (fault->huge_page_disallowed)
3485 			account_nx_huge_page(vcpu->kvm, sp,
3486 					     fault->req_level >= it.level);
3487 	}
3488 
3489 	if (WARN_ON_ONCE(it.level != fault->goal_level))
3490 		return -EFAULT;
3491 
3492 	ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
3493 			   base_gfn, fault->pfn, fault);
3494 	if (ret == RET_PF_SPURIOUS)
3495 		return ret;
3496 
3497 	direct_pte_prefetch(vcpu, it.sptep);
3498 	return ret;
3499 }
3500 
kvm_send_hwpoison_signal(struct kvm_memory_slot * slot,gfn_t gfn)3501 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
3502 {
3503 	unsigned long hva = gfn_to_hva_memslot(slot, gfn);
3504 
3505 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
3506 }
3507 
kvm_handle_error_pfn(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)3508 static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3509 {
3510 	if (is_sigpending_pfn(fault->pfn)) {
3511 		kvm_handle_signal_exit(vcpu);
3512 		return -EINTR;
3513 	}
3514 
3515 	/*
3516 	 * Do not cache the mmio info caused by writing the readonly gfn
3517 	 * into the spte otherwise read access on readonly gfn also can
3518 	 * caused mmio page fault and treat it as mmio access.
3519 	 */
3520 	if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
3521 		return RET_PF_EMULATE;
3522 
3523 	if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
3524 		kvm_send_hwpoison_signal(fault->slot, fault->gfn);
3525 		return RET_PF_RETRY;
3526 	}
3527 
3528 	return -EFAULT;
3529 }
3530 
kvm_handle_noslot_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault,unsigned int access)3531 static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
3532 				   struct kvm_page_fault *fault,
3533 				   unsigned int access)
3534 {
3535 	gva_t gva = fault->is_tdp ? 0 : fault->addr;
3536 
3537 	if (fault->is_private) {
3538 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
3539 		return -EFAULT;
3540 	}
3541 
3542 	vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3543 			     access & shadow_mmio_access_mask);
3544 
3545 	fault->slot = NULL;
3546 	fault->pfn = KVM_PFN_NOSLOT;
3547 	fault->map_writable = false;
3548 
3549 	/*
3550 	 * If MMIO caching is disabled, emulate immediately without
3551 	 * touching the shadow page tables as attempting to install an
3552 	 * MMIO SPTE will just be an expensive nop.
3553 	 */
3554 	if (unlikely(!enable_mmio_caching))
3555 		return RET_PF_EMULATE;
3556 
3557 	/*
3558 	 * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR,
3559 	 * any guest that generates such gfns is running nested and is being
3560 	 * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and
3561 	 * only if L1's MAXPHYADDR is inaccurate with respect to the
3562 	 * hardware's).
3563 	 */
3564 	if (unlikely(fault->gfn > kvm_mmu_max_gfn()))
3565 		return RET_PF_EMULATE;
3566 
3567 	return RET_PF_CONTINUE;
3568 }
3569 
page_fault_can_be_fast(struct kvm * kvm,struct kvm_page_fault * fault)3570 static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault)
3571 {
3572 	/*
3573 	 * Page faults with reserved bits set, i.e. faults on MMIO SPTEs, only
3574 	 * reach the common page fault handler if the SPTE has an invalid MMIO
3575 	 * generation number.  Refreshing the MMIO generation needs to go down
3576 	 * the slow path.  Note, EPT Misconfigs do NOT set the PRESENT flag!
3577 	 */
3578 	if (fault->rsvd)
3579 		return false;
3580 
3581 	/*
3582 	 * For hardware-protected VMs, certain conditions like attempting to
3583 	 * perform a write to a page which is not in the state that the guest
3584 	 * expects it to be in can result in a nested/extended #PF. In this
3585 	 * case, the below code might misconstrue this situation as being the
3586 	 * result of a write-protected access, and treat it as a spurious case
3587 	 * rather than taking any action to satisfy the real source of the #PF
3588 	 * such as generating a KVM_EXIT_MEMORY_FAULT. This can lead to the
3589 	 * guest spinning on a #PF indefinitely, so don't attempt the fast path
3590 	 * in this case.
3591 	 *
3592 	 * Note that the kvm_mem_is_private() check might race with an
3593 	 * attribute update, but this will either result in the guest spinning
3594 	 * on RET_PF_SPURIOUS until the update completes, or an actual spurious
3595 	 * case might go down the slow path. Either case will resolve itself.
3596 	 */
3597 	if (kvm->arch.has_private_mem &&
3598 	    fault->is_private != kvm_mem_is_private(kvm, fault->gfn))
3599 		return false;
3600 
3601 	/*
3602 	 * #PF can be fast if:
3603 	 *
3604 	 * 1. The shadow page table entry is not present and A/D bits are
3605 	 *    disabled _by KVM_, which could mean that the fault is potentially
3606 	 *    caused by access tracking (if enabled).  If A/D bits are enabled
3607 	 *    by KVM, but disabled by L1 for L2, KVM is forced to disable A/D
3608 	 *    bits for L2 and employ access tracking, but the fast page fault
3609 	 *    mechanism only supports direct MMUs.
3610 	 * 2. The shadow page table entry is present, the access is a write,
3611 	 *    and no reserved bits are set (MMIO SPTEs cannot be "fixed"), i.e.
3612 	 *    the fault was caused by a write-protection violation.  If the
3613 	 *    SPTE is MMU-writable (determined later), the fault can be fixed
3614 	 *    by setting the Writable bit, which can be done out of mmu_lock.
3615 	 */
3616 	if (!fault->present)
3617 		return !kvm_ad_enabled;
3618 
3619 	/*
3620 	 * Note, instruction fetches and writes are mutually exclusive, ignore
3621 	 * the "exec" flag.
3622 	 */
3623 	return fault->write;
3624 }
3625 
3626 /*
3627  * Returns true if the SPTE was fixed successfully. Otherwise,
3628  * someone else modified the SPTE from its original value.
3629  */
fast_pf_fix_direct_spte(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault,u64 * sptep,u64 old_spte,u64 new_spte)3630 static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
3631 				    struct kvm_page_fault *fault,
3632 				    u64 *sptep, u64 old_spte, u64 new_spte)
3633 {
3634 	/*
3635 	 * Theoretically we could also set dirty bit (and flush TLB) here in
3636 	 * order to eliminate unnecessary PML logging. See comments in
3637 	 * set_spte. But fast_page_fault is very unlikely to happen with PML
3638 	 * enabled, so we do not do this. This might result in the same GPA
3639 	 * to be logged in PML buffer again when the write really happens, and
3640 	 * eventually to be called by mark_page_dirty twice. But it's also no
3641 	 * harm. This also avoids the TLB flush needed after setting dirty bit
3642 	 * so non-PML cases won't be impacted.
3643 	 *
3644 	 * Compare with make_spte() where instead shadow_dirty_mask is set.
3645 	 */
3646 	if (!try_cmpxchg64(sptep, &old_spte, new_spte))
3647 		return false;
3648 
3649 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
3650 		mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3651 
3652 	return true;
3653 }
3654 
3655 /*
3656  * Returns the last level spte pointer of the shadow page walk for the given
3657  * gpa, and sets *spte to the spte value. This spte may be non-preset. If no
3658  * walk could be performed, returns NULL and *spte does not contain valid data.
3659  *
3660  * Contract:
3661  *  - Must be called between walk_shadow_page_lockless_{begin,end}.
3662  *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
3663  */
fast_pf_get_last_sptep(struct kvm_vcpu * vcpu,gpa_t gpa,u64 * spte)3664 static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
3665 {
3666 	struct kvm_shadow_walk_iterator iterator;
3667 	u64 old_spte;
3668 	u64 *sptep = NULL;
3669 
3670 	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
3671 		sptep = iterator.sptep;
3672 		*spte = old_spte;
3673 	}
3674 
3675 	return sptep;
3676 }
3677 
3678 /*
3679  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3680  */
fast_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)3681 static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3682 {
3683 	struct kvm_mmu_page *sp;
3684 	int ret = RET_PF_INVALID;
3685 	u64 spte;
3686 	u64 *sptep;
3687 	uint retry_count = 0;
3688 
3689 	if (!page_fault_can_be_fast(vcpu->kvm, fault))
3690 		return ret;
3691 
3692 	walk_shadow_page_lockless_begin(vcpu);
3693 
3694 	do {
3695 		u64 new_spte;
3696 
3697 		if (tdp_mmu_enabled)
3698 			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte);
3699 		else
3700 			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3701 
3702 		/*
3703 		 * It's entirely possible for the mapping to have been zapped
3704 		 * by a different task, but the root page should always be
3705 		 * available as the vCPU holds a reference to its root(s).
3706 		 */
3707 		if (WARN_ON_ONCE(!sptep))
3708 			spte = FROZEN_SPTE;
3709 
3710 		if (!is_shadow_present_pte(spte))
3711 			break;
3712 
3713 		sp = sptep_to_sp(sptep);
3714 		if (!is_last_spte(spte, sp->role.level))
3715 			break;
3716 
3717 		/*
3718 		 * Check whether the memory access that caused the fault would
3719 		 * still cause it if it were to be performed right now. If not,
3720 		 * then this is a spurious fault caused by TLB lazily flushed,
3721 		 * or some other CPU has already fixed the PTE after the
3722 		 * current CPU took the fault.
3723 		 *
3724 		 * Need not check the access of upper level table entries since
3725 		 * they are always ACC_ALL.
3726 		 */
3727 		if (is_access_allowed(fault, spte)) {
3728 			ret = RET_PF_SPURIOUS;
3729 			break;
3730 		}
3731 
3732 		new_spte = spte;
3733 
3734 		/*
3735 		 * KVM only supports fixing page faults outside of MMU lock for
3736 		 * direct MMUs, nested MMUs are always indirect, and KVM always
3737 		 * uses A/D bits for non-nested MMUs.  Thus, if A/D bits are
3738 		 * enabled, the SPTE can't be an access-tracked SPTE.
3739 		 */
3740 		if (unlikely(!kvm_ad_enabled) && is_access_track_spte(spte))
3741 			new_spte = restore_acc_track_spte(new_spte) |
3742 				   shadow_accessed_mask;
3743 
3744 		/*
3745 		 * To keep things simple, only SPTEs that are MMU-writable can
3746 		 * be made fully writable outside of mmu_lock, e.g. only SPTEs
3747 		 * that were write-protected for dirty-logging or access
3748 		 * tracking are handled here.  Don't bother checking if the
3749 		 * SPTE is writable to prioritize running with A/D bits enabled.
3750 		 * The is_access_allowed() check above handles the common case
3751 		 * of the fault being spurious, and the SPTE is known to be
3752 		 * shadow-present, i.e. except for access tracking restoration
3753 		 * making the new SPTE writable, the check is wasteful.
3754 		 */
3755 		if (fault->write && is_mmu_writable_spte(spte)) {
3756 			new_spte |= PT_WRITABLE_MASK;
3757 
3758 			/*
3759 			 * Do not fix write-permission on the large spte when
3760 			 * dirty logging is enabled. Since we only dirty the
3761 			 * first page into the dirty-bitmap in
3762 			 * fast_pf_fix_direct_spte(), other pages are missed
3763 			 * if its slot has dirty logging enabled.
3764 			 *
3765 			 * Instead, we let the slow page fault path create a
3766 			 * normal spte to fix the access.
3767 			 */
3768 			if (sp->role.level > PG_LEVEL_4K &&
3769 			    kvm_slot_dirty_track_enabled(fault->slot))
3770 				break;
3771 		}
3772 
3773 		/* Verify that the fault can be handled in the fast path */
3774 		if (new_spte == spte ||
3775 		    !is_access_allowed(fault, new_spte))
3776 			break;
3777 
3778 		/*
3779 		 * Currently, fast page fault only works for direct mapping
3780 		 * since the gfn is not stable for indirect shadow page. See
3781 		 * Documentation/virt/kvm/locking.rst to get more detail.
3782 		 */
3783 		if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3784 			ret = RET_PF_FIXED;
3785 			break;
3786 		}
3787 
3788 		if (++retry_count > 4) {
3789 			pr_warn_once("Fast #PF retrying more than 4 times.\n");
3790 			break;
3791 		}
3792 
3793 	} while (true);
3794 
3795 	trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3796 	walk_shadow_page_lockless_end(vcpu);
3797 
3798 	if (ret != RET_PF_INVALID)
3799 		vcpu->stat.pf_fast++;
3800 
3801 	return ret;
3802 }
3803 
mmu_free_root_page(struct kvm * kvm,hpa_t * root_hpa,struct list_head * invalid_list)3804 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3805 			       struct list_head *invalid_list)
3806 {
3807 	struct kvm_mmu_page *sp;
3808 
3809 	if (!VALID_PAGE(*root_hpa))
3810 		return;
3811 
3812 	sp = root_to_sp(*root_hpa);
3813 	if (WARN_ON_ONCE(!sp))
3814 		return;
3815 
3816 	if (is_tdp_mmu_page(sp)) {
3817 		lockdep_assert_held_read(&kvm->mmu_lock);
3818 		kvm_tdp_mmu_put_root(kvm, sp);
3819 	} else {
3820 		lockdep_assert_held_write(&kvm->mmu_lock);
3821 		if (!--sp->root_count && sp->role.invalid)
3822 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3823 	}
3824 
3825 	*root_hpa = INVALID_PAGE;
3826 }
3827 
3828 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
kvm_mmu_free_roots(struct kvm * kvm,struct kvm_mmu * mmu,ulong roots_to_free)3829 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
3830 			ulong roots_to_free)
3831 {
3832 	bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct;
3833 	int i;
3834 	LIST_HEAD(invalid_list);
3835 	bool free_active_root;
3836 
3837 	WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL);
3838 
3839 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3840 
3841 	/* Before acquiring the MMU lock, see if we need to do any real work. */
3842 	free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT)
3843 		&& VALID_PAGE(mmu->root.hpa);
3844 
3845 	if (!free_active_root) {
3846 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3847 			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3848 			    VALID_PAGE(mmu->prev_roots[i].hpa))
3849 				break;
3850 
3851 		if (i == KVM_MMU_NUM_PREV_ROOTS)
3852 			return;
3853 	}
3854 
3855 	if (is_tdp_mmu)
3856 		read_lock(&kvm->mmu_lock);
3857 	else
3858 		write_lock(&kvm->mmu_lock);
3859 
3860 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3861 		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3862 			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3863 					   &invalid_list);
3864 
3865 	if (free_active_root) {
3866 		if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
3867 			/* Nothing to cleanup for dummy roots. */
3868 		} else if (root_to_sp(mmu->root.hpa)) {
3869 			mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
3870 		} else if (mmu->pae_root) {
3871 			for (i = 0; i < 4; ++i) {
3872 				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3873 					continue;
3874 
3875 				mmu_free_root_page(kvm, &mmu->pae_root[i],
3876 						   &invalid_list);
3877 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3878 			}
3879 		}
3880 		mmu->root.hpa = INVALID_PAGE;
3881 		mmu->root.pgd = 0;
3882 	}
3883 
3884 	if (is_tdp_mmu) {
3885 		read_unlock(&kvm->mmu_lock);
3886 		WARN_ON_ONCE(!list_empty(&invalid_list));
3887 	} else {
3888 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
3889 		write_unlock(&kvm->mmu_lock);
3890 	}
3891 }
3892 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_roots);
3893 
kvm_mmu_free_guest_mode_roots(struct kvm * kvm,struct kvm_mmu * mmu)3894 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
3895 {
3896 	unsigned long roots_to_free = 0;
3897 	struct kvm_mmu_page *sp;
3898 	hpa_t root_hpa;
3899 	int i;
3900 
3901 	/*
3902 	 * This should not be called while L2 is active, L2 can't invalidate
3903 	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
3904 	 */
3905 	WARN_ON_ONCE(mmu->root_role.guest_mode);
3906 
3907 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3908 		root_hpa = mmu->prev_roots[i].hpa;
3909 		if (!VALID_PAGE(root_hpa))
3910 			continue;
3911 
3912 		sp = root_to_sp(root_hpa);
3913 		if (!sp || sp->role.guest_mode)
3914 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3915 	}
3916 
3917 	kvm_mmu_free_roots(kvm, mmu, roots_to_free);
3918 }
3919 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_guest_mode_roots);
3920 
mmu_alloc_root(struct kvm_vcpu * vcpu,gfn_t gfn,int quadrant,u8 level)3921 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
3922 			    u8 level)
3923 {
3924 	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3925 	struct kvm_mmu_page *sp;
3926 
3927 	role.level = level;
3928 	role.quadrant = quadrant;
3929 
3930 	WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
3931 	WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3932 
3933 	sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
3934 	++sp->root_count;
3935 
3936 	return __pa(sp->spt);
3937 }
3938 
mmu_alloc_direct_roots(struct kvm_vcpu * vcpu)3939 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3940 {
3941 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3942 	u8 shadow_root_level = mmu->root_role.level;
3943 	hpa_t root;
3944 	unsigned i;
3945 	int r;
3946 
3947 	if (tdp_mmu_enabled) {
3948 		if (kvm_has_mirrored_tdp(vcpu->kvm) &&
3949 		    !VALID_PAGE(mmu->mirror_root_hpa))
3950 			kvm_tdp_mmu_alloc_root(vcpu, true);
3951 		kvm_tdp_mmu_alloc_root(vcpu, false);
3952 		return 0;
3953 	}
3954 
3955 	write_lock(&vcpu->kvm->mmu_lock);
3956 	r = make_mmu_pages_available(vcpu);
3957 	if (r < 0)
3958 		goto out_unlock;
3959 
3960 	if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3961 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
3962 		mmu->root.hpa = root;
3963 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3964 		if (WARN_ON_ONCE(!mmu->pae_root)) {
3965 			r = -EIO;
3966 			goto out_unlock;
3967 		}
3968 
3969 		for (i = 0; i < 4; ++i) {
3970 			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3971 
3972 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0,
3973 					      PT32_ROOT_LEVEL);
3974 			mmu->pae_root[i] = root | PT_PRESENT_MASK |
3975 					   shadow_me_value;
3976 		}
3977 		mmu->root.hpa = __pa(mmu->pae_root);
3978 	} else {
3979 		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3980 		r = -EIO;
3981 		goto out_unlock;
3982 	}
3983 
3984 	/* root.pgd is ignored for direct MMUs. */
3985 	mmu->root.pgd = 0;
3986 out_unlock:
3987 	write_unlock(&vcpu->kvm->mmu_lock);
3988 	return r;
3989 }
3990 
kvm_mmu_alloc_page_hash(struct kvm * kvm)3991 static int kvm_mmu_alloc_page_hash(struct kvm *kvm)
3992 {
3993 	struct hlist_head *h;
3994 
3995 	if (kvm->arch.mmu_page_hash)
3996 		return 0;
3997 
3998 	h = kvzalloc_objs(*h, KVM_NUM_MMU_PAGES, GFP_KERNEL_ACCOUNT);
3999 	if (!h)
4000 		return -ENOMEM;
4001 
4002 	/*
4003 	 * Ensure the hash table pointer is set only after all stores to zero
4004 	 * the memory are retired.  Pairs with the smp_load_acquire() in
4005 	 * kvm_get_mmu_page_hash().  Note, mmu_lock must be held for write to
4006 	 * add (or remove) shadow pages, and so readers are guaranteed to see
4007 	 * an empty list for their current mmu_lock critical section.
4008 	 */
4009 	smp_store_release(&kvm->arch.mmu_page_hash, h);
4010 	return 0;
4011 }
4012 
mmu_first_shadow_root_alloc(struct kvm * kvm)4013 static int mmu_first_shadow_root_alloc(struct kvm *kvm)
4014 {
4015 	struct kvm_memslots *slots;
4016 	struct kvm_memory_slot *slot;
4017 	int r = 0, i, bkt;
4018 
4019 	/*
4020 	 * Check if this is the first shadow root being allocated before
4021 	 * taking the lock.
4022 	 */
4023 	if (kvm_shadow_root_allocated(kvm))
4024 		return 0;
4025 
4026 	mutex_lock(&kvm->slots_arch_lock);
4027 
4028 	/* Recheck, under the lock, whether this is the first shadow root. */
4029 	if (kvm_shadow_root_allocated(kvm))
4030 		goto out_unlock;
4031 
4032 	r = kvm_mmu_alloc_page_hash(kvm);
4033 	if (r)
4034 		goto out_unlock;
4035 
4036 	/*
4037 	 * Check if memslot metadata actually needs to be allocated, e.g. all
4038 	 * metadata will be allocated upfront if TDP is disabled.
4039 	 */
4040 	if (kvm_memslots_have_rmaps(kvm) &&
4041 	    kvm_page_track_write_tracking_enabled(kvm))
4042 		goto out_success;
4043 
4044 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
4045 		slots = __kvm_memslots(kvm, i);
4046 		kvm_for_each_memslot(slot, bkt, slots) {
4047 			/*
4048 			 * Both of these functions are no-ops if the target is
4049 			 * already allocated, so unconditionally calling both
4050 			 * is safe.  Intentionally do NOT free allocations on
4051 			 * failure to avoid having to track which allocations
4052 			 * were made now versus when the memslot was created.
4053 			 * The metadata is guaranteed to be freed when the slot
4054 			 * is freed, and will be kept/used if userspace retries
4055 			 * KVM_RUN instead of killing the VM.
4056 			 */
4057 			r = memslot_rmap_alloc(slot, slot->npages);
4058 			if (r)
4059 				goto out_unlock;
4060 			r = kvm_page_track_write_tracking_alloc(slot);
4061 			if (r)
4062 				goto out_unlock;
4063 		}
4064 	}
4065 
4066 	/*
4067 	 * Ensure that shadow_root_allocated becomes true strictly after
4068 	 * all the related pointers are set.
4069 	 */
4070 out_success:
4071 	smp_store_release(&kvm->arch.shadow_root_allocated, true);
4072 
4073 out_unlock:
4074 	mutex_unlock(&kvm->slots_arch_lock);
4075 	return r;
4076 }
4077 
mmu_alloc_shadow_roots(struct kvm_vcpu * vcpu)4078 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
4079 {
4080 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4081 	u64 pdptrs[4], pm_mask;
4082 	gfn_t root_gfn, root_pgd;
4083 	int quadrant, i, r;
4084 	hpa_t root;
4085 
4086 	root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
4087 	root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
4088 
4089 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
4090 		mmu->root.hpa = kvm_mmu_get_dummy_root();
4091 		return 0;
4092 	}
4093 
4094 	/*
4095 	 * On SVM, reading PDPTRs might access guest memory, which might fault
4096 	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
4097 	 */
4098 	if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
4099 		for (i = 0; i < 4; ++i) {
4100 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
4101 			if (!(pdptrs[i] & PT_PRESENT_MASK))
4102 				continue;
4103 
4104 			if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT))
4105 				pdptrs[i] = 0;
4106 		}
4107 	}
4108 
4109 	r = mmu_first_shadow_root_alloc(vcpu->kvm);
4110 	if (r)
4111 		return r;
4112 
4113 	write_lock(&vcpu->kvm->mmu_lock);
4114 	r = make_mmu_pages_available(vcpu);
4115 	if (r < 0)
4116 		goto out_unlock;
4117 
4118 	/*
4119 	 * Do we shadow a long mode page table? If so we need to
4120 	 * write-protect the guests page table root.
4121 	 */
4122 	if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4123 		root = mmu_alloc_root(vcpu, root_gfn, 0,
4124 				      mmu->root_role.level);
4125 		mmu->root.hpa = root;
4126 		goto set_root_pgd;
4127 	}
4128 
4129 	if (WARN_ON_ONCE(!mmu->pae_root)) {
4130 		r = -EIO;
4131 		goto out_unlock;
4132 	}
4133 
4134 	/*
4135 	 * We shadow a 32 bit page table. This may be a legacy 2-level
4136 	 * or a PAE 3-level page table. In either case we need to be aware that
4137 	 * the shadow page table may be a PAE or a long mode page table.
4138 	 */
4139 	pm_mask = PT_PRESENT_MASK | shadow_me_value;
4140 	if (mmu->root_role.level >= PT64_ROOT_4LEVEL) {
4141 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
4142 
4143 		if (WARN_ON_ONCE(!mmu->pml4_root)) {
4144 			r = -EIO;
4145 			goto out_unlock;
4146 		}
4147 		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
4148 
4149 		if (mmu->root_role.level == PT64_ROOT_5LEVEL) {
4150 			if (WARN_ON_ONCE(!mmu->pml5_root)) {
4151 				r = -EIO;
4152 				goto out_unlock;
4153 			}
4154 			mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
4155 		}
4156 	}
4157 
4158 	for (i = 0; i < 4; ++i) {
4159 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
4160 
4161 		if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
4162 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
4163 				mmu->pae_root[i] = INVALID_PAE_ROOT;
4164 				continue;
4165 			}
4166 			root_gfn = pdptrs[i] >> PAGE_SHIFT;
4167 		}
4168 
4169 		/*
4170 		 * If shadowing 32-bit non-PAE page tables, each PAE page
4171 		 * directory maps one quarter of the guest's non-PAE page
4172 		 * directory. Othwerise each PAE page direct shadows one guest
4173 		 * PAE page directory so that quadrant should be 0.
4174 		 */
4175 		quadrant = (mmu->cpu_role.base.level == PT32_ROOT_LEVEL) ? i : 0;
4176 
4177 		root = mmu_alloc_root(vcpu, root_gfn, quadrant, PT32_ROOT_LEVEL);
4178 		mmu->pae_root[i] = root | pm_mask;
4179 	}
4180 
4181 	if (mmu->root_role.level == PT64_ROOT_5LEVEL)
4182 		mmu->root.hpa = __pa(mmu->pml5_root);
4183 	else if (mmu->root_role.level == PT64_ROOT_4LEVEL)
4184 		mmu->root.hpa = __pa(mmu->pml4_root);
4185 	else
4186 		mmu->root.hpa = __pa(mmu->pae_root);
4187 
4188 set_root_pgd:
4189 	mmu->root.pgd = root_pgd;
4190 out_unlock:
4191 	write_unlock(&vcpu->kvm->mmu_lock);
4192 
4193 	return r;
4194 }
4195 
mmu_alloc_special_roots(struct kvm_vcpu * vcpu)4196 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
4197 {
4198 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4199 	bool need_pml5 = mmu->root_role.level > PT64_ROOT_4LEVEL;
4200 	u64 *pml5_root = NULL;
4201 	u64 *pml4_root = NULL;
4202 	u64 *pae_root;
4203 
4204 	/*
4205 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
4206 	 * tables are allocated and initialized at root creation as there is no
4207 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
4208 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
4209 	 */
4210 	if (mmu->root_role.direct ||
4211 	    mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
4212 	    mmu->root_role.level < PT64_ROOT_4LEVEL)
4213 		return 0;
4214 
4215 	/*
4216 	 * NPT, the only paging mode that uses this horror, uses a fixed number
4217 	 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
4218 	 * all MMus are 5-level.  Thus, this can safely require that pml5_root
4219 	 * is allocated if the other roots are valid and pml5 is needed, as any
4220 	 * prior MMU would also have required pml5.
4221 	 */
4222 	if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
4223 		return 0;
4224 
4225 	/*
4226 	 * The special roots should always be allocated in concert.  Yell and
4227 	 * bail if KVM ends up in a state where only one of the roots is valid.
4228 	 */
4229 	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
4230 			 (need_pml5 && mmu->pml5_root)))
4231 		return -EIO;
4232 
4233 	/*
4234 	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
4235 	 * doesn't need to be decrypted.
4236 	 */
4237 	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
4238 	if (!pae_root)
4239 		return -ENOMEM;
4240 
4241 #ifdef CONFIG_X86_64
4242 	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
4243 	if (!pml4_root)
4244 		goto err_pml4;
4245 
4246 	if (need_pml5) {
4247 		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
4248 		if (!pml5_root)
4249 			goto err_pml5;
4250 	}
4251 #endif
4252 
4253 	mmu->pae_root = pae_root;
4254 	mmu->pml4_root = pml4_root;
4255 	mmu->pml5_root = pml5_root;
4256 
4257 	return 0;
4258 
4259 #ifdef CONFIG_X86_64
4260 err_pml5:
4261 	free_page((unsigned long)pml4_root);
4262 err_pml4:
4263 	free_page((unsigned long)pae_root);
4264 	return -ENOMEM;
4265 #endif
4266 }
4267 
is_unsync_root(hpa_t root)4268 static bool is_unsync_root(hpa_t root)
4269 {
4270 	struct kvm_mmu_page *sp;
4271 
4272 	if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
4273 		return false;
4274 
4275 	/*
4276 	 * The read barrier orders the CPU's read of SPTE.W during the page table
4277 	 * walk before the reads of sp->unsync/sp->unsync_children here.
4278 	 *
4279 	 * Even if another CPU was marking the SP as unsync-ed simultaneously,
4280 	 * any guest page table changes are not guaranteed to be visible anyway
4281 	 * until this VCPU issues a TLB flush strictly after those changes are
4282 	 * made.  We only need to ensure that the other CPU sets these flags
4283 	 * before any actual changes to the page tables are made.  The comments
4284 	 * in mmu_try_to_unsync_pages() describe what could go wrong if this
4285 	 * requirement isn't satisfied.
4286 	 */
4287 	smp_rmb();
4288 	sp = root_to_sp(root);
4289 
4290 	/*
4291 	 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
4292 	 * PDPTEs for a given PAE root need to be synchronized individually.
4293 	 */
4294 	if (WARN_ON_ONCE(!sp))
4295 		return false;
4296 
4297 	if (sp->unsync || sp->unsync_children)
4298 		return true;
4299 
4300 	return false;
4301 }
4302 
kvm_mmu_sync_roots(struct kvm_vcpu * vcpu)4303 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
4304 {
4305 	int i;
4306 	struct kvm_mmu_page *sp;
4307 
4308 	if (vcpu->arch.mmu->root_role.direct)
4309 		return;
4310 
4311 	if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4312 		return;
4313 
4314 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4315 
4316 	if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4317 		hpa_t root = vcpu->arch.mmu->root.hpa;
4318 
4319 		if (!is_unsync_root(root))
4320 			return;
4321 
4322 		sp = root_to_sp(root);
4323 
4324 		write_lock(&vcpu->kvm->mmu_lock);
4325 		mmu_sync_children(vcpu, sp, true);
4326 		write_unlock(&vcpu->kvm->mmu_lock);
4327 		return;
4328 	}
4329 
4330 	write_lock(&vcpu->kvm->mmu_lock);
4331 
4332 	for (i = 0; i < 4; ++i) {
4333 		hpa_t root = vcpu->arch.mmu->pae_root[i];
4334 
4335 		if (IS_VALID_PAE_ROOT(root)) {
4336 			sp = spte_to_child_sp(root);
4337 			mmu_sync_children(vcpu, sp, true);
4338 		}
4339 	}
4340 
4341 	write_unlock(&vcpu->kvm->mmu_lock);
4342 }
4343 
kvm_mmu_sync_prev_roots(struct kvm_vcpu * vcpu)4344 void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
4345 {
4346 	unsigned long roots_to_free = 0;
4347 	int i;
4348 
4349 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4350 		if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
4351 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
4352 
4353 	/* sync prev_roots by simply freeing them */
4354 	kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
4355 }
4356 
nonpaging_gva_to_gpa(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu,gpa_t vaddr,u64 access,struct x86_exception * exception)4357 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4358 				  gpa_t vaddr, u64 access,
4359 				  struct x86_exception *exception)
4360 {
4361 	if (exception)
4362 		exception->error_code = 0;
4363 	return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
4364 }
4365 
mmio_info_in_cache(struct kvm_vcpu * vcpu,u64 addr,bool direct)4366 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4367 {
4368 	/*
4369 	 * A nested guest cannot use the MMIO cache if it is using nested
4370 	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
4371 	 */
4372 	if (mmu_is_nested(vcpu))
4373 		return false;
4374 
4375 	if (direct)
4376 		return vcpu_match_mmio_gpa(vcpu, addr);
4377 
4378 	return vcpu_match_mmio_gva(vcpu, addr);
4379 }
4380 
4381 /*
4382  * Return the level of the lowest level SPTE added to sptes.
4383  * That SPTE may be non-present.
4384  *
4385  * Must be called between walk_shadow_page_lockless_{begin,end}.
4386  */
get_walk(struct kvm_vcpu * vcpu,u64 addr,u64 * sptes,int * root_level)4387 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
4388 {
4389 	struct kvm_shadow_walk_iterator iterator;
4390 	int leaf = -1;
4391 	u64 spte;
4392 
4393 	for (shadow_walk_init(&iterator, vcpu, addr),
4394 	     *root_level = iterator.level;
4395 	     shadow_walk_okay(&iterator);
4396 	     __shadow_walk_next(&iterator, spte)) {
4397 		leaf = iterator.level;
4398 		spte = mmu_spte_get_lockless(iterator.sptep);
4399 
4400 		sptes[leaf] = spte;
4401 	}
4402 
4403 	return leaf;
4404 }
4405 
get_sptes_lockless(struct kvm_vcpu * vcpu,u64 addr,u64 * sptes,int * root_level)4406 static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
4407 			      int *root_level)
4408 {
4409 	int leaf;
4410 
4411 	walk_shadow_page_lockless_begin(vcpu);
4412 
4413 	if (is_tdp_mmu_active(vcpu))
4414 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level);
4415 	else
4416 		leaf = get_walk(vcpu, addr, sptes, root_level);
4417 
4418 	walk_shadow_page_lockless_end(vcpu);
4419 	return leaf;
4420 }
4421 
4422 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
get_mmio_spte(struct kvm_vcpu * vcpu,u64 addr,u64 * sptep)4423 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
4424 {
4425 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
4426 	struct rsvd_bits_validate *rsvd_check;
4427 	int root, leaf, level;
4428 	bool reserved = false;
4429 
4430 	leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
4431 	if (unlikely(leaf < 0)) {
4432 		*sptep = 0ull;
4433 		return reserved;
4434 	}
4435 
4436 	*sptep = sptes[leaf];
4437 
4438 	/*
4439 	 * Skip reserved bits checks on the terminal leaf if it's not a valid
4440 	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
4441 	 * design, always have reserved bits set.  The purpose of the checks is
4442 	 * to detect reserved bits on non-MMIO SPTEs. i.e. buggy SPTEs.
4443 	 */
4444 	if (!is_shadow_present_pte(sptes[leaf]))
4445 		leaf++;
4446 
4447 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
4448 
4449 	for (level = root; level >= leaf; level--)
4450 		reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
4451 
4452 	if (reserved) {
4453 		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
4454 		       __func__, addr);
4455 		for (level = root; level >= leaf; level--)
4456 			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
4457 			       sptes[level], level,
4458 			       get_rsvd_bits(rsvd_check, sptes[level], level));
4459 	}
4460 
4461 	return reserved;
4462 }
4463 
handle_mmio_page_fault(struct kvm_vcpu * vcpu,u64 addr,bool direct)4464 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4465 {
4466 	u64 spte;
4467 	bool reserved;
4468 
4469 	if (mmio_info_in_cache(vcpu, addr, direct))
4470 		return RET_PF_EMULATE;
4471 
4472 	reserved = get_mmio_spte(vcpu, addr, &spte);
4473 	if (WARN_ON_ONCE(reserved))
4474 		return -EINVAL;
4475 
4476 	if (is_mmio_spte(vcpu->kvm, spte)) {
4477 		gfn_t gfn = get_mmio_spte_gfn(spte);
4478 		unsigned int access = get_mmio_spte_access(spte);
4479 
4480 		if (!check_mmio_spte(vcpu, spte))
4481 			return RET_PF_INVALID;
4482 
4483 		if (direct)
4484 			addr = 0;
4485 
4486 		trace_handle_mmio_page_fault(addr, gfn, access);
4487 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4488 		return RET_PF_EMULATE;
4489 	}
4490 
4491 	/*
4492 	 * If the page table is zapped by other cpus, let CPU fault again on
4493 	 * the address.
4494 	 */
4495 	return RET_PF_RETRY;
4496 }
4497 
page_fault_handle_page_track(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4498 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
4499 					 struct kvm_page_fault *fault)
4500 {
4501 	if (unlikely(fault->rsvd))
4502 		return false;
4503 
4504 	if (!fault->present || !fault->write)
4505 		return false;
4506 
4507 	/*
4508 	 * guest is writing the page which is write tracked which can
4509 	 * not be fixed by page fault handler.
4510 	 */
4511 	if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
4512 		return true;
4513 
4514 	return false;
4515 }
4516 
shadow_page_table_clear_flood(struct kvm_vcpu * vcpu,gva_t addr)4517 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
4518 {
4519 	struct kvm_shadow_walk_iterator iterator;
4520 	u64 spte;
4521 
4522 	walk_shadow_page_lockless_begin(vcpu);
4523 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
4524 		clear_sp_write_flooding_count(iterator.sptep);
4525 	walk_shadow_page_lockless_end(vcpu);
4526 }
4527 
alloc_apf_token(struct kvm_vcpu * vcpu)4528 static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
4529 {
4530 	/* make sure the token value is not 0 */
4531 	u32 id = vcpu->arch.apf.id;
4532 
4533 	if (id << 12 == 0)
4534 		vcpu->arch.apf.id = 1;
4535 
4536 	return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
4537 }
4538 
kvm_arch_setup_async_pf(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4539 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
4540 				    struct kvm_page_fault *fault)
4541 {
4542 	struct kvm_arch_async_pf arch;
4543 
4544 	arch.token = alloc_apf_token(vcpu);
4545 	arch.gfn = fault->gfn;
4546 	arch.error_code = fault->error_code;
4547 	arch.direct_map = vcpu->arch.mmu->root_role.direct;
4548 	if (arch.direct_map)
4549 		arch.cr3 = (unsigned long)INVALID_GPA;
4550 	else
4551 		arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
4552 
4553 	return kvm_setup_async_pf(vcpu, fault->addr,
4554 				  kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch);
4555 }
4556 
kvm_arch_async_page_ready(struct kvm_vcpu * vcpu,struct kvm_async_pf * work)4557 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
4558 {
4559 	int r;
4560 
4561 	if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS))
4562 		return;
4563 
4564 	if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
4565 	      work->wakeup_all)
4566 		return;
4567 
4568 	r = kvm_mmu_reload(vcpu);
4569 	if (unlikely(r))
4570 		return;
4571 
4572 	if (!vcpu->arch.mmu->root_role.direct &&
4573 	      work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
4574 		return;
4575 
4576 	r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code,
4577 				  true, NULL, NULL);
4578 
4579 	/*
4580 	 * Account fixed page faults, otherwise they'll never be counted, but
4581 	 * ignore stats for all other return times.  Page-ready "faults" aren't
4582 	 * truly spurious and never trigger emulation
4583 	 */
4584 	if (r == RET_PF_FIXED)
4585 		vcpu->stat.pf_fixed++;
4586 }
4587 
kvm_mmu_finish_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault,int r)4588 static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
4589 				      struct kvm_page_fault *fault, int r)
4590 {
4591 	kvm_release_faultin_page(vcpu->kvm, fault->refcounted_page,
4592 				 r == RET_PF_RETRY, fault->map_writable);
4593 }
4594 
kvm_mmu_faultin_pfn_gmem(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4595 static int kvm_mmu_faultin_pfn_gmem(struct kvm_vcpu *vcpu,
4596 				    struct kvm_page_fault *fault)
4597 {
4598 	int max_order, r;
4599 
4600 	if (!kvm_slot_has_gmem(fault->slot)) {
4601 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4602 		return -EFAULT;
4603 	}
4604 
4605 	r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
4606 			     &fault->refcounted_page, &max_order);
4607 	if (r) {
4608 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4609 		return r;
4610 	}
4611 
4612 	fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
4613 	fault->max_level = kvm_max_level_for_order(max_order);
4614 
4615 	return RET_PF_CONTINUE;
4616 }
4617 
__kvm_mmu_faultin_pfn(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4618 static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
4619 				 struct kvm_page_fault *fault)
4620 {
4621 	unsigned int foll = fault->write ? FOLL_WRITE : 0;
4622 
4623 	if (fault->is_private || kvm_memslot_is_gmem_only(fault->slot))
4624 		return kvm_mmu_faultin_pfn_gmem(vcpu, fault);
4625 
4626 	foll |= FOLL_NOWAIT;
4627 	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
4628 				       &fault->map_writable, &fault->refcounted_page);
4629 
4630 	/*
4631 	 * If resolving the page failed because I/O is needed to fault-in the
4632 	 * page, then either set up an asynchronous #PF to do the I/O, or if
4633 	 * doing an async #PF isn't possible, retry with I/O allowed.  All
4634 	 * other failures are terminal, i.e. retrying won't help.
4635 	 */
4636 	if (fault->pfn != KVM_PFN_ERR_NEEDS_IO)
4637 		return RET_PF_CONTINUE;
4638 
4639 	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
4640 		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
4641 		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
4642 			trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
4643 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
4644 			return RET_PF_RETRY;
4645 		} else if (kvm_arch_setup_async_pf(vcpu, fault)) {
4646 			return RET_PF_RETRY;
4647 		}
4648 	}
4649 
4650 	/*
4651 	 * Allow gup to bail on pending non-fatal signals when it's also allowed
4652 	 * to wait for IO.  Note, gup always bails if it is unable to quickly
4653 	 * get a page and a fatal signal, i.e. SIGKILL, is pending.
4654 	 */
4655 	foll |= FOLL_INTERRUPTIBLE;
4656 	foll &= ~FOLL_NOWAIT;
4657 	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
4658 				       &fault->map_writable, &fault->refcounted_page);
4659 
4660 	return RET_PF_CONTINUE;
4661 }
4662 
kvm_mmu_faultin_pfn(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault,unsigned int access)4663 static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
4664 			       struct kvm_page_fault *fault, unsigned int access)
4665 {
4666 	struct kvm_memory_slot *slot = fault->slot;
4667 	struct kvm *kvm = vcpu->kvm;
4668 	int ret;
4669 
4670 	if (KVM_BUG_ON(kvm_is_gfn_alias(kvm, fault->gfn), kvm))
4671 		return -EFAULT;
4672 
4673 	/*
4674 	 * Note that the mmu_invalidate_seq also serves to detect a concurrent
4675 	 * change in attributes.  is_page_fault_stale() will detect an
4676 	 * invalidation relate to fault->fn and resume the guest without
4677 	 * installing a mapping in the page tables.
4678 	 */
4679 	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
4680 	smp_rmb();
4681 
4682 	/*
4683 	 * Now that we have a snapshot of mmu_invalidate_seq we can check for a
4684 	 * private vs. shared mismatch.
4685 	 */
4686 	if (fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) {
4687 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4688 		return -EFAULT;
4689 	}
4690 
4691 	if (unlikely(!slot))
4692 		return kvm_handle_noslot_fault(vcpu, fault, access);
4693 
4694 	/*
4695 	 * Retry the page fault if the gfn hit a memslot that is being deleted
4696 	 * or moved.  This ensures any existing SPTEs for the old memslot will
4697 	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.  Punt the
4698 	 * error to userspace if this is a prefault, as KVM's prefaulting ABI
4699 	 * doesn't provide the same forward progress guarantees as KVM_RUN.
4700 	 */
4701 	if (slot->flags & KVM_MEMSLOT_INVALID) {
4702 		if (fault->prefetch)
4703 			return -EAGAIN;
4704 
4705 		return RET_PF_RETRY;
4706 	}
4707 
4708 	if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) {
4709 		/*
4710 		 * Don't map L1's APIC access page into L2, KVM doesn't support
4711 		 * using APICv/AVIC to accelerate L2 accesses to L1's APIC,
4712 		 * i.e. the access needs to be emulated.  Emulating access to
4713 		 * L1's APIC is also correct if L1 is accelerating L2's own
4714 		 * virtual APIC, but for some reason L1 also maps _L1's_ APIC
4715 		 * into L2.  Note, vcpu_is_mmio_gpa() always treats access to
4716 		 * the APIC as MMIO.  Allow an MMIO SPTE to be created, as KVM
4717 		 * uses different roots for L1 vs. L2, i.e. there is no danger
4718 		 * of breaking APICv/AVIC for L1.
4719 		 */
4720 		if (is_guest_mode(vcpu))
4721 			return kvm_handle_noslot_fault(vcpu, fault, access);
4722 
4723 		/*
4724 		 * If the APIC access page exists but is disabled, go directly
4725 		 * to emulation without caching the MMIO access or creating a
4726 		 * MMIO SPTE.  That way the cache doesn't need to be purged
4727 		 * when the AVIC is re-enabled.
4728 		 */
4729 		if (!kvm_apicv_activated(vcpu->kvm))
4730 			return RET_PF_EMULATE;
4731 	}
4732 
4733 	/*
4734 	 * Check for a relevant mmu_notifier invalidation event before getting
4735 	 * the pfn from the primary MMU, and before acquiring mmu_lock.
4736 	 *
4737 	 * For mmu_lock, if there is an in-progress invalidation and the kernel
4738 	 * allows preemption, the invalidation task may drop mmu_lock and yield
4739 	 * in response to mmu_lock being contended, which is *very* counter-
4740 	 * productive as this vCPU can't actually make forward progress until
4741 	 * the invalidation completes.
4742 	 *
4743 	 * Retrying now can also avoid unnessary lock contention in the primary
4744 	 * MMU, as the primary MMU doesn't necessarily hold a single lock for
4745 	 * the duration of the invalidation, i.e. faulting in a conflicting pfn
4746 	 * can cause the invalidation to take longer by holding locks that are
4747 	 * needed to complete the invalidation.
4748 	 *
4749 	 * Do the pre-check even for non-preemtible kernels, i.e. even if KVM
4750 	 * will never yield mmu_lock in response to contention, as this vCPU is
4751 	 * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
4752 	 * to detect retry guarantees the worst case latency for the vCPU.
4753 	 */
4754 	if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn))
4755 		return RET_PF_RETRY;
4756 
4757 	ret = __kvm_mmu_faultin_pfn(vcpu, fault);
4758 	if (ret != RET_PF_CONTINUE)
4759 		return ret;
4760 
4761 	if (unlikely(is_error_pfn(fault->pfn)))
4762 		return kvm_handle_error_pfn(vcpu, fault);
4763 
4764 	if (WARN_ON_ONCE(!fault->slot || is_noslot_pfn(fault->pfn)))
4765 		return kvm_handle_noslot_fault(vcpu, fault, access);
4766 
4767 	/*
4768 	 * Check again for a relevant mmu_notifier invalidation event purely to
4769 	 * avoid contending mmu_lock.  Most invalidations will be detected by
4770 	 * the previous check, but checking is extremely cheap relative to the
4771 	 * overall cost of failing to detect the invalidation until after
4772 	 * mmu_lock is acquired.
4773 	 */
4774 	if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) {
4775 		kvm_mmu_finish_page_fault(vcpu, fault, RET_PF_RETRY);
4776 		return RET_PF_RETRY;
4777 	}
4778 
4779 	return RET_PF_CONTINUE;
4780 }
4781 
4782 /*
4783  * Returns true if the page fault is stale and needs to be retried, i.e. if the
4784  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
4785  */
is_page_fault_stale(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4786 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
4787 				struct kvm_page_fault *fault)
4788 {
4789 	struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4790 
4791 	/* Special roots, e.g. pae_root, are not backed by shadow pages. */
4792 	if (sp && is_obsolete_sp(vcpu->kvm, sp))
4793 		return true;
4794 
4795 	/*
4796 	 * Roots without an associated shadow page are considered invalid if
4797 	 * there is a pending request to free obsolete roots.  The request is
4798 	 * only a hint that the current root _may_ be obsolete and needs to be
4799 	 * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
4800 	 * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
4801 	 * to reload even if no vCPU is actively using the root.
4802 	 */
4803 	if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
4804 		return true;
4805 
4806 	/*
4807 	 * Check for a relevant mmu_notifier invalidation event one last time
4808 	 * now that mmu_lock is held, as the "unsafe" checks performed without
4809 	 * holding mmu_lock can get false negatives.
4810 	 */
4811 	return fault->slot &&
4812 	       mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
4813 }
4814 
direct_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4815 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4816 {
4817 	int r;
4818 
4819 	/* Dummy roots are used only for shadowing bad guest roots. */
4820 	if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4821 		return RET_PF_RETRY;
4822 
4823 	if (page_fault_handle_page_track(vcpu, fault))
4824 		return RET_PF_WRITE_PROTECTED;
4825 
4826 	r = fast_page_fault(vcpu, fault);
4827 	if (r != RET_PF_INVALID)
4828 		return r;
4829 
4830 	r = mmu_topup_memory_caches(vcpu, false);
4831 	if (r)
4832 		return r;
4833 
4834 	r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
4835 	if (r != RET_PF_CONTINUE)
4836 		return r;
4837 
4838 	r = RET_PF_RETRY;
4839 	write_lock(&vcpu->kvm->mmu_lock);
4840 
4841 	if (is_page_fault_stale(vcpu, fault))
4842 		goto out_unlock;
4843 
4844 	r = make_mmu_pages_available(vcpu);
4845 	if (r)
4846 		goto out_unlock;
4847 
4848 	r = direct_map(vcpu, fault);
4849 
4850 out_unlock:
4851 	kvm_mmu_finish_page_fault(vcpu, fault, r);
4852 	write_unlock(&vcpu->kvm->mmu_lock);
4853 	return r;
4854 }
4855 
nonpaging_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4856 static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
4857 				struct kvm_page_fault *fault)
4858 {
4859 	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
4860 	fault->max_level = PG_LEVEL_2M;
4861 	return direct_page_fault(vcpu, fault);
4862 }
4863 
kvm_handle_page_fault(struct kvm_vcpu * vcpu,u64 error_code,u64 fault_address,char * insn,int insn_len)4864 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4865 				u64 fault_address, char *insn, int insn_len)
4866 {
4867 	int r = 1;
4868 	u32 flags = vcpu->arch.apf.host_apf_flags;
4869 
4870 #ifndef CONFIG_X86_64
4871 	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
4872 	if (WARN_ON_ONCE(fault_address >> 32))
4873 		return -EFAULT;
4874 #endif
4875 	/*
4876 	 * Legacy #PF exception only have a 32-bit error code.  Simply drop the
4877 	 * upper bits as KVM doesn't use them for #PF (because they are never
4878 	 * set), and to ensure there are no collisions with KVM-defined bits.
4879 	 */
4880 	if (WARN_ON_ONCE(error_code >> 32))
4881 		error_code = lower_32_bits(error_code);
4882 
4883 	/*
4884 	 * Restrict KVM-defined flags to bits 63:32 so that it's impossible for
4885 	 * them to conflict with #PF error codes, which are limited to 32 bits.
4886 	 */
4887 	BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK));
4888 
4889 	kvm_request_l1tf_flush_l1d();
4890 	if (!flags) {
4891 		trace_kvm_page_fault(vcpu, fault_address, error_code);
4892 
4893 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4894 				insn_len);
4895 	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4896 		vcpu->arch.apf.host_apf_flags = 0;
4897 		local_irq_disable();
4898 		kvm_async_pf_task_wait_schedule(fault_address);
4899 		local_irq_enable();
4900 	} else {
4901 		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4902 	}
4903 
4904 	return r;
4905 }
4906 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_page_fault);
4907 
4908 #ifdef CONFIG_X86_64
kvm_tdp_mmu_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4909 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
4910 				  struct kvm_page_fault *fault)
4911 {
4912 	int r;
4913 
4914 	if (page_fault_handle_page_track(vcpu, fault))
4915 		return RET_PF_WRITE_PROTECTED;
4916 
4917 	r = fast_page_fault(vcpu, fault);
4918 	if (r != RET_PF_INVALID)
4919 		return r;
4920 
4921 	r = mmu_topup_memory_caches(vcpu, false);
4922 	if (r)
4923 		return r;
4924 
4925 	r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
4926 	if (r != RET_PF_CONTINUE)
4927 		return r;
4928 
4929 	r = RET_PF_RETRY;
4930 	read_lock(&vcpu->kvm->mmu_lock);
4931 
4932 	if (is_page_fault_stale(vcpu, fault))
4933 		goto out_unlock;
4934 
4935 	r = kvm_tdp_mmu_map(vcpu, fault);
4936 
4937 out_unlock:
4938 	kvm_mmu_finish_page_fault(vcpu, fault, r);
4939 	read_unlock(&vcpu->kvm->mmu_lock);
4940 	return r;
4941 }
4942 #endif
4943 
kvm_tdp_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4944 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4945 {
4946 #ifdef CONFIG_X86_64
4947 	if (tdp_mmu_enabled)
4948 		return kvm_tdp_mmu_page_fault(vcpu, fault);
4949 #endif
4950 
4951 	return direct_page_fault(vcpu, fault);
4952 }
4953 
kvm_tdp_page_prefault(struct kvm_vcpu * vcpu,gpa_t gpa,u64 error_code,u8 * level)4954 static int kvm_tdp_page_prefault(struct kvm_vcpu *vcpu, gpa_t gpa,
4955 				 u64 error_code, u8 *level)
4956 {
4957 	int r;
4958 
4959 	/*
4960 	 * Restrict to TDP page fault, since that's the only case where the MMU
4961 	 * is indexed by GPA.
4962 	 */
4963 	if (vcpu->arch.mmu->page_fault != kvm_tdp_page_fault)
4964 		return -EOPNOTSUPP;
4965 
4966 	do {
4967 		if (signal_pending(current))
4968 			return -EINTR;
4969 
4970 		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
4971 			return -EIO;
4972 
4973 		cond_resched();
4974 		r = kvm_mmu_do_page_fault(vcpu, gpa, error_code, true, NULL, level);
4975 	} while (r == RET_PF_RETRY);
4976 
4977 	if (r < 0)
4978 		return r;
4979 
4980 	switch (r) {
4981 	case RET_PF_FIXED:
4982 	case RET_PF_SPURIOUS:
4983 	case RET_PF_WRITE_PROTECTED:
4984 		return 0;
4985 
4986 	case RET_PF_EMULATE:
4987 		return -ENOENT;
4988 
4989 	case RET_PF_RETRY:
4990 	case RET_PF_CONTINUE:
4991 	case RET_PF_INVALID:
4992 	default:
4993 		WARN_ONCE(1, "could not fix page fault during prefault");
4994 		return -EIO;
4995 	}
4996 }
4997 
kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu * vcpu,struct kvm_pre_fault_memory * range)4998 long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4999 				    struct kvm_pre_fault_memory *range)
5000 {
5001 	u64 error_code = PFERR_GUEST_FINAL_MASK;
5002 	u8 level = PG_LEVEL_4K;
5003 	u64 direct_bits;
5004 	u64 end;
5005 	int r;
5006 
5007 	if (!vcpu->kvm->arch.pre_fault_allowed)
5008 		return -EOPNOTSUPP;
5009 
5010 	if (kvm_is_gfn_alias(vcpu->kvm, gpa_to_gfn(range->gpa)))
5011 		return -EINVAL;
5012 
5013 	/*
5014 	 * reload is efficient when called repeatedly, so we can do it on
5015 	 * every iteration.
5016 	 */
5017 	r = kvm_mmu_reload(vcpu);
5018 	if (r)
5019 		return r;
5020 
5021 	direct_bits = 0;
5022 	if (kvm_arch_has_private_mem(vcpu->kvm) &&
5023 	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))
5024 		error_code |= PFERR_PRIVATE_ACCESS;
5025 	else
5026 		direct_bits = gfn_to_gpa(kvm_gfn_direct_bits(vcpu->kvm));
5027 
5028 	/*
5029 	 * Shadow paging uses GVA for kvm page fault, so restrict to
5030 	 * two-dimensional paging.
5031 	 */
5032 	r = kvm_tdp_page_prefault(vcpu, range->gpa | direct_bits, error_code, &level);
5033 	if (r < 0)
5034 		return r;
5035 
5036 	/*
5037 	 * If the mapping that covers range->gpa can use a huge page, it
5038 	 * may start below it or end after range->gpa + range->size.
5039 	 */
5040 	end = (range->gpa & KVM_HPAGE_MASK(level)) + KVM_HPAGE_SIZE(level);
5041 	return min(range->size, end - range->gpa);
5042 }
5043 
5044 #ifdef CONFIG_KVM_GUEST_MEMFD
kvm_assert_gmem_invalidate_lock_held(struct kvm_memory_slot * slot)5045 static void kvm_assert_gmem_invalidate_lock_held(struct kvm_memory_slot *slot)
5046 {
5047 #ifdef CONFIG_PROVE_LOCKING
5048 	if (WARN_ON_ONCE(!kvm_slot_has_gmem(slot)) ||
5049 	    WARN_ON_ONCE(!slot->gmem.file) ||
5050 	    WARN_ON_ONCE(!file_count(slot->gmem.file)))
5051 		return;
5052 
5053 	lockdep_assert_held(&file_inode(slot->gmem.file)->i_mapping->invalidate_lock);
5054 #endif
5055 }
5056 
kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu * vcpu,gfn_t gfn,kvm_pfn_t pfn)5057 int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
5058 {
5059 	struct kvm_page_fault fault = {
5060 		.addr = gfn_to_gpa(gfn),
5061 		.error_code = PFERR_GUEST_FINAL_MASK | PFERR_PRIVATE_ACCESS,
5062 		.prefetch = true,
5063 		.is_tdp = true,
5064 		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(vcpu->kvm),
5065 
5066 		.max_level = PG_LEVEL_4K,
5067 		.req_level = PG_LEVEL_4K,
5068 		.goal_level = PG_LEVEL_4K,
5069 		.is_private = true,
5070 
5071 		.gfn = gfn,
5072 		.slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn),
5073 		.pfn = pfn,
5074 		.map_writable = true,
5075 	};
5076 	struct kvm *kvm = vcpu->kvm;
5077 	int r;
5078 
5079 	lockdep_assert_held(&kvm->slots_lock);
5080 
5081 	/*
5082 	 * Mapping a pre-determined private pfn is intended only for use when
5083 	 * populating a guest_memfd instance.  Assert that the slot is backed
5084 	 * by guest_memfd and that the gmem instance's invalidate_lock is held.
5085 	 */
5086 	kvm_assert_gmem_invalidate_lock_held(fault.slot);
5087 
5088 	if (KVM_BUG_ON(!tdp_mmu_enabled, kvm))
5089 		return -EIO;
5090 
5091 	if (kvm_gfn_is_write_tracked(kvm, fault.slot, fault.gfn))
5092 		return -EPERM;
5093 
5094 	r = kvm_mmu_reload(vcpu);
5095 	if (r)
5096 		return r;
5097 
5098 	r = mmu_topup_memory_caches(vcpu, false);
5099 	if (r)
5100 		return r;
5101 
5102 	do {
5103 		if (signal_pending(current))
5104 			return -EINTR;
5105 
5106 		if (kvm_test_request(KVM_REQ_VM_DEAD, vcpu))
5107 			return -EIO;
5108 
5109 		cond_resched();
5110 
5111 		guard(read_lock)(&kvm->mmu_lock);
5112 
5113 		r = kvm_tdp_mmu_map(vcpu, &fault);
5114 	} while (r == RET_PF_RETRY);
5115 
5116 	if (r != RET_PF_FIXED)
5117 		return -EIO;
5118 
5119 	return 0;
5120 }
5121 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_mmu_map_private_pfn);
5122 #endif
5123 
nonpaging_init_context(struct kvm_mmu * context)5124 static void nonpaging_init_context(struct kvm_mmu *context)
5125 {
5126 	context->page_fault = nonpaging_page_fault;
5127 	context->gva_to_gpa = nonpaging_gva_to_gpa;
5128 	context->sync_spte = NULL;
5129 }
5130 
is_root_usable(struct kvm_mmu_root_info * root,gpa_t pgd,union kvm_mmu_page_role role)5131 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
5132 				  union kvm_mmu_page_role role)
5133 {
5134 	struct kvm_mmu_page *sp;
5135 
5136 	if (!VALID_PAGE(root->hpa))
5137 		return false;
5138 
5139 	if (!role.direct && pgd != root->pgd)
5140 		return false;
5141 
5142 	sp = root_to_sp(root->hpa);
5143 	if (WARN_ON_ONCE(!sp))
5144 		return false;
5145 
5146 	return role.word == sp->role.word;
5147 }
5148 
5149 /*
5150  * Find out if a previously cached root matching the new pgd/role is available,
5151  * and insert the current root as the MRU in the cache.
5152  * If a matching root is found, it is assigned to kvm_mmu->root and
5153  * true is returned.
5154  * If no match is found, kvm_mmu->root is left invalid, the LRU root is
5155  * evicted to make room for the current root, and false is returned.
5156  */
cached_root_find_and_keep_current(struct kvm * kvm,struct kvm_mmu * mmu,gpa_t new_pgd,union kvm_mmu_page_role new_role)5157 static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
5158 					      gpa_t new_pgd,
5159 					      union kvm_mmu_page_role new_role)
5160 {
5161 	uint i;
5162 
5163 	if (is_root_usable(&mmu->root, new_pgd, new_role))
5164 		return true;
5165 
5166 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5167 		/*
5168 		 * The swaps end up rotating the cache like this:
5169 		 *   C   0 1 2 3   (on entry to the function)
5170 		 *   0   C 1 2 3
5171 		 *   1   C 0 2 3
5172 		 *   2   C 0 1 3
5173 		 *   3   C 0 1 2   (on exit from the loop)
5174 		 */
5175 		swap(mmu->root, mmu->prev_roots[i]);
5176 		if (is_root_usable(&mmu->root, new_pgd, new_role))
5177 			return true;
5178 	}
5179 
5180 	kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
5181 	return false;
5182 }
5183 
5184 /*
5185  * Find out if a previously cached root matching the new pgd/role is available.
5186  * On entry, mmu->root is invalid.
5187  * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
5188  * of the cache becomes invalid, and true is returned.
5189  * If no match is found, kvm_mmu->root is left invalid and false is returned.
5190  */
cached_root_find_without_current(struct kvm * kvm,struct kvm_mmu * mmu,gpa_t new_pgd,union kvm_mmu_page_role new_role)5191 static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
5192 					     gpa_t new_pgd,
5193 					     union kvm_mmu_page_role new_role)
5194 {
5195 	uint i;
5196 
5197 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5198 		if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role))
5199 			goto hit;
5200 
5201 	return false;
5202 
5203 hit:
5204 	swap(mmu->root, mmu->prev_roots[i]);
5205 	/* Bubble up the remaining roots.  */
5206 	for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++)
5207 		mmu->prev_roots[i] = mmu->prev_roots[i + 1];
5208 	mmu->prev_roots[i].hpa = INVALID_PAGE;
5209 	return true;
5210 }
5211 
fast_pgd_switch(struct kvm * kvm,struct kvm_mmu * mmu,gpa_t new_pgd,union kvm_mmu_page_role new_role)5212 static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
5213 			    gpa_t new_pgd, union kvm_mmu_page_role new_role)
5214 {
5215 	/*
5216 	 * Limit reuse to 64-bit hosts+VMs without "special" roots in order to
5217 	 * avoid having to deal with PDPTEs and other complexities.
5218 	 */
5219 	if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
5220 		kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
5221 
5222 	if (VALID_PAGE(mmu->root.hpa))
5223 		return cached_root_find_and_keep_current(kvm, mmu, new_pgd, new_role);
5224 	else
5225 		return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
5226 }
5227 
kvm_mmu_new_pgd(struct kvm_vcpu * vcpu,gpa_t new_pgd)5228 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
5229 {
5230 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5231 	union kvm_mmu_page_role new_role = mmu->root_role;
5232 
5233 	/*
5234 	 * Return immediately if no usable root was found, kvm_mmu_reload()
5235 	 * will establish a valid root prior to the next VM-Enter.
5236 	 */
5237 	if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role))
5238 		return;
5239 
5240 	/*
5241 	 * It's possible that the cached previous root page is obsolete because
5242 	 * of a change in the MMU generation number. However, changing the
5243 	 * generation number is accompanied by KVM_REQ_MMU_FREE_OBSOLETE_ROOTS,
5244 	 * which will free the root set here and allocate a new one.
5245 	 */
5246 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
5247 
5248 	if (force_flush_and_sync_on_reuse) {
5249 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
5250 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
5251 	}
5252 
5253 	/*
5254 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
5255 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
5256 	 * valid. So clear any cached MMIO info even when we don't need to sync
5257 	 * the shadow page tables.
5258 	 */
5259 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
5260 
5261 	/*
5262 	 * If this is a direct root page, it doesn't have a write flooding
5263 	 * count. Otherwise, clear the write flooding count.
5264 	 */
5265 	if (!new_role.direct) {
5266 		struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
5267 
5268 		if (!WARN_ON_ONCE(!sp))
5269 			__clear_sp_write_flooding_count(sp);
5270 	}
5271 }
5272 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_new_pgd);
5273 
sync_mmio_spte(struct kvm_vcpu * vcpu,u64 * sptep,gfn_t gfn,unsigned int access)5274 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
5275 			   unsigned int access)
5276 {
5277 	if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) {
5278 		if (gfn != get_mmio_spte_gfn(*sptep)) {
5279 			mmu_spte_clear_no_track(sptep);
5280 			return true;
5281 		}
5282 
5283 		mark_mmio_spte(vcpu, sptep, gfn, access);
5284 		return true;
5285 	}
5286 
5287 	return false;
5288 }
5289 
5290 #define PTTYPE_EPT 18 /* arbitrary */
5291 #define PTTYPE PTTYPE_EPT
5292 #include "paging_tmpl.h"
5293 #undef PTTYPE
5294 
5295 #define PTTYPE 64
5296 #include "paging_tmpl.h"
5297 #undef PTTYPE
5298 
5299 #define PTTYPE 32
5300 #include "paging_tmpl.h"
5301 #undef PTTYPE
5302 
__reset_rsvds_bits_mask(struct rsvd_bits_validate * rsvd_check,u64 pa_bits_rsvd,int level,bool nx,bool gbpages,bool pse,bool amd)5303 static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
5304 				    u64 pa_bits_rsvd, int level, bool nx,
5305 				    bool gbpages, bool pse, bool amd)
5306 {
5307 	u64 gbpages_bit_rsvd = 0;
5308 	u64 nonleaf_bit8_rsvd = 0;
5309 	u64 high_bits_rsvd;
5310 
5311 	rsvd_check->bad_mt_xwr = 0;
5312 
5313 	if (!gbpages)
5314 		gbpages_bit_rsvd = rsvd_bits(7, 7);
5315 
5316 	if (level == PT32E_ROOT_LEVEL)
5317 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
5318 	else
5319 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
5320 
5321 	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
5322 	if (!nx)
5323 		high_bits_rsvd |= rsvd_bits(63, 63);
5324 
5325 	/*
5326 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
5327 	 * leaf entries) on AMD CPUs only.
5328 	 */
5329 	if (amd)
5330 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
5331 
5332 	switch (level) {
5333 	case PT32_ROOT_LEVEL:
5334 		/* no rsvd bits for 2 level 4K page table entries */
5335 		rsvd_check->rsvd_bits_mask[0][1] = 0;
5336 		rsvd_check->rsvd_bits_mask[0][0] = 0;
5337 		rsvd_check->rsvd_bits_mask[1][0] =
5338 			rsvd_check->rsvd_bits_mask[0][0];
5339 
5340 		if (!pse) {
5341 			rsvd_check->rsvd_bits_mask[1][1] = 0;
5342 			break;
5343 		}
5344 
5345 		if (is_cpuid_PSE36())
5346 			/* 36bits PSE 4MB page */
5347 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
5348 		else
5349 			/* 32 bits PSE 4MB page */
5350 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
5351 		break;
5352 	case PT32E_ROOT_LEVEL:
5353 		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
5354 						   high_bits_rsvd |
5355 						   rsvd_bits(5, 8) |
5356 						   rsvd_bits(1, 2);	/* PDPTE */
5357 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
5358 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
5359 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
5360 						   rsvd_bits(13, 20);	/* large page */
5361 		rsvd_check->rsvd_bits_mask[1][0] =
5362 			rsvd_check->rsvd_bits_mask[0][0];
5363 		break;
5364 	case PT64_ROOT_5LEVEL:
5365 		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
5366 						   nonleaf_bit8_rsvd |
5367 						   rsvd_bits(7, 7);
5368 		rsvd_check->rsvd_bits_mask[1][4] =
5369 			rsvd_check->rsvd_bits_mask[0][4];
5370 		fallthrough;
5371 	case PT64_ROOT_4LEVEL:
5372 		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
5373 						   nonleaf_bit8_rsvd |
5374 						   rsvd_bits(7, 7);
5375 		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
5376 						   gbpages_bit_rsvd;
5377 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
5378 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
5379 		rsvd_check->rsvd_bits_mask[1][3] =
5380 			rsvd_check->rsvd_bits_mask[0][3];
5381 		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
5382 						   gbpages_bit_rsvd |
5383 						   rsvd_bits(13, 29);
5384 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
5385 						   rsvd_bits(13, 20); /* large page */
5386 		rsvd_check->rsvd_bits_mask[1][0] =
5387 			rsvd_check->rsvd_bits_mask[0][0];
5388 		break;
5389 	}
5390 }
5391 
reset_guest_rsvds_bits_mask(struct kvm_vcpu * vcpu,struct kvm_mmu * context)5392 static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
5393 					struct kvm_mmu *context)
5394 {
5395 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
5396 				vcpu->arch.reserved_gpa_bits,
5397 				context->cpu_role.base.level, is_efer_nx(context),
5398 				guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
5399 				is_cr4_pse(context),
5400 				guest_cpuid_is_amd_compatible(vcpu));
5401 }
5402 
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate * rsvd_check,u64 pa_bits_rsvd,bool execonly,int huge_page_level)5403 static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
5404 					u64 pa_bits_rsvd, bool execonly,
5405 					int huge_page_level)
5406 {
5407 	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
5408 	u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
5409 	u64 bad_mt_xwr;
5410 
5411 	if (huge_page_level < PG_LEVEL_1G)
5412 		large_1g_rsvd = rsvd_bits(7, 7);
5413 	if (huge_page_level < PG_LEVEL_2M)
5414 		large_2m_rsvd = rsvd_bits(7, 7);
5415 
5416 	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
5417 	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
5418 	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
5419 	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
5420 	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
5421 
5422 	/* large page */
5423 	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
5424 	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
5425 	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
5426 	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
5427 	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
5428 
5429 	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
5430 	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
5431 	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
5432 	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
5433 	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
5434 	if (!execonly) {
5435 		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
5436 		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
5437 	}
5438 	rsvd_check->bad_mt_xwr = bad_mt_xwr;
5439 }
5440 
reset_rsvds_bits_mask_ept(struct kvm_vcpu * vcpu,struct kvm_mmu * context,bool execonly,int huge_page_level)5441 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
5442 		struct kvm_mmu *context, bool execonly, int huge_page_level)
5443 {
5444 	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
5445 				    vcpu->arch.reserved_gpa_bits, execonly,
5446 				    huge_page_level);
5447 }
5448 
reserved_hpa_bits(void)5449 static inline u64 reserved_hpa_bits(void)
5450 {
5451 	return rsvd_bits(kvm_host.maxphyaddr, 63);
5452 }
5453 
5454 /*
5455  * the page table on host is the shadow page table for the page
5456  * table in guest or amd nested guest, its mmu features completely
5457  * follow the features in guest.
5458  */
reset_shadow_zero_bits_mask(struct kvm_vcpu * vcpu,struct kvm_mmu * context)5459 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
5460 					struct kvm_mmu *context)
5461 {
5462 	/* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */
5463 	bool is_amd = true;
5464 	/* KVM doesn't use 2-level page tables for the shadow MMU. */
5465 	bool is_pse = false;
5466 	struct rsvd_bits_validate *shadow_zero_check;
5467 	int i;
5468 
5469 	WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL);
5470 
5471 	shadow_zero_check = &context->shadow_zero_check;
5472 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5473 				context->root_role.level,
5474 				context->root_role.efer_nx,
5475 				guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
5476 				is_pse, is_amd);
5477 
5478 	if (!shadow_me_mask)
5479 		return;
5480 
5481 	for (i = context->root_role.level; --i >= 0;) {
5482 		/*
5483 		 * So far shadow_me_value is a constant during KVM's life
5484 		 * time.  Bits in shadow_me_value are allowed to be set.
5485 		 * Bits in shadow_me_mask but not in shadow_me_value are
5486 		 * not allowed to be set.
5487 		 */
5488 		shadow_zero_check->rsvd_bits_mask[0][i] |= shadow_me_mask;
5489 		shadow_zero_check->rsvd_bits_mask[1][i] |= shadow_me_mask;
5490 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_value;
5491 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_value;
5492 	}
5493 
5494 }
5495 
boot_cpu_is_amd(void)5496 static inline bool boot_cpu_is_amd(void)
5497 {
5498 	WARN_ON_ONCE(!tdp_enabled);
5499 	return shadow_x_mask == 0;
5500 }
5501 
5502 /*
5503  * the direct page table on host, use as much mmu features as
5504  * possible, however, kvm currently does not do execution-protection.
5505  */
reset_tdp_shadow_zero_bits_mask(struct kvm_mmu * context)5506 static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
5507 {
5508 	struct rsvd_bits_validate *shadow_zero_check;
5509 	int i;
5510 
5511 	shadow_zero_check = &context->shadow_zero_check;
5512 
5513 	if (boot_cpu_is_amd())
5514 		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5515 					context->root_role.level, true,
5516 					boot_cpu_has(X86_FEATURE_GBPAGES),
5517 					false, true);
5518 	else
5519 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
5520 					    reserved_hpa_bits(), false,
5521 					    max_huge_page_level);
5522 
5523 	if (!shadow_me_mask)
5524 		return;
5525 
5526 	for (i = context->root_role.level; --i >= 0;) {
5527 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
5528 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
5529 	}
5530 }
5531 
5532 /*
5533  * as the comments in reset_shadow_zero_bits_mask() except it
5534  * is the shadow page table for intel nested guest.
5535  */
5536 static void
reset_ept_shadow_zero_bits_mask(struct kvm_mmu * context,bool execonly)5537 reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
5538 {
5539 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
5540 				    reserved_hpa_bits(), execonly,
5541 				    max_huge_page_level);
5542 }
5543 
5544 #define BYTE_MASK(access) \
5545 	((1 & (access) ? 2 : 0) | \
5546 	 (2 & (access) ? 4 : 0) | \
5547 	 (3 & (access) ? 8 : 0) | \
5548 	 (4 & (access) ? 16 : 0) | \
5549 	 (5 & (access) ? 32 : 0) | \
5550 	 (6 & (access) ? 64 : 0) | \
5551 	 (7 & (access) ? 128 : 0))
5552 
5553 
update_permission_bitmask(struct kvm_mmu * mmu,bool ept)5554 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
5555 {
5556 	unsigned byte;
5557 
5558 	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
5559 	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
5560 	const u8 u = BYTE_MASK(ACC_USER_MASK);
5561 
5562 	bool cr4_smep = is_cr4_smep(mmu);
5563 	bool cr4_smap = is_cr4_smap(mmu);
5564 	bool cr0_wp = is_cr0_wp(mmu);
5565 	bool efer_nx = is_efer_nx(mmu);
5566 
5567 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
5568 		unsigned pfec = byte << 1;
5569 
5570 		/*
5571 		 * Each "*f" variable has a 1 bit for each UWX value
5572 		 * that causes a fault with the given PFEC.
5573 		 */
5574 
5575 		/* Faults from writes to non-writable pages */
5576 		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
5577 		/* Faults from user mode accesses to supervisor pages */
5578 		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
5579 		/* Faults from fetches of non-executable pages*/
5580 		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
5581 		/* Faults from kernel mode fetches of user pages */
5582 		u8 smepf = 0;
5583 		/* Faults from kernel mode accesses of user pages */
5584 		u8 smapf = 0;
5585 
5586 		if (!ept) {
5587 			/* Faults from kernel mode accesses to user pages */
5588 			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
5589 
5590 			/* Not really needed: !nx will cause pte.nx to fault */
5591 			if (!efer_nx)
5592 				ff = 0;
5593 
5594 			/* Allow supervisor writes if !cr0.wp */
5595 			if (!cr0_wp)
5596 				wf = (pfec & PFERR_USER_MASK) ? wf : 0;
5597 
5598 			/* Disallow supervisor fetches of user code if cr4.smep */
5599 			if (cr4_smep)
5600 				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
5601 
5602 			/*
5603 			 * SMAP:kernel-mode data accesses from user-mode
5604 			 * mappings should fault. A fault is considered
5605 			 * as a SMAP violation if all of the following
5606 			 * conditions are true:
5607 			 *   - X86_CR4_SMAP is set in CR4
5608 			 *   - A user page is accessed
5609 			 *   - The access is not a fetch
5610 			 *   - The access is supervisor mode
5611 			 *   - If implicit supervisor access or X86_EFLAGS_AC is clear
5612 			 *
5613 			 * Here, we cover the first four conditions.
5614 			 * The fifth is computed dynamically in permission_fault();
5615 			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
5616 			 * *not* subject to SMAP restrictions.
5617 			 */
5618 			if (cr4_smap)
5619 				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
5620 		}
5621 
5622 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
5623 	}
5624 }
5625 
5626 /*
5627 * PKU is an additional mechanism by which the paging controls access to
5628 * user-mode addresses based on the value in the PKRU register.  Protection
5629 * key violations are reported through a bit in the page fault error code.
5630 * Unlike other bits of the error code, the PK bit is not known at the
5631 * call site of e.g. gva_to_gpa; it must be computed directly in
5632 * permission_fault based on two bits of PKRU, on some machine state (CR4,
5633 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
5634 *
5635 * In particular the following conditions come from the error code, the
5636 * page tables and the machine state:
5637 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
5638 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
5639 * - PK is always zero if U=0 in the page tables
5640 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
5641 *
5642 * The PKRU bitmask caches the result of these four conditions.  The error
5643 * code (minus the P bit) and the page table's U bit form an index into the
5644 * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
5645 * with the two bits of the PKRU register corresponding to the protection key.
5646 * For the first three conditions above the bits will be 00, thus masking
5647 * away both AD and WD.  For all reads or if the last condition holds, WD
5648 * only will be masked away.
5649 */
update_pkru_bitmask(struct kvm_mmu * mmu)5650 static void update_pkru_bitmask(struct kvm_mmu *mmu)
5651 {
5652 	unsigned bit;
5653 	bool wp;
5654 
5655 	mmu->pkru_mask = 0;
5656 
5657 	if (!is_cr4_pke(mmu))
5658 		return;
5659 
5660 	wp = is_cr0_wp(mmu);
5661 
5662 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
5663 		unsigned pfec, pkey_bits;
5664 		bool check_pkey, check_write, ff, uf, wf, pte_user;
5665 
5666 		pfec = bit << 1;
5667 		ff = pfec & PFERR_FETCH_MASK;
5668 		uf = pfec & PFERR_USER_MASK;
5669 		wf = pfec & PFERR_WRITE_MASK;
5670 
5671 		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
5672 		pte_user = pfec & PFERR_RSVD_MASK;
5673 
5674 		/*
5675 		 * Only need to check the access which is not an
5676 		 * instruction fetch and is to a user page.
5677 		 */
5678 		check_pkey = (!ff && pte_user);
5679 		/*
5680 		 * write access is controlled by PKRU if it is a
5681 		 * user access or CR0.WP = 1.
5682 		 */
5683 		check_write = check_pkey && wf && (uf || wp);
5684 
5685 		/* PKRU.AD stops both read and write access. */
5686 		pkey_bits = !!check_pkey;
5687 		/* PKRU.WD stops write access. */
5688 		pkey_bits |= (!!check_write) << 1;
5689 
5690 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
5691 	}
5692 }
5693 
reset_guest_paging_metadata(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu)5694 static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
5695 					struct kvm_mmu *mmu)
5696 {
5697 	if (!is_cr0_pg(mmu))
5698 		return;
5699 
5700 	reset_guest_rsvds_bits_mask(vcpu, mmu);
5701 	update_permission_bitmask(mmu, false);
5702 	update_pkru_bitmask(mmu);
5703 }
5704 
paging64_init_context(struct kvm_mmu * context)5705 static void paging64_init_context(struct kvm_mmu *context)
5706 {
5707 	context->page_fault = paging64_page_fault;
5708 	context->gva_to_gpa = paging64_gva_to_gpa;
5709 	context->sync_spte = paging64_sync_spte;
5710 }
5711 
paging32_init_context(struct kvm_mmu * context)5712 static void paging32_init_context(struct kvm_mmu *context)
5713 {
5714 	context->page_fault = paging32_page_fault;
5715 	context->gva_to_gpa = paging32_gva_to_gpa;
5716 	context->sync_spte = paging32_sync_spte;
5717 }
5718 
kvm_calc_cpu_role(struct kvm_vcpu * vcpu,const struct kvm_mmu_role_regs * regs)5719 static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
5720 					    const struct kvm_mmu_role_regs *regs)
5721 {
5722 	union kvm_cpu_role role = {0};
5723 
5724 	role.base.access = ACC_ALL;
5725 	role.base.smm = is_smm(vcpu);
5726 	role.base.guest_mode = is_guest_mode(vcpu);
5727 	role.ext.valid = 1;
5728 
5729 	if (!____is_cr0_pg(regs)) {
5730 		role.base.direct = 1;
5731 		return role;
5732 	}
5733 
5734 	role.base.efer_nx = ____is_efer_nx(regs);
5735 	role.base.cr0_wp = ____is_cr0_wp(regs);
5736 	role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
5737 	role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
5738 	role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
5739 
5740 	if (____is_efer_lma(regs))
5741 		role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL
5742 							: PT64_ROOT_4LEVEL;
5743 	else if (____is_cr4_pae(regs))
5744 		role.base.level = PT32E_ROOT_LEVEL;
5745 	else
5746 		role.base.level = PT32_ROOT_LEVEL;
5747 
5748 	role.ext.cr4_smep = ____is_cr4_smep(regs);
5749 	role.ext.cr4_smap = ____is_cr4_smap(regs);
5750 	role.ext.cr4_pse = ____is_cr4_pse(regs);
5751 
5752 	/* PKEY and LA57 are active iff long mode is active. */
5753 	role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
5754 	role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
5755 	role.ext.efer_lma = ____is_efer_lma(regs);
5756 	return role;
5757 }
5758 
__kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu)5759 void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
5760 					struct kvm_mmu *mmu)
5761 {
5762 	const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP);
5763 
5764 	BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
5765 	BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
5766 
5767 	if (is_cr0_wp(mmu) == cr0_wp)
5768 		return;
5769 
5770 	mmu->cpu_role.base.cr0_wp = cr0_wp;
5771 	reset_guest_paging_metadata(vcpu, mmu);
5772 }
5773 
kvm_mmu_get_tdp_level(struct kvm_vcpu * vcpu)5774 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
5775 {
5776 	int maxpa;
5777 
5778 	if (vcpu->kvm->arch.vm_type == KVM_X86_TDX_VM)
5779 		maxpa = cpuid_query_maxguestphyaddr(vcpu);
5780 	else
5781 		maxpa = cpuid_maxphyaddr(vcpu);
5782 
5783 	/* tdp_root_level is architecture forced level, use it if nonzero */
5784 	if (tdp_root_level)
5785 		return tdp_root_level;
5786 
5787 	/* Use 5-level TDP if and only if it's useful/necessary. */
5788 	if (max_tdp_level == 5 && maxpa <= 48)
5789 		return 4;
5790 
5791 	return max_tdp_level;
5792 }
5793 
kvm_mmu_get_max_tdp_level(void)5794 u8 kvm_mmu_get_max_tdp_level(void)
5795 {
5796 	return tdp_root_level ? tdp_root_level : max_tdp_level;
5797 }
5798 
5799 static union kvm_mmu_page_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu * vcpu,union kvm_cpu_role cpu_role)5800 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
5801 				union kvm_cpu_role cpu_role)
5802 {
5803 	union kvm_mmu_page_role role = {0};
5804 
5805 	role.access = ACC_ALL;
5806 	role.cr0_wp = true;
5807 	role.efer_nx = true;
5808 	role.smm = cpu_role.base.smm;
5809 	role.guest_mode = cpu_role.base.guest_mode;
5810 	role.ad_disabled = !kvm_ad_enabled;
5811 	role.level = kvm_mmu_get_tdp_level(vcpu);
5812 	role.direct = true;
5813 	role.has_4_byte_gpte = false;
5814 
5815 	return role;
5816 }
5817 
init_kvm_tdp_mmu(struct kvm_vcpu * vcpu,union kvm_cpu_role cpu_role)5818 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
5819 			     union kvm_cpu_role cpu_role)
5820 {
5821 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5822 	union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);
5823 
5824 	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5825 	    root_role.word == context->root_role.word)
5826 		return;
5827 
5828 	context->cpu_role.as_u64 = cpu_role.as_u64;
5829 	context->root_role.word = root_role.word;
5830 	context->page_fault = kvm_tdp_page_fault;
5831 	context->sync_spte = NULL;
5832 	context->get_guest_pgd = get_guest_cr3;
5833 	context->get_pdptr = kvm_pdptr_read;
5834 	context->inject_page_fault = kvm_inject_page_fault;
5835 
5836 	if (!is_cr0_pg(context))
5837 		context->gva_to_gpa = nonpaging_gva_to_gpa;
5838 	else if (is_cr4_pae(context))
5839 		context->gva_to_gpa = paging64_gva_to_gpa;
5840 	else
5841 		context->gva_to_gpa = paging32_gva_to_gpa;
5842 
5843 	reset_guest_paging_metadata(vcpu, context);
5844 	reset_tdp_shadow_zero_bits_mask(context);
5845 }
5846 
shadow_mmu_init_context(struct kvm_vcpu * vcpu,struct kvm_mmu * context,union kvm_cpu_role cpu_role,union kvm_mmu_page_role root_role)5847 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
5848 				    union kvm_cpu_role cpu_role,
5849 				    union kvm_mmu_page_role root_role)
5850 {
5851 	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5852 	    root_role.word == context->root_role.word)
5853 		return;
5854 
5855 	context->cpu_role.as_u64 = cpu_role.as_u64;
5856 	context->root_role.word = root_role.word;
5857 
5858 	if (!is_cr0_pg(context))
5859 		nonpaging_init_context(context);
5860 	else if (is_cr4_pae(context))
5861 		paging64_init_context(context);
5862 	else
5863 		paging32_init_context(context);
5864 
5865 	reset_guest_paging_metadata(vcpu, context);
5866 	reset_shadow_zero_bits_mask(vcpu, context);
5867 }
5868 
kvm_init_shadow_mmu(struct kvm_vcpu * vcpu,union kvm_cpu_role cpu_role)5869 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
5870 				union kvm_cpu_role cpu_role)
5871 {
5872 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5873 	union kvm_mmu_page_role root_role;
5874 
5875 	root_role = cpu_role.base;
5876 
5877 	/* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */
5878 	root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);
5879 
5880 	/*
5881 	 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
5882 	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
5883 	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
5884 	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
5885 	 * The iTLB multi-hit workaround can be toggled at any time, so assume
5886 	 * NX can be used by any non-nested shadow MMU to avoid having to reset
5887 	 * MMU contexts.
5888 	 */
5889 	root_role.efer_nx = true;
5890 
5891 	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5892 }
5893 
kvm_init_shadow_npt_mmu(struct kvm_vcpu * vcpu,unsigned long cr0,unsigned long cr4,u64 efer,gpa_t nested_cr3)5894 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
5895 			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
5896 {
5897 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5898 	struct kvm_mmu_role_regs regs = {
5899 		.cr0 = cr0,
5900 		.cr4 = cr4 & ~X86_CR4_PKE,
5901 		.efer = efer,
5902 	};
5903 	union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5904 	union kvm_mmu_page_role root_role;
5905 
5906 	/* NPT requires CR0.PG=1. */
5907 	WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode);
5908 
5909 	root_role = cpu_role.base;
5910 	root_role.level = kvm_mmu_get_tdp_level(vcpu);
5911 	if (root_role.level == PT64_ROOT_5LEVEL &&
5912 	    cpu_role.base.level == PT64_ROOT_4LEVEL)
5913 		root_role.passthrough = 1;
5914 
5915 	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5916 	kvm_mmu_new_pgd(vcpu, nested_cr3);
5917 }
5918 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_shadow_npt_mmu);
5919 
5920 static union kvm_cpu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu * vcpu,bool accessed_dirty,bool execonly,u8 level)5921 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
5922 				   bool execonly, u8 level)
5923 {
5924 	union kvm_cpu_role role = {0};
5925 
5926 	/*
5927 	 * KVM does not support SMM transfer monitors, and consequently does not
5928 	 * support the "entry to SMM" control either.  role.base.smm is always 0.
5929 	 */
5930 	WARN_ON_ONCE(is_smm(vcpu));
5931 	role.base.level = level;
5932 	role.base.has_4_byte_gpte = false;
5933 	role.base.direct = false;
5934 	role.base.ad_disabled = !accessed_dirty;
5935 	role.base.guest_mode = true;
5936 	role.base.access = ACC_ALL;
5937 
5938 	role.ext.word = 0;
5939 	role.ext.execonly = execonly;
5940 	role.ext.valid = 1;
5941 
5942 	return role;
5943 }
5944 
kvm_init_shadow_ept_mmu(struct kvm_vcpu * vcpu,bool execonly,int huge_page_level,bool accessed_dirty,gpa_t new_eptp)5945 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
5946 			     int huge_page_level, bool accessed_dirty,
5947 			     gpa_t new_eptp)
5948 {
5949 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5950 	u8 level = vmx_eptp_page_walk_level(new_eptp);
5951 	union kvm_cpu_role new_mode =
5952 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
5953 						   execonly, level);
5954 
5955 	if (new_mode.as_u64 != context->cpu_role.as_u64) {
5956 		/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
5957 		context->cpu_role.as_u64 = new_mode.as_u64;
5958 		context->root_role.word = new_mode.base.word;
5959 
5960 		context->page_fault = ept_page_fault;
5961 		context->gva_to_gpa = ept_gva_to_gpa;
5962 		context->sync_spte = ept_sync_spte;
5963 
5964 		update_permission_bitmask(context, true);
5965 		context->pkru_mask = 0;
5966 		reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
5967 		reset_ept_shadow_zero_bits_mask(context, execonly);
5968 	}
5969 
5970 	kvm_mmu_new_pgd(vcpu, new_eptp);
5971 }
5972 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_shadow_ept_mmu);
5973 
init_kvm_softmmu(struct kvm_vcpu * vcpu,union kvm_cpu_role cpu_role)5974 static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
5975 			     union kvm_cpu_role cpu_role)
5976 {
5977 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5978 
5979 	kvm_init_shadow_mmu(vcpu, cpu_role);
5980 
5981 	context->get_guest_pgd     = get_guest_cr3;
5982 	context->get_pdptr         = kvm_pdptr_read;
5983 	context->inject_page_fault = kvm_inject_page_fault;
5984 }
5985 
init_kvm_nested_mmu(struct kvm_vcpu * vcpu,union kvm_cpu_role new_mode)5986 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
5987 				union kvm_cpu_role new_mode)
5988 {
5989 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5990 
5991 	if (new_mode.as_u64 == g_context->cpu_role.as_u64)
5992 		return;
5993 
5994 	g_context->cpu_role.as_u64   = new_mode.as_u64;
5995 	g_context->get_guest_pgd     = get_guest_cr3;
5996 	g_context->get_pdptr         = kvm_pdptr_read;
5997 	g_context->inject_page_fault = kvm_inject_page_fault;
5998 
5999 	/*
6000 	 * L2 page tables are never shadowed, so there is no need to sync
6001 	 * SPTEs.
6002 	 */
6003 	g_context->sync_spte         = NULL;
6004 
6005 	/*
6006 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
6007 	 * L1's nested page tables (e.g. EPT12). The nested translation
6008 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
6009 	 * L2's page tables as the first level of translation and L1's
6010 	 * nested page tables as the second level of translation. Basically
6011 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
6012 	 */
6013 	if (!is_paging(vcpu))
6014 		g_context->gva_to_gpa = nonpaging_gva_to_gpa;
6015 	else if (is_long_mode(vcpu))
6016 		g_context->gva_to_gpa = paging64_gva_to_gpa;
6017 	else if (is_pae(vcpu))
6018 		g_context->gva_to_gpa = paging64_gva_to_gpa;
6019 	else
6020 		g_context->gva_to_gpa = paging32_gva_to_gpa;
6021 
6022 	reset_guest_paging_metadata(vcpu, g_context);
6023 }
6024 
kvm_init_mmu(struct kvm_vcpu * vcpu)6025 void kvm_init_mmu(struct kvm_vcpu *vcpu)
6026 {
6027 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
6028 	union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
6029 
6030 	if (mmu_is_nested(vcpu))
6031 		init_kvm_nested_mmu(vcpu, cpu_role);
6032 	else if (tdp_enabled)
6033 		init_kvm_tdp_mmu(vcpu, cpu_role);
6034 	else
6035 		init_kvm_softmmu(vcpu, cpu_role);
6036 }
6037 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_mmu);
6038 
kvm_mmu_after_set_cpuid(struct kvm_vcpu * vcpu)6039 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
6040 {
6041 	/*
6042 	 * Invalidate all MMU roles to force them to reinitialize as CPUID
6043 	 * information is factored into reserved bit calculations.
6044 	 *
6045 	 * Correctly handling multiple vCPU models with respect to paging and
6046 	 * physical address properties) in a single VM would require tracking
6047 	 * all relevant CPUID information in kvm_mmu_page_role. That is very
6048 	 * undesirable as it would increase the memory requirements for
6049 	 * gfn_write_track (see struct kvm_mmu_page_role comments).  For now
6050 	 * that problem is swept under the rug; KVM's CPUID API is horrific and
6051 	 * it's all but impossible to solve it without introducing a new API.
6052 	 */
6053 	vcpu->arch.root_mmu.root_role.invalid = 1;
6054 	vcpu->arch.guest_mmu.root_role.invalid = 1;
6055 	vcpu->arch.nested_mmu.root_role.invalid = 1;
6056 	vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
6057 	vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
6058 	vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
6059 	kvm_mmu_reset_context(vcpu);
6060 
6061 	KVM_BUG_ON(!kvm_can_set_cpuid_and_feature_msrs(vcpu), vcpu->kvm);
6062 }
6063 
kvm_mmu_reset_context(struct kvm_vcpu * vcpu)6064 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
6065 {
6066 	kvm_mmu_unload(vcpu);
6067 	kvm_init_mmu(vcpu);
6068 }
6069 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_reset_context);
6070 
kvm_mmu_load(struct kvm_vcpu * vcpu)6071 int kvm_mmu_load(struct kvm_vcpu *vcpu)
6072 {
6073 	int r;
6074 
6075 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
6076 	if (r)
6077 		goto out;
6078 	r = mmu_alloc_special_roots(vcpu);
6079 	if (r)
6080 		goto out;
6081 	if (vcpu->arch.mmu->root_role.direct)
6082 		r = mmu_alloc_direct_roots(vcpu);
6083 	else
6084 		r = mmu_alloc_shadow_roots(vcpu);
6085 	if (r)
6086 		goto out;
6087 
6088 	kvm_mmu_sync_roots(vcpu);
6089 
6090 	kvm_mmu_load_pgd(vcpu);
6091 
6092 	/*
6093 	 * Flush any TLB entries for the new root, the provenance of the root
6094 	 * is unknown.  Even if KVM ensures there are no stale TLB entries
6095 	 * for a freed root, in theory another hypervisor could have left
6096 	 * stale entries.  Flushing on alloc also allows KVM to skip the TLB
6097 	 * flush when freeing a root (see kvm_tdp_mmu_put_root()).
6098 	 */
6099 	kvm_x86_call(flush_tlb_current)(vcpu);
6100 out:
6101 	return r;
6102 }
6103 
kvm_mmu_unload(struct kvm_vcpu * vcpu)6104 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
6105 {
6106 	struct kvm *kvm = vcpu->kvm;
6107 
6108 	kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
6109 	WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
6110 	kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
6111 	WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
6112 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
6113 }
6114 
is_obsolete_root(struct kvm * kvm,hpa_t root_hpa)6115 static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
6116 {
6117 	struct kvm_mmu_page *sp;
6118 
6119 	if (!VALID_PAGE(root_hpa))
6120 		return false;
6121 
6122 	/*
6123 	 * When freeing obsolete roots, treat roots as obsolete if they don't
6124 	 * have an associated shadow page, as it's impossible to determine if
6125 	 * such roots are fresh or stale.  This does mean KVM will get false
6126 	 * positives and free roots that don't strictly need to be freed, but
6127 	 * such false positives are relatively rare:
6128 	 *
6129 	 *  (a) only PAE paging and nested NPT have roots without shadow pages
6130 	 *      (or any shadow paging flavor with a dummy root, see note below)
6131 	 *  (b) remote reloads due to a memslot update obsoletes _all_ roots
6132 	 *  (c) KVM doesn't track previous roots for PAE paging, and the guest
6133 	 *      is unlikely to zap an in-use PGD.
6134 	 *
6135 	 * Note!  Dummy roots are unique in that they are obsoleted by memslot
6136 	 * _creation_!  See also FNAME(fetch).
6137 	 */
6138 	sp = root_to_sp(root_hpa);
6139 	return !sp || is_obsolete_sp(kvm, sp);
6140 }
6141 
__kvm_mmu_free_obsolete_roots(struct kvm * kvm,struct kvm_mmu * mmu)6142 static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
6143 {
6144 	unsigned long roots_to_free = 0;
6145 	int i;
6146 
6147 	if (is_obsolete_root(kvm, mmu->root.hpa))
6148 		roots_to_free |= KVM_MMU_ROOT_CURRENT;
6149 
6150 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6151 		if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
6152 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
6153 	}
6154 
6155 	if (roots_to_free)
6156 		kvm_mmu_free_roots(kvm, mmu, roots_to_free);
6157 }
6158 
kvm_mmu_free_obsolete_roots(struct kvm_vcpu * vcpu)6159 void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
6160 {
6161 	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
6162 	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
6163 }
6164 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_obsolete_roots);
6165 
mmu_pte_write_fetch_gpte(struct kvm_vcpu * vcpu,gpa_t * gpa,int * bytes)6166 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
6167 				    int *bytes)
6168 {
6169 	u64 gentry = 0;
6170 	int r;
6171 
6172 	/*
6173 	 * Assume that the pte write on a page table of the same type
6174 	 * as the current vcpu paging mode since we update the sptes only
6175 	 * when they have the same mode.
6176 	 */
6177 	if (is_pae(vcpu) && *bytes == 4) {
6178 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
6179 		*gpa &= ~(gpa_t)7;
6180 		*bytes = 8;
6181 	}
6182 
6183 	if (*bytes == 4 || *bytes == 8) {
6184 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
6185 		if (r)
6186 			gentry = 0;
6187 	}
6188 
6189 	return gentry;
6190 }
6191 
6192 /*
6193  * If we're seeing too many writes to a page, it may no longer be a page table,
6194  * or we may be forking, in which case it is better to unmap the page.
6195  */
detect_write_flooding(struct kvm_mmu_page * sp)6196 static bool detect_write_flooding(struct kvm_mmu_page *sp)
6197 {
6198 	/*
6199 	 * Skip write-flooding detected for the sp whose level is 1, because
6200 	 * it can become unsync, then the guest page is not write-protected.
6201 	 */
6202 	if (sp->role.level == PG_LEVEL_4K)
6203 		return false;
6204 
6205 	atomic_inc(&sp->write_flooding_count);
6206 	return atomic_read(&sp->write_flooding_count) >= 3;
6207 }
6208 
6209 /*
6210  * Misaligned accesses are too much trouble to fix up; also, they usually
6211  * indicate a page is not used as a page table.
6212  */
detect_write_misaligned(struct kvm_mmu_page * sp,gpa_t gpa,int bytes)6213 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
6214 				    int bytes)
6215 {
6216 	unsigned offset, pte_size, misaligned;
6217 
6218 	offset = offset_in_page(gpa);
6219 	pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
6220 
6221 	/*
6222 	 * Sometimes, the OS only writes the last one bytes to update status
6223 	 * bits, for example, in linux, andb instruction is used in clear_bit().
6224 	 */
6225 	if (!(offset & (pte_size - 1)) && bytes == 1)
6226 		return false;
6227 
6228 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
6229 	misaligned |= bytes < 4;
6230 
6231 	return misaligned;
6232 }
6233 
get_written_sptes(struct kvm_mmu_page * sp,gpa_t gpa,int * nspte)6234 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
6235 {
6236 	unsigned page_offset, quadrant;
6237 	u64 *spte;
6238 	int level;
6239 
6240 	page_offset = offset_in_page(gpa);
6241 	level = sp->role.level;
6242 	*nspte = 1;
6243 	if (sp->role.has_4_byte_gpte) {
6244 		page_offset <<= 1;	/* 32->64 */
6245 		/*
6246 		 * A 32-bit pde maps 4MB while the shadow pdes map
6247 		 * only 2MB.  So we need to double the offset again
6248 		 * and zap two pdes instead of one.
6249 		 */
6250 		if (level == PT32_ROOT_LEVEL) {
6251 			page_offset &= ~7; /* kill rounding error */
6252 			page_offset <<= 1;
6253 			*nspte = 2;
6254 		}
6255 		quadrant = page_offset >> PAGE_SHIFT;
6256 		page_offset &= ~PAGE_MASK;
6257 		if (quadrant != sp->role.quadrant)
6258 			return NULL;
6259 	}
6260 
6261 	spte = &sp->spt[page_offset / sizeof(*spte)];
6262 	return spte;
6263 }
6264 
kvm_mmu_track_write(struct kvm_vcpu * vcpu,gpa_t gpa,const u8 * new,int bytes)6265 void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
6266 			 int bytes)
6267 {
6268 	gfn_t gfn = gpa >> PAGE_SHIFT;
6269 	struct kvm_mmu_page *sp;
6270 	LIST_HEAD(invalid_list);
6271 	u64 entry, gentry, *spte;
6272 	int npte;
6273 	bool flush = false;
6274 
6275 	/*
6276 	 * When emulating guest writes, ensure the written value is visible to
6277 	 * any task that is handling page faults before checking whether or not
6278 	 * KVM is shadowing a guest PTE.  This ensures either KVM will create
6279 	 * the correct SPTE in the page fault handler, or this task will see
6280 	 * a non-zero indirect_shadow_pages.  Pairs with the smp_mb() in
6281 	 * account_shadowed().
6282 	 */
6283 	smp_mb();
6284 	if (!vcpu->kvm->arch.indirect_shadow_pages)
6285 		return;
6286 
6287 	write_lock(&vcpu->kvm->mmu_lock);
6288 
6289 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
6290 
6291 	++vcpu->kvm->stat.mmu_pte_write;
6292 
6293 	for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
6294 		if (detect_write_misaligned(sp, gpa, bytes) ||
6295 		      detect_write_flooding(sp)) {
6296 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
6297 			++vcpu->kvm->stat.mmu_flooded;
6298 			continue;
6299 		}
6300 
6301 		spte = get_written_sptes(sp, gpa, &npte);
6302 		if (!spte)
6303 			continue;
6304 
6305 		while (npte--) {
6306 			entry = *spte;
6307 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
6308 			if (gentry && sp->role.level != PG_LEVEL_4K)
6309 				++vcpu->kvm->stat.mmu_pde_zapped;
6310 			if (is_shadow_present_pte(entry))
6311 				flush = true;
6312 			++spte;
6313 		}
6314 	}
6315 	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
6316 	write_unlock(&vcpu->kvm->mmu_lock);
6317 }
6318 
is_write_to_guest_page_table(u64 error_code)6319 static bool is_write_to_guest_page_table(u64 error_code)
6320 {
6321 	const u64 mask = PFERR_GUEST_PAGE_MASK | PFERR_WRITE_MASK | PFERR_PRESENT_MASK;
6322 
6323 	return (error_code & mask) == mask;
6324 }
6325 
kvm_mmu_write_protect_fault(struct kvm_vcpu * vcpu,gpa_t cr2_or_gpa,u64 error_code,int * emulation_type)6326 static int kvm_mmu_write_protect_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
6327 				       u64 error_code, int *emulation_type)
6328 {
6329 	bool direct = vcpu->arch.mmu->root_role.direct;
6330 
6331 	/*
6332 	 * Do not try to unprotect and retry if the vCPU re-faulted on the same
6333 	 * RIP with the same address that was previously unprotected, as doing
6334 	 * so will likely put the vCPU into an infinite.  E.g. if the vCPU uses
6335 	 * a non-page-table modifying instruction on the PDE that points to the
6336 	 * instruction, then unprotecting the gfn will unmap the instruction's
6337 	 * code, i.e. make it impossible for the instruction to ever complete.
6338 	 */
6339 	if (vcpu->arch.last_retry_eip == kvm_rip_read(vcpu) &&
6340 	    vcpu->arch.last_retry_addr == cr2_or_gpa)
6341 		return RET_PF_EMULATE;
6342 
6343 	/*
6344 	 * Reset the unprotect+retry values that guard against infinite loops.
6345 	 * The values will be refreshed if KVM explicitly unprotects a gfn and
6346 	 * retries, in all other cases it's safe to retry in the future even if
6347 	 * the next page fault happens on the same RIP+address.
6348 	 */
6349 	vcpu->arch.last_retry_eip = 0;
6350 	vcpu->arch.last_retry_addr = 0;
6351 
6352 	/*
6353 	 * It should be impossible to reach this point with an MMIO cache hit,
6354 	 * as RET_PF_WRITE_PROTECTED is returned if and only if there's a valid,
6355 	 * writable memslot, and creating a memslot should invalidate the MMIO
6356 	 * cache by way of changing the memslot generation.  WARN and disallow
6357 	 * retry if MMIO is detected, as retrying MMIO emulation is pointless
6358 	 * and could put the vCPU into an infinite loop because the processor
6359 	 * will keep faulting on the non-existent MMIO address.
6360 	 */
6361 	if (WARN_ON_ONCE(mmio_info_in_cache(vcpu, cr2_or_gpa, direct)))
6362 		return RET_PF_EMULATE;
6363 
6364 	/*
6365 	 * Before emulating the instruction, check to see if the access was due
6366 	 * to a read-only violation while the CPU was walking non-nested NPT
6367 	 * page tables, i.e. for a direct MMU, for _guest_ page tables in L1.
6368 	 * If L1 is sharing (a subset of) its page tables with L2, e.g. by
6369 	 * having nCR3 share lower level page tables with hCR3, then when KVM
6370 	 * (L0) write-protects the nested NPTs, i.e. npt12 entries, KVM is also
6371 	 * unknowingly write-protecting L1's guest page tables, which KVM isn't
6372 	 * shadowing.
6373 	 *
6374 	 * Because the CPU (by default) walks NPT page tables using a write
6375 	 * access (to ensure the CPU can do A/D updates), page walks in L1 can
6376 	 * trigger write faults for the above case even when L1 isn't modifying
6377 	 * PTEs.  As a result, KVM will unnecessarily emulate (or at least, try
6378 	 * to emulate) an excessive number of L1 instructions; because L1's MMU
6379 	 * isn't shadowed by KVM, there is no need to write-protect L1's gPTEs
6380 	 * and thus no need to emulate in order to guarantee forward progress.
6381 	 *
6382 	 * Try to unprotect the gfn, i.e. zap any shadow pages, so that L1 can
6383 	 * proceed without triggering emulation.  If one or more shadow pages
6384 	 * was zapped, skip emulation and resume L1 to let it natively execute
6385 	 * the instruction.  If no shadow pages were zapped, then the write-
6386 	 * fault is due to something else entirely, i.e. KVM needs to emulate,
6387 	 * as resuming the guest will put it into an infinite loop.
6388 	 *
6389 	 * Note, this code also applies to Intel CPUs, even though it is *very*
6390 	 * unlikely that an L1 will share its page tables (IA32/PAE/paging64
6391 	 * format) with L2's page tables (EPT format).
6392 	 *
6393 	 * For indirect MMUs, i.e. if KVM is shadowing the current MMU, try to
6394 	 * unprotect the gfn and retry if an event is awaiting reinjection.  If
6395 	 * KVM emulates multiple instructions before completing event injection,
6396 	 * the event could be delayed beyond what is architecturally allowed,
6397 	 * e.g. KVM could inject an IRQ after the TPR has been raised.
6398 	 */
6399 	if (((direct && is_write_to_guest_page_table(error_code)) ||
6400 	     (!direct && kvm_event_needs_reinjection(vcpu))) &&
6401 	    kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
6402 		return RET_PF_RETRY;
6403 
6404 	/*
6405 	 * The gfn is write-protected, but if KVM detects its emulating an
6406 	 * instruction that is unlikely to be used to modify page tables, or if
6407 	 * emulation fails, KVM can try to unprotect the gfn and let the CPU
6408 	 * re-execute the instruction that caused the page fault.  Do not allow
6409 	 * retrying an instruction from a nested guest as KVM is only explicitly
6410 	 * shadowing L1's page tables, i.e. unprotecting something for L1 isn't
6411 	 * going to magically fix whatever issue caused L2 to fail.
6412 	 */
6413 	if (!is_guest_mode(vcpu))
6414 		*emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
6415 
6416 	return RET_PF_EMULATE;
6417 }
6418 
kvm_mmu_page_fault(struct kvm_vcpu * vcpu,gpa_t cr2_or_gpa,u64 error_code,void * insn,int insn_len)6419 int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
6420 		       void *insn, int insn_len)
6421 {
6422 	int r, emulation_type = EMULTYPE_PF;
6423 	bool direct = vcpu->arch.mmu->root_role.direct;
6424 
6425 	if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
6426 		return RET_PF_RETRY;
6427 
6428 	/*
6429 	 * Except for reserved faults (emulated MMIO is shared-only), set the
6430 	 * PFERR_PRIVATE_ACCESS flag for software-protected VMs based on the gfn's
6431 	 * current attributes, which are the source of truth for such VMs.  Note,
6432 	 * this wrong for nested MMUs as the GPA is an L2 GPA, but KVM doesn't
6433 	 * currently supported nested virtualization (among many other things)
6434 	 * for software-protected VMs.
6435 	 */
6436 	if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) &&
6437 	    !(error_code & PFERR_RSVD_MASK) &&
6438 	    vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM &&
6439 	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)))
6440 		error_code |= PFERR_PRIVATE_ACCESS;
6441 
6442 	r = RET_PF_INVALID;
6443 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
6444 		if (WARN_ON_ONCE(error_code & PFERR_PRIVATE_ACCESS))
6445 			return -EFAULT;
6446 
6447 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
6448 		if (r == RET_PF_EMULATE)
6449 			goto emulate;
6450 	}
6451 
6452 	if (r == RET_PF_INVALID) {
6453 		vcpu->stat.pf_taken++;
6454 
6455 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false,
6456 					  &emulation_type, NULL);
6457 		if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
6458 			return -EIO;
6459 	}
6460 
6461 	if (r < 0)
6462 		return r;
6463 
6464 	if (r == RET_PF_WRITE_PROTECTED)
6465 		r = kvm_mmu_write_protect_fault(vcpu, cr2_or_gpa, error_code,
6466 						&emulation_type);
6467 
6468 	if (r == RET_PF_FIXED)
6469 		vcpu->stat.pf_fixed++;
6470 	else if (r == RET_PF_EMULATE)
6471 		vcpu->stat.pf_emulate++;
6472 	else if (r == RET_PF_SPURIOUS)
6473 		vcpu->stat.pf_spurious++;
6474 
6475 	/*
6476 	 * None of handle_mmio_page_fault(), kvm_mmu_do_page_fault(), or
6477 	 * kvm_mmu_write_protect_fault() return RET_PF_CONTINUE.
6478 	 * kvm_mmu_do_page_fault() only uses RET_PF_CONTINUE internally to
6479 	 * indicate continuing the page fault handling until to the final
6480 	 * page table mapping phase.
6481 	 */
6482 	WARN_ON_ONCE(r == RET_PF_CONTINUE);
6483 	if (r != RET_PF_EMULATE)
6484 		return r;
6485 
6486 emulate:
6487 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
6488 				       insn_len);
6489 }
6490 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_page_fault);
6491 
kvm_mmu_print_sptes(struct kvm_vcpu * vcpu,gpa_t gpa,const char * msg)6492 void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
6493 {
6494 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
6495 	int root_level, leaf, level;
6496 
6497 	leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level);
6498 	if (unlikely(leaf < 0))
6499 		return;
6500 
6501 	pr_err("%s %llx", msg, gpa);
6502 	for (level = root_level; level >= leaf; level--)
6503 		pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
6504 	pr_cont("\n");
6505 }
6506 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_print_sptes);
6507 
__kvm_mmu_invalidate_addr(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu,u64 addr,hpa_t root_hpa)6508 static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
6509 				      u64 addr, hpa_t root_hpa)
6510 {
6511 	struct kvm_shadow_walk_iterator iterator;
6512 
6513 	vcpu_clear_mmio_info(vcpu, addr);
6514 
6515 	/*
6516 	 * Walking and synchronizing SPTEs both assume they are operating in
6517 	 * the context of the current MMU, and would need to be reworked if
6518 	 * this is ever used to sync the guest_mmu, e.g. to emulate INVEPT.
6519 	 */
6520 	if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
6521 		return;
6522 
6523 	if (!VALID_PAGE(root_hpa))
6524 		return;
6525 
6526 	write_lock(&vcpu->kvm->mmu_lock);
6527 	for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) {
6528 		struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
6529 
6530 		if (sp->unsync) {
6531 			int ret = kvm_sync_spte(vcpu, sp, iterator.index);
6532 
6533 			if (ret < 0)
6534 				mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL);
6535 			if (ret)
6536 				kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep);
6537 		}
6538 
6539 		if (!sp->unsync_children)
6540 			break;
6541 	}
6542 	write_unlock(&vcpu->kvm->mmu_lock);
6543 }
6544 
kvm_mmu_invalidate_addr(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu,u64 addr,unsigned long roots)6545 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
6546 			     u64 addr, unsigned long roots)
6547 {
6548 	int i;
6549 
6550 	WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
6551 
6552 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
6553 	if (mmu != &vcpu->arch.guest_mmu) {
6554 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
6555 		if (is_noncanonical_invlpg_address(addr, vcpu))
6556 			return;
6557 
6558 		kvm_x86_call(flush_tlb_gva)(vcpu, addr);
6559 	}
6560 
6561 	if (!mmu->sync_spte)
6562 		return;
6563 
6564 	if (roots & KVM_MMU_ROOT_CURRENT)
6565 		__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa);
6566 
6567 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6568 		if (roots & KVM_MMU_ROOT_PREVIOUS(i))
6569 			__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
6570 	}
6571 }
6572 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_invalidate_addr);
6573 
kvm_mmu_invlpg(struct kvm_vcpu * vcpu,gva_t gva)6574 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
6575 {
6576 	/*
6577 	 * INVLPG is required to invalidate any global mappings for the VA,
6578 	 * irrespective of PCID.  Blindly sync all roots as it would take
6579 	 * roughly the same amount of work/time to determine whether any of the
6580 	 * previous roots have a global mapping.
6581 	 *
6582 	 * Mappings not reachable via the current or previous cached roots will
6583 	 * be synced when switching to that new cr3, so nothing needs to be
6584 	 * done here for them.
6585 	 */
6586 	kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
6587 	++vcpu->stat.invlpg;
6588 }
6589 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_invlpg);
6590 
6591 
kvm_mmu_invpcid_gva(struct kvm_vcpu * vcpu,gva_t gva,unsigned long pcid)6592 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
6593 {
6594 	struct kvm_mmu *mmu = vcpu->arch.mmu;
6595 	unsigned long roots = 0;
6596 	uint i;
6597 
6598 	if (pcid == kvm_get_active_pcid(vcpu))
6599 		roots |= KVM_MMU_ROOT_CURRENT;
6600 
6601 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6602 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
6603 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd))
6604 			roots |= KVM_MMU_ROOT_PREVIOUS(i);
6605 	}
6606 
6607 	if (roots)
6608 		kvm_mmu_invalidate_addr(vcpu, mmu, gva, roots);
6609 	++vcpu->stat.invlpg;
6610 
6611 	/*
6612 	 * Mappings not reachable via the current cr3 or the prev_roots will be
6613 	 * synced when switching to that cr3, so nothing needs to be done here
6614 	 * for them.
6615 	 */
6616 }
6617 
kvm_configure_mmu(bool enable_tdp,int tdp_forced_root_level,int tdp_max_root_level,int tdp_huge_page_level)6618 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
6619 		       int tdp_max_root_level, int tdp_huge_page_level)
6620 {
6621 	tdp_enabled = enable_tdp;
6622 	tdp_root_level = tdp_forced_root_level;
6623 	max_tdp_level = tdp_max_root_level;
6624 
6625 #ifdef CONFIG_X86_64
6626 	tdp_mmu_enabled = tdp_mmu_allowed && tdp_enabled;
6627 #endif
6628 	/*
6629 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
6630 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
6631 	 * the kernel is not.  But, KVM never creates a page size greater than
6632 	 * what is used by the kernel for any given HVA, i.e. the kernel's
6633 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
6634 	 */
6635 	if (tdp_enabled)
6636 		max_huge_page_level = tdp_huge_page_level;
6637 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
6638 		max_huge_page_level = PG_LEVEL_1G;
6639 	else
6640 		max_huge_page_level = PG_LEVEL_2M;
6641 }
6642 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_configure_mmu);
6643 
free_mmu_pages(struct kvm_mmu * mmu)6644 static void free_mmu_pages(struct kvm_mmu *mmu)
6645 {
6646 	if (!tdp_enabled && mmu->pae_root)
6647 		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
6648 	free_page((unsigned long)mmu->pae_root);
6649 	free_page((unsigned long)mmu->pml4_root);
6650 	free_page((unsigned long)mmu->pml5_root);
6651 }
6652 
__kvm_mmu_create(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu)6653 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
6654 {
6655 	struct page *page;
6656 	int i;
6657 
6658 	mmu->root.hpa = INVALID_PAGE;
6659 	mmu->root.pgd = 0;
6660 	mmu->mirror_root_hpa = INVALID_PAGE;
6661 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
6662 		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
6663 
6664 	/* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
6665 	if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
6666 		return 0;
6667 
6668 	/*
6669 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
6670 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
6671 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
6672 	 * x86_64.  Therefore we need to allocate the PDP table in the first
6673 	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
6674 	 * generally doesn't use PAE paging and can skip allocating the PDP
6675 	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
6676 	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
6677 	 * KVM; that horror is handled on-demand by mmu_alloc_special_roots().
6678 	 */
6679 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
6680 		return 0;
6681 
6682 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
6683 	if (!page)
6684 		return -ENOMEM;
6685 
6686 	mmu->pae_root = page_address(page);
6687 
6688 	/*
6689 	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
6690 	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
6691 	 * that KVM's writes and the CPU's reads get along.  Note, this is
6692 	 * only necessary when using shadow paging, as 64-bit NPT can get at
6693 	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
6694 	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
6695 	 */
6696 	if (!tdp_enabled)
6697 		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
6698 	else
6699 		WARN_ON_ONCE(shadow_me_value);
6700 
6701 	for (i = 0; i < 4; ++i)
6702 		mmu->pae_root[i] = INVALID_PAE_ROOT;
6703 
6704 	return 0;
6705 }
6706 
kvm_mmu_create(struct kvm_vcpu * vcpu)6707 int kvm_mmu_create(struct kvm_vcpu *vcpu)
6708 {
6709 	int ret;
6710 
6711 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
6712 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
6713 
6714 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
6715 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
6716 
6717 	vcpu->arch.mmu_shadow_page_cache.init_value =
6718 		SHADOW_NONPRESENT_VALUE;
6719 	if (!vcpu->arch.mmu_shadow_page_cache.init_value)
6720 		vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
6721 
6722 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
6723 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
6724 
6725 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
6726 	if (ret)
6727 		return ret;
6728 
6729 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
6730 	if (ret)
6731 		goto fail_allocate_root;
6732 
6733 	return ret;
6734  fail_allocate_root:
6735 	free_mmu_pages(&vcpu->arch.guest_mmu);
6736 	return ret;
6737 }
6738 
6739 #define BATCH_ZAP_PAGES	10
kvm_zap_obsolete_pages(struct kvm * kvm)6740 static void kvm_zap_obsolete_pages(struct kvm *kvm)
6741 {
6742 	struct kvm_mmu_page *sp, *node;
6743 	int nr_zapped, batch = 0;
6744 	LIST_HEAD(invalid_list);
6745 	bool unstable;
6746 
6747 	lockdep_assert_held(&kvm->slots_lock);
6748 
6749 restart:
6750 	list_for_each_entry_safe_reverse(sp, node,
6751 	      &kvm->arch.active_mmu_pages, link) {
6752 		/*
6753 		 * No obsolete valid page exists before a newly created page
6754 		 * since active_mmu_pages is a FIFO list.
6755 		 */
6756 		if (!is_obsolete_sp(kvm, sp))
6757 			break;
6758 
6759 		/*
6760 		 * Invalid pages should never land back on the list of active
6761 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
6762 		 * infinite loop if the page gets put back on the list (again).
6763 		 */
6764 		if (WARN_ON_ONCE(sp->role.invalid))
6765 			continue;
6766 
6767 		/*
6768 		 * No need to flush the TLB since we're only zapping shadow
6769 		 * pages with an obsolete generation number and all vCPUS have
6770 		 * loaded a new root, i.e. the shadow pages being zapped cannot
6771 		 * be in active use by the guest.
6772 		 */
6773 		if (batch >= BATCH_ZAP_PAGES &&
6774 		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
6775 			batch = 0;
6776 			goto restart;
6777 		}
6778 
6779 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
6780 				&invalid_list, &nr_zapped);
6781 		batch += nr_zapped;
6782 
6783 		if (unstable)
6784 			goto restart;
6785 	}
6786 
6787 	/*
6788 	 * Kick all vCPUs (via remote TLB flush) before freeing the page tables
6789 	 * to ensure KVM is not in the middle of a lockless shadow page table
6790 	 * walk, which may reference the pages.  The remote TLB flush itself is
6791 	 * not required and is simply a convenient way to kick vCPUs as needed.
6792 	 * KVM performs a local TLB flush when allocating a new root (see
6793 	 * kvm_mmu_load()), and the reload in the caller ensure no vCPUs are
6794 	 * running with an obsolete MMU.
6795 	 */
6796 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
6797 }
6798 
6799 /*
6800  * Fast invalidate all shadow pages and use lock-break technique
6801  * to zap obsolete pages.
6802  *
6803  * It's required when memslot is being deleted or VM is being
6804  * destroyed, in these cases, we should ensure that KVM MMU does
6805  * not use any resource of the being-deleted slot or all slots
6806  * after calling the function.
6807  */
kvm_mmu_zap_all_fast(struct kvm * kvm)6808 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
6809 {
6810 	lockdep_assert_held(&kvm->slots_lock);
6811 
6812 	write_lock(&kvm->mmu_lock);
6813 	trace_kvm_mmu_zap_all_fast(kvm);
6814 
6815 	/*
6816 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
6817 	 * held for the entire duration of zapping obsolete pages, it's
6818 	 * impossible for there to be multiple invalid generations associated
6819 	 * with *valid* shadow pages at any given time, i.e. there is exactly
6820 	 * one valid generation and (at most) one invalid generation.
6821 	 */
6822 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
6823 
6824 	/*
6825 	 * In order to ensure all vCPUs drop their soon-to-be invalid roots,
6826 	 * invalidating TDP MMU roots must be done while holding mmu_lock for
6827 	 * write and in the same critical section as making the reload request,
6828 	 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
6829 	 */
6830 	if (tdp_mmu_enabled) {
6831 		/*
6832 		 * External page tables don't support fast zapping, therefore
6833 		 * their mirrors must be invalidated separately by the caller.
6834 		 */
6835 		kvm_tdp_mmu_invalidate_roots(kvm, KVM_DIRECT_ROOTS);
6836 	}
6837 
6838 	/*
6839 	 * Notify all vcpus to reload its shadow page table and flush TLB.
6840 	 * Then all vcpus will switch to new shadow page table with the new
6841 	 * mmu_valid_gen.
6842 	 *
6843 	 * Note: we need to do this under the protection of mmu_lock,
6844 	 * otherwise, vcpu would purge shadow page but miss tlb flush.
6845 	 */
6846 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
6847 
6848 	kvm_zap_obsolete_pages(kvm);
6849 
6850 	write_unlock(&kvm->mmu_lock);
6851 
6852 	/*
6853 	 * Zap the invalidated TDP MMU roots, all SPTEs must be dropped before
6854 	 * returning to the caller, e.g. if the zap is in response to a memslot
6855 	 * deletion, mmu_notifier callbacks will be unable to reach the SPTEs
6856 	 * associated with the deleted memslot once the update completes, and
6857 	 * Deferring the zap until the final reference to the root is put would
6858 	 * lead to use-after-free.
6859 	 */
6860 	if (tdp_mmu_enabled)
6861 		kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
6862 }
6863 
kvm_mmu_init_vm(struct kvm * kvm)6864 int kvm_mmu_init_vm(struct kvm *kvm)
6865 {
6866 	int r, i;
6867 
6868 	kvm->arch.shadow_mmio_value = shadow_mmio_value;
6869 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6870 	for (i = 0; i < KVM_NR_MMU_TYPES; ++i)
6871 		INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages[i].pages);
6872 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
6873 
6874 	if (tdp_mmu_enabled) {
6875 		kvm_mmu_init_tdp_mmu(kvm);
6876 	} else {
6877 		r = kvm_mmu_alloc_page_hash(kvm);
6878 		if (r)
6879 			return r;
6880 	}
6881 
6882 	kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
6883 	kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
6884 
6885 	kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
6886 
6887 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
6888 	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
6889 	return 0;
6890 }
6891 
mmu_free_vm_memory_caches(struct kvm * kvm)6892 static void mmu_free_vm_memory_caches(struct kvm *kvm)
6893 {
6894 	kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache);
6895 	kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
6896 	kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
6897 }
6898 
kvm_mmu_uninit_vm(struct kvm * kvm)6899 void kvm_mmu_uninit_vm(struct kvm *kvm)
6900 {
6901 	kvfree(kvm->arch.mmu_page_hash);
6902 
6903 	if (tdp_mmu_enabled)
6904 		kvm_mmu_uninit_tdp_mmu(kvm);
6905 
6906 	mmu_free_vm_memory_caches(kvm);
6907 }
6908 
kvm_rmap_zap_gfn_range(struct kvm * kvm,gfn_t gfn_start,gfn_t gfn_end)6909 static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6910 {
6911 	const struct kvm_memory_slot *memslot;
6912 	struct kvm_memslots *slots;
6913 	struct kvm_memslot_iter iter;
6914 	bool flush = false;
6915 	gfn_t start, end;
6916 	int i;
6917 
6918 	if (!kvm_memslots_have_rmaps(kvm))
6919 		return flush;
6920 
6921 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
6922 		slots = __kvm_memslots(kvm, i);
6923 
6924 		kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
6925 			memslot = iter.slot;
6926 			start = max(gfn_start, memslot->base_gfn);
6927 			end = min(gfn_end, memslot->base_gfn + memslot->npages);
6928 			if (WARN_ON_ONCE(start >= end))
6929 				continue;
6930 
6931 			flush = __kvm_rmap_zap_gfn_range(kvm, memslot, start,
6932 							 end, true, flush);
6933 		}
6934 	}
6935 
6936 	return flush;
6937 }
6938 
6939 /*
6940  * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end
6941  * (not including it)
6942  */
kvm_zap_gfn_range(struct kvm * kvm,gfn_t gfn_start,gfn_t gfn_end)6943 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6944 {
6945 	bool flush;
6946 
6947 	if (WARN_ON_ONCE(gfn_end <= gfn_start))
6948 		return;
6949 
6950 	write_lock(&kvm->mmu_lock);
6951 
6952 	kvm_mmu_invalidate_begin(kvm);
6953 
6954 	kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
6955 
6956 	flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
6957 
6958 	if (tdp_mmu_enabled)
6959 		flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
6960 
6961 	if (flush)
6962 		kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
6963 
6964 	kvm_mmu_invalidate_end(kvm);
6965 
6966 	write_unlock(&kvm->mmu_lock);
6967 }
6968 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_zap_gfn_range);
6969 
slot_rmap_write_protect(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot)6970 static bool slot_rmap_write_protect(struct kvm *kvm,
6971 				    struct kvm_rmap_head *rmap_head,
6972 				    const struct kvm_memory_slot *slot)
6973 {
6974 	return rmap_write_protect(rmap_head, false);
6975 }
6976 
kvm_mmu_slot_remove_write_access(struct kvm * kvm,const struct kvm_memory_slot * memslot,int start_level)6977 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
6978 				      const struct kvm_memory_slot *memslot,
6979 				      int start_level)
6980 {
6981 	if (kvm_memslots_have_rmaps(kvm)) {
6982 		write_lock(&kvm->mmu_lock);
6983 		walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
6984 				start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
6985 		write_unlock(&kvm->mmu_lock);
6986 	}
6987 
6988 	if (tdp_mmu_enabled) {
6989 		read_lock(&kvm->mmu_lock);
6990 		kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
6991 		read_unlock(&kvm->mmu_lock);
6992 	}
6993 }
6994 
need_topup(struct kvm_mmu_memory_cache * cache,int min)6995 static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
6996 {
6997 	return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
6998 }
6999 
need_topup_split_caches_or_resched(struct kvm * kvm)7000 static bool need_topup_split_caches_or_resched(struct kvm *kvm)
7001 {
7002 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
7003 		return true;
7004 
7005 	/*
7006 	 * In the worst case, SPLIT_DESC_CACHE_MIN_NR_OBJECTS descriptors are needed
7007 	 * to split a single huge page. Calculating how many are actually needed
7008 	 * is possible but not worth the complexity.
7009 	 */
7010 	return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
7011 	       need_topup(&kvm->arch.split_page_header_cache, 1) ||
7012 	       need_topup(&kvm->arch.split_shadow_page_cache, 1);
7013 }
7014 
topup_split_caches(struct kvm * kvm)7015 static int topup_split_caches(struct kvm *kvm)
7016 {
7017 	/*
7018 	 * Allocating rmap list entries when splitting huge pages for nested
7019 	 * MMUs is uncommon as KVM needs to use a list if and only if there is
7020 	 * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be
7021 	 * aliased by multiple L2 gfns and/or from multiple nested roots with
7022 	 * different roles.  Aliasing gfns when using TDP is atypical for VMMs;
7023 	 * a few gfns are often aliased during boot, e.g. when remapping BIOS,
7024 	 * but aliasing rarely occurs post-boot or for many gfns.  If there is
7025 	 * only one rmap entry, rmap->val points directly at that one entry and
7026 	 * doesn't need to allocate a list.  Buffer the cache by the default
7027 	 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
7028 	 * encounters an aliased gfn or two.
7029 	 */
7030 	const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS +
7031 			     KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
7032 	int r;
7033 
7034 	lockdep_assert_held(&kvm->slots_lock);
7035 
7036 	r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
7037 					 SPLIT_DESC_CACHE_MIN_NR_OBJECTS);
7038 	if (r)
7039 		return r;
7040 
7041 	r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
7042 	if (r)
7043 		return r;
7044 
7045 	return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
7046 }
7047 
shadow_mmu_get_sp_for_split(struct kvm * kvm,u64 * huge_sptep)7048 static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
7049 {
7050 	struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
7051 	struct shadow_page_caches caches = {};
7052 	union kvm_mmu_page_role role;
7053 	unsigned int access;
7054 	gfn_t gfn;
7055 
7056 	gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
7057 	access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
7058 
7059 	/*
7060 	 * Note, huge page splitting always uses direct shadow pages, regardless
7061 	 * of whether the huge page itself is mapped by a direct or indirect
7062 	 * shadow page, since the huge page region itself is being directly
7063 	 * mapped with smaller pages.
7064 	 */
7065 	role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
7066 
7067 	/* Direct SPs do not require a shadowed_info_cache. */
7068 	caches.page_header_cache = &kvm->arch.split_page_header_cache;
7069 	caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache;
7070 
7071 	/* Safe to pass NULL for vCPU since requesting a direct SP. */
7072 	return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
7073 }
7074 
shadow_mmu_split_huge_page(struct kvm * kvm,const struct kvm_memory_slot * slot,u64 * huge_sptep)7075 static void shadow_mmu_split_huge_page(struct kvm *kvm,
7076 				       const struct kvm_memory_slot *slot,
7077 				       u64 *huge_sptep)
7078 
7079 {
7080 	struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
7081 	u64 huge_spte = READ_ONCE(*huge_sptep);
7082 	struct kvm_mmu_page *sp;
7083 	bool flush = false;
7084 	u64 *sptep, spte;
7085 	gfn_t gfn;
7086 	int index;
7087 
7088 	sp = shadow_mmu_get_sp_for_split(kvm, huge_sptep);
7089 
7090 	for (index = 0; index < SPTE_ENT_PER_PAGE; index++) {
7091 		sptep = &sp->spt[index];
7092 		gfn = kvm_mmu_page_get_gfn(sp, index);
7093 
7094 		/*
7095 		 * The SP may already have populated SPTEs, e.g. if this huge
7096 		 * page is aliased by multiple sptes with the same access
7097 		 * permissions. These entries are guaranteed to map the same
7098 		 * gfn-to-pfn translation since the SP is direct, so no need to
7099 		 * modify them.
7100 		 *
7101 		 * However, if a given SPTE points to a lower level page table,
7102 		 * that lower level page table may only be partially populated.
7103 		 * Installing such SPTEs would effectively unmap a potion of the
7104 		 * huge page. Unmapping guest memory always requires a TLB flush
7105 		 * since a subsequent operation on the unmapped regions would
7106 		 * fail to detect the need to flush.
7107 		 */
7108 		if (is_shadow_present_pte(*sptep)) {
7109 			flush |= !is_last_spte(*sptep, sp->role.level);
7110 			continue;
7111 		}
7112 
7113 		spte = make_small_spte(kvm, huge_spte, sp->role, index);
7114 		mmu_spte_set(sptep, spte);
7115 		__rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
7116 	}
7117 
7118 	__link_shadow_page(kvm, cache, huge_sptep, sp, flush);
7119 }
7120 
shadow_mmu_try_split_huge_page(struct kvm * kvm,const struct kvm_memory_slot * slot,u64 * huge_sptep)7121 static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
7122 					  const struct kvm_memory_slot *slot,
7123 					  u64 *huge_sptep)
7124 {
7125 	struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
7126 	int level, r = 0;
7127 	gfn_t gfn;
7128 	u64 spte;
7129 
7130 	/* Grab information for the tracepoint before dropping the MMU lock. */
7131 	gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
7132 	level = huge_sp->role.level;
7133 	spte = *huge_sptep;
7134 
7135 	if (kvm_mmu_available_pages(kvm) <= KVM_MIN_FREE_MMU_PAGES) {
7136 		r = -ENOSPC;
7137 		goto out;
7138 	}
7139 
7140 	if (need_topup_split_caches_or_resched(kvm)) {
7141 		write_unlock(&kvm->mmu_lock);
7142 		cond_resched();
7143 		/*
7144 		 * If the topup succeeds, return -EAGAIN to indicate that the
7145 		 * rmap iterator should be restarted because the MMU lock was
7146 		 * dropped.
7147 		 */
7148 		r = topup_split_caches(kvm) ?: -EAGAIN;
7149 		write_lock(&kvm->mmu_lock);
7150 		goto out;
7151 	}
7152 
7153 	shadow_mmu_split_huge_page(kvm, slot, huge_sptep);
7154 
7155 out:
7156 	trace_kvm_mmu_split_huge_page(gfn, spte, level, r);
7157 	return r;
7158 }
7159 
shadow_mmu_try_split_huge_pages(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot)7160 static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm,
7161 					    struct kvm_rmap_head *rmap_head,
7162 					    const struct kvm_memory_slot *slot)
7163 {
7164 	struct rmap_iterator iter;
7165 	struct kvm_mmu_page *sp;
7166 	u64 *huge_sptep;
7167 	int r;
7168 
7169 restart:
7170 	for_each_rmap_spte(rmap_head, &iter, huge_sptep) {
7171 		sp = sptep_to_sp(huge_sptep);
7172 
7173 		/* TDP MMU is enabled, so rmap only contains nested MMU SPs. */
7174 		if (WARN_ON_ONCE(!sp->role.guest_mode))
7175 			continue;
7176 
7177 		/* The rmaps should never contain non-leaf SPTEs. */
7178 		if (WARN_ON_ONCE(!is_large_pte(*huge_sptep)))
7179 			continue;
7180 
7181 		/* SPs with level >PG_LEVEL_4K should never by unsync. */
7182 		if (WARN_ON_ONCE(sp->unsync))
7183 			continue;
7184 
7185 		/* Don't bother splitting huge pages on invalid SPs. */
7186 		if (sp->role.invalid)
7187 			continue;
7188 
7189 		r = shadow_mmu_try_split_huge_page(kvm, slot, huge_sptep);
7190 
7191 		/*
7192 		 * The split succeeded or needs to be retried because the MMU
7193 		 * lock was dropped. Either way, restart the iterator to get it
7194 		 * back into a consistent state.
7195 		 */
7196 		if (!r || r == -EAGAIN)
7197 			goto restart;
7198 
7199 		/* The split failed and shouldn't be retried (e.g. -ENOMEM). */
7200 		break;
7201 	}
7202 
7203 	return false;
7204 }
7205 
kvm_shadow_mmu_try_split_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t start,gfn_t end,int target_level)7206 static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
7207 						const struct kvm_memory_slot *slot,
7208 						gfn_t start, gfn_t end,
7209 						int target_level)
7210 {
7211 	int level;
7212 
7213 	/*
7214 	 * Split huge pages starting with KVM_MAX_HUGEPAGE_LEVEL and working
7215 	 * down to the target level. This ensures pages are recursively split
7216 	 * all the way to the target level. There's no need to split pages
7217 	 * already at the target level.
7218 	 */
7219 	for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
7220 		__walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
7221 				  level, level, start, end - 1, true, true, false);
7222 }
7223 
7224 /* Must be called with the mmu_lock held in write-mode. */
kvm_mmu_try_split_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * memslot,u64 start,u64 end,int target_level)7225 void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
7226 				   const struct kvm_memory_slot *memslot,
7227 				   u64 start, u64 end,
7228 				   int target_level)
7229 {
7230 	if (!tdp_mmu_enabled)
7231 		return;
7232 
7233 	if (kvm_memslots_have_rmaps(kvm))
7234 		kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
7235 
7236 	kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, false);
7237 
7238 	/*
7239 	 * A TLB flush is unnecessary at this point for the same reasons as in
7240 	 * kvm_mmu_slot_try_split_huge_pages().
7241 	 */
7242 }
7243 
kvm_mmu_slot_try_split_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * memslot,int target_level)7244 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
7245 					const struct kvm_memory_slot *memslot,
7246 					int target_level)
7247 {
7248 	u64 start = memslot->base_gfn;
7249 	u64 end = start + memslot->npages;
7250 
7251 	if (!tdp_mmu_enabled)
7252 		return;
7253 
7254 	if (kvm_memslots_have_rmaps(kvm)) {
7255 		write_lock(&kvm->mmu_lock);
7256 		kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
7257 		write_unlock(&kvm->mmu_lock);
7258 	}
7259 
7260 	read_lock(&kvm->mmu_lock);
7261 	kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true);
7262 	read_unlock(&kvm->mmu_lock);
7263 
7264 	/*
7265 	 * No TLB flush is necessary here. KVM will flush TLBs after
7266 	 * write-protecting and/or clearing dirty on the newly split SPTEs to
7267 	 * ensure that guest writes are reflected in the dirty log before the
7268 	 * ioctl to enable dirty logging on this memslot completes. Since the
7269 	 * split SPTEs retain the write and dirty bits of the huge SPTE, it is
7270 	 * safe for KVM to decide if a TLB flush is necessary based on the split
7271 	 * SPTEs.
7272 	 */
7273 }
7274 
kvm_mmu_zap_collapsible_spte(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot)7275 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
7276 					 struct kvm_rmap_head *rmap_head,
7277 					 const struct kvm_memory_slot *slot)
7278 {
7279 	u64 *sptep;
7280 	struct rmap_iterator iter;
7281 	int need_tlb_flush = 0;
7282 	struct kvm_mmu_page *sp;
7283 
7284 restart:
7285 	for_each_rmap_spte(rmap_head, &iter, sptep) {
7286 		sp = sptep_to_sp(sptep);
7287 
7288 		/*
7289 		 * We cannot do huge page mapping for indirect shadow pages,
7290 		 * which are found on the last rmap (level = 1) when not using
7291 		 * tdp; such shadow pages are synced with the page table in
7292 		 * the guest, and the guest page table is using 4K page size
7293 		 * mapping if the indirect sp has level = 1.
7294 		 */
7295 		if (sp->role.direct &&
7296 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, NULL, slot, sp->gfn)) {
7297 			kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
7298 
7299 			if (kvm_available_flush_remote_tlbs_range())
7300 				kvm_flush_remote_tlbs_sptep(kvm, sptep);
7301 			else
7302 				need_tlb_flush = 1;
7303 
7304 			goto restart;
7305 		}
7306 	}
7307 
7308 	return need_tlb_flush;
7309 }
7310 
kvm_rmap_zap_collapsible_sptes(struct kvm * kvm,const struct kvm_memory_slot * slot)7311 static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
7312 					   const struct kvm_memory_slot *slot)
7313 {
7314 	/*
7315 	 * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap
7316 	 * pages that are already mapped at the maximum hugepage level.
7317 	 */
7318 	if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
7319 			    PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
7320 		kvm_flush_remote_tlbs_memslot(kvm, slot);
7321 }
7322 
kvm_mmu_recover_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * slot)7323 void kvm_mmu_recover_huge_pages(struct kvm *kvm,
7324 				const struct kvm_memory_slot *slot)
7325 {
7326 	if (kvm_memslots_have_rmaps(kvm)) {
7327 		write_lock(&kvm->mmu_lock);
7328 		kvm_rmap_zap_collapsible_sptes(kvm, slot);
7329 		write_unlock(&kvm->mmu_lock);
7330 	}
7331 
7332 	if (tdp_mmu_enabled) {
7333 		read_lock(&kvm->mmu_lock);
7334 		kvm_tdp_mmu_recover_huge_pages(kvm, slot);
7335 		read_unlock(&kvm->mmu_lock);
7336 	}
7337 }
7338 
kvm_mmu_slot_leaf_clear_dirty(struct kvm * kvm,const struct kvm_memory_slot * memslot)7339 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
7340 				   const struct kvm_memory_slot *memslot)
7341 {
7342 	if (kvm_memslots_have_rmaps(kvm)) {
7343 		write_lock(&kvm->mmu_lock);
7344 		/*
7345 		 * Clear dirty bits only on 4k SPTEs since the legacy MMU only
7346 		 * support dirty logging at a 4k granularity.
7347 		 */
7348 		walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false);
7349 		write_unlock(&kvm->mmu_lock);
7350 	}
7351 
7352 	if (tdp_mmu_enabled) {
7353 		read_lock(&kvm->mmu_lock);
7354 		kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
7355 		read_unlock(&kvm->mmu_lock);
7356 	}
7357 
7358 	/*
7359 	 * The caller will flush the TLBs after this function returns.
7360 	 *
7361 	 * It's also safe to flush TLBs out of mmu lock here as currently this
7362 	 * function is only used for dirty logging, in which case flushing TLB
7363 	 * out of mmu lock also guarantees no dirty pages will be lost in
7364 	 * dirty_bitmap.
7365 	 */
7366 }
7367 
kvm_mmu_zap_all(struct kvm * kvm)7368 static void kvm_mmu_zap_all(struct kvm *kvm)
7369 {
7370 	struct kvm_mmu_page *sp, *node;
7371 	LIST_HEAD(invalid_list);
7372 	int ign;
7373 
7374 	write_lock(&kvm->mmu_lock);
7375 restart:
7376 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
7377 		if (WARN_ON_ONCE(sp->role.invalid))
7378 			continue;
7379 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
7380 			goto restart;
7381 		if (cond_resched_rwlock_write(&kvm->mmu_lock))
7382 			goto restart;
7383 	}
7384 
7385 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
7386 
7387 	if (tdp_mmu_enabled)
7388 		kvm_tdp_mmu_zap_all(kvm);
7389 
7390 	write_unlock(&kvm->mmu_lock);
7391 }
7392 
kvm_arch_flush_shadow_all(struct kvm * kvm)7393 void kvm_arch_flush_shadow_all(struct kvm *kvm)
7394 {
7395 	kvm_mmu_zap_all(kvm);
7396 }
7397 
kvm_mmu_zap_memslot_pages_and_flush(struct kvm * kvm,struct kvm_memory_slot * slot,bool flush)7398 static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm,
7399 						struct kvm_memory_slot *slot,
7400 						bool flush)
7401 {
7402 	LIST_HEAD(invalid_list);
7403 	unsigned long i;
7404 
7405 	if (list_empty(&kvm->arch.active_mmu_pages))
7406 		goto out_flush;
7407 
7408 	/*
7409 	 * Since accounting information is stored in struct kvm_arch_memory_slot,
7410 	 * all MMU pages that are shadowing guest PTEs must be zapped before the
7411 	 * memslot is deleted, as freeing such pages after the memslot is freed
7412 	 * will result in use-after-free, e.g. in unaccount_shadowed().
7413 	 */
7414 	for (i = 0; i < slot->npages; i++) {
7415 		struct kvm_mmu_page *sp;
7416 		gfn_t gfn = slot->base_gfn + i;
7417 
7418 		for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn)
7419 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7420 
7421 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7422 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7423 			flush = false;
7424 			cond_resched_rwlock_write(&kvm->mmu_lock);
7425 		}
7426 	}
7427 
7428 out_flush:
7429 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7430 }
7431 
kvm_mmu_zap_memslot(struct kvm * kvm,struct kvm_memory_slot * slot)7432 static void kvm_mmu_zap_memslot(struct kvm *kvm,
7433 				struct kvm_memory_slot *slot)
7434 {
7435 	struct kvm_gfn_range range = {
7436 		.slot = slot,
7437 		.start = slot->base_gfn,
7438 		.end = slot->base_gfn + slot->npages,
7439 		.may_block = true,
7440 		.attr_filter = KVM_FILTER_PRIVATE | KVM_FILTER_SHARED,
7441 	};
7442 	bool flush;
7443 
7444 	write_lock(&kvm->mmu_lock);
7445 	flush = kvm_unmap_gfn_range(kvm, &range);
7446 	kvm_mmu_zap_memslot_pages_and_flush(kvm, slot, flush);
7447 	write_unlock(&kvm->mmu_lock);
7448 }
7449 
kvm_memslot_flush_zap_all(struct kvm * kvm)7450 static inline bool kvm_memslot_flush_zap_all(struct kvm *kvm)
7451 {
7452 	return kvm->arch.vm_type == KVM_X86_DEFAULT_VM &&
7453 	       kvm_check_has_quirk(kvm, KVM_X86_QUIRK_SLOT_ZAP_ALL);
7454 }
7455 
kvm_arch_flush_shadow_memslot(struct kvm * kvm,struct kvm_memory_slot * slot)7456 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
7457 				   struct kvm_memory_slot *slot)
7458 {
7459 	if (kvm_memslot_flush_zap_all(kvm))
7460 		kvm_mmu_zap_all_fast(kvm);
7461 	else
7462 		kvm_mmu_zap_memslot(kvm, slot);
7463 }
7464 
kvm_mmu_invalidate_mmio_sptes(struct kvm * kvm,u64 gen)7465 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
7466 {
7467 	WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
7468 
7469 	if (!enable_mmio_caching)
7470 		return;
7471 
7472 	gen &= MMIO_SPTE_GEN_MASK;
7473 
7474 	/*
7475 	 * Generation numbers are incremented in multiples of the number of
7476 	 * address spaces in order to provide unique generations across all
7477 	 * address spaces.  Strip what is effectively the address space
7478 	 * modifier prior to checking for a wrap of the MMIO generation so
7479 	 * that a wrap in any address space is detected.
7480 	 */
7481 	gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1);
7482 
7483 	/*
7484 	 * The very rare case: if the MMIO generation number has wrapped,
7485 	 * zap all shadow pages.
7486 	 */
7487 	if (unlikely(gen == 0)) {
7488 		kvm_debug_ratelimited("zapping shadow pages for mmio generation wraparound\n");
7489 		kvm_mmu_zap_all_fast(kvm);
7490 	}
7491 }
7492 
mmu_destroy_caches(void)7493 static void mmu_destroy_caches(void)
7494 {
7495 	kmem_cache_destroy(pte_list_desc_cache);
7496 	kmem_cache_destroy(mmu_page_header_cache);
7497 }
7498 
kvm_wake_nx_recovery_thread(struct kvm * kvm)7499 static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
7500 {
7501 	/*
7502 	 * The NX recovery thread is spawned on-demand at the first KVM_RUN and
7503 	 * may not be valid even though the VM is globally visible.  Do nothing,
7504 	 * as such a VM can't have any possible NX huge pages.
7505 	 */
7506 	struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
7507 
7508 	if (nx_thread)
7509 		vhost_task_wake(nx_thread);
7510 }
7511 
get_nx_huge_pages(char * buffer,const struct kernel_param * kp)7512 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
7513 {
7514 	int val = *(int *)kp->arg;
7515 
7516 	if (nx_hugepage_mitigation_hard_disabled)
7517 		return sysfs_emit(buffer, "never\n");
7518 
7519 	if (val == -1)
7520 		return sysfs_emit(buffer, "auto\n");
7521 
7522 	return param_get_bool(buffer, kp);
7523 }
7524 
get_nx_auto_mode(void)7525 static bool get_nx_auto_mode(void)
7526 {
7527 	/* Return true when CPU has the bug, and mitigations are ON */
7528 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
7529 }
7530 
__set_nx_huge_pages(bool val)7531 static void __set_nx_huge_pages(bool val)
7532 {
7533 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
7534 }
7535 
set_nx_huge_pages(const char * val,const struct kernel_param * kp)7536 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
7537 {
7538 	bool old_val = nx_huge_pages;
7539 	bool new_val;
7540 
7541 	if (nx_hugepage_mitigation_hard_disabled)
7542 		return -EPERM;
7543 
7544 	/* In "auto" mode deploy workaround only if CPU has the bug. */
7545 	if (sysfs_streq(val, "off")) {
7546 		new_val = 0;
7547 	} else if (sysfs_streq(val, "force")) {
7548 		new_val = 1;
7549 	} else if (sysfs_streq(val, "auto")) {
7550 		new_val = get_nx_auto_mode();
7551 	} else if (sysfs_streq(val, "never")) {
7552 		new_val = 0;
7553 
7554 		mutex_lock(&kvm_lock);
7555 		if (!list_empty(&vm_list)) {
7556 			mutex_unlock(&kvm_lock);
7557 			return -EBUSY;
7558 		}
7559 		nx_hugepage_mitigation_hard_disabled = true;
7560 		mutex_unlock(&kvm_lock);
7561 	} else if (kstrtobool(val, &new_val) < 0) {
7562 		return -EINVAL;
7563 	}
7564 
7565 	__set_nx_huge_pages(new_val);
7566 
7567 	if (new_val != old_val) {
7568 		struct kvm *kvm;
7569 
7570 		mutex_lock(&kvm_lock);
7571 
7572 		list_for_each_entry(kvm, &vm_list, vm_list) {
7573 			mutex_lock(&kvm->slots_lock);
7574 			kvm_mmu_zap_all_fast(kvm);
7575 			mutex_unlock(&kvm->slots_lock);
7576 
7577 			kvm_wake_nx_recovery_thread(kvm);
7578 		}
7579 		mutex_unlock(&kvm_lock);
7580 	}
7581 
7582 	return 0;
7583 }
7584 
7585 /*
7586  * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
7587  * its default value of -1 is technically undefined behavior for a boolean.
7588  * Forward the module init call to SPTE code so that it too can handle module
7589  * params that need to be resolved/snapshot.
7590  */
kvm_mmu_x86_module_init(void)7591 void __init kvm_mmu_x86_module_init(void)
7592 {
7593 	if (nx_huge_pages == -1)
7594 		__set_nx_huge_pages(get_nx_auto_mode());
7595 
7596 	/*
7597 	 * Snapshot userspace's desire to enable the TDP MMU. Whether or not the
7598 	 * TDP MMU is actually enabled is determined in kvm_configure_mmu()
7599 	 * when the vendor module is loaded.
7600 	 */
7601 	tdp_mmu_allowed = tdp_mmu_enabled;
7602 
7603 	kvm_mmu_spte_module_init();
7604 }
7605 
7606 /*
7607  * The bulk of the MMU initialization is deferred until the vendor module is
7608  * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
7609  * to be reset when a potentially different vendor module is loaded.
7610  */
kvm_mmu_vendor_module_init(void)7611 int kvm_mmu_vendor_module_init(void)
7612 {
7613 	int ret = -ENOMEM;
7614 
7615 	/*
7616 	 * MMU roles use union aliasing which is, generally speaking, an
7617 	 * undefined behavior. However, we supposedly know how compilers behave
7618 	 * and the current status quo is unlikely to change. Guardians below are
7619 	 * supposed to let us know if the assumption becomes false.
7620 	 */
7621 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
7622 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
7623 	BUILD_BUG_ON(sizeof(union kvm_cpu_role) != sizeof(u64));
7624 
7625 	kvm_mmu_reset_all_pte_masks();
7626 
7627 	pte_list_desc_cache = KMEM_CACHE(pte_list_desc, SLAB_ACCOUNT);
7628 	if (!pte_list_desc_cache)
7629 		goto out;
7630 
7631 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
7632 						  sizeof(struct kvm_mmu_page),
7633 						  0, SLAB_ACCOUNT, NULL);
7634 	if (!mmu_page_header_cache)
7635 		goto out;
7636 
7637 	return 0;
7638 
7639 out:
7640 	mmu_destroy_caches();
7641 	return ret;
7642 }
7643 
kvm_mmu_destroy(struct kvm_vcpu * vcpu)7644 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
7645 {
7646 	kvm_mmu_unload(vcpu);
7647 	if (tdp_mmu_enabled) {
7648 		read_lock(&vcpu->kvm->mmu_lock);
7649 		mmu_free_root_page(vcpu->kvm, &vcpu->arch.mmu->mirror_root_hpa,
7650 				   NULL);
7651 		read_unlock(&vcpu->kvm->mmu_lock);
7652 	}
7653 	free_mmu_pages(&vcpu->arch.root_mmu);
7654 	free_mmu_pages(&vcpu->arch.guest_mmu);
7655 	mmu_free_memory_caches(vcpu);
7656 }
7657 
kvm_mmu_vendor_module_exit(void)7658 void kvm_mmu_vendor_module_exit(void)
7659 {
7660 	mmu_destroy_caches();
7661 }
7662 
7663 /*
7664  * Calculate the effective recovery period, accounting for '0' meaning "let KVM
7665  * select a halving time of 1 hour".  Returns true if recovery is enabled.
7666  */
calc_nx_huge_pages_recovery_period(uint * period)7667 static bool calc_nx_huge_pages_recovery_period(uint *period)
7668 {
7669 	/*
7670 	 * Use READ_ONCE to get the params, this may be called outside of the
7671 	 * param setters, e.g. by the kthread to compute its next timeout.
7672 	 */
7673 	bool enabled = READ_ONCE(nx_huge_pages);
7674 	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7675 
7676 	if (!enabled || !ratio)
7677 		return false;
7678 
7679 	*period = READ_ONCE(nx_huge_pages_recovery_period_ms);
7680 	if (!*period) {
7681 		/* Make sure the period is not less than one second.  */
7682 		ratio = min(ratio, 3600u);
7683 		*period = 60 * 60 * 1000 / ratio;
7684 	}
7685 	return true;
7686 }
7687 
set_nx_huge_pages_recovery_param(const char * val,const struct kernel_param * kp)7688 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
7689 {
7690 	bool was_recovery_enabled, is_recovery_enabled;
7691 	uint old_period, new_period;
7692 	int err;
7693 
7694 	if (nx_hugepage_mitigation_hard_disabled)
7695 		return -EPERM;
7696 
7697 	was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
7698 
7699 	err = param_set_uint(val, kp);
7700 	if (err)
7701 		return err;
7702 
7703 	is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
7704 
7705 	if (is_recovery_enabled &&
7706 	    (!was_recovery_enabled || old_period > new_period)) {
7707 		struct kvm *kvm;
7708 
7709 		mutex_lock(&kvm_lock);
7710 
7711 		list_for_each_entry(kvm, &vm_list, vm_list)
7712 			kvm_wake_nx_recovery_thread(kvm);
7713 
7714 		mutex_unlock(&kvm_lock);
7715 	}
7716 
7717 	return err;
7718 }
7719 
nx_huge_pages_to_zap(struct kvm * kvm,enum kvm_mmu_type mmu_type)7720 static unsigned long nx_huge_pages_to_zap(struct kvm *kvm,
7721 					  enum kvm_mmu_type mmu_type)
7722 {
7723 	unsigned long pages = READ_ONCE(kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages);
7724 	unsigned int ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7725 
7726 	return ratio ? DIV_ROUND_UP(pages, ratio) : 0;
7727 }
7728 
kvm_mmu_sp_dirty_logging_enabled(struct kvm * kvm,struct kvm_mmu_page * sp)7729 static bool kvm_mmu_sp_dirty_logging_enabled(struct kvm *kvm,
7730 					     struct kvm_mmu_page *sp)
7731 {
7732 	struct kvm_memory_slot *slot;
7733 
7734 	/*
7735 	 * Skip the memslot lookup if dirty tracking can't possibly be enabled,
7736 	 * as memslot lookups are relatively expensive.
7737 	 *
7738 	 * If a memslot update is in progress, reading an incorrect value of
7739 	 * kvm->nr_memslots_dirty_logging is not a problem: if it is becoming
7740 	 * zero, KVM will  do an unnecessary memslot lookup;  if it is becoming
7741 	 * nonzero, the page will be zapped unnecessarily.  Either way, this
7742 	 * only affects efficiency in racy situations, and not correctness.
7743 	 */
7744 	if (!atomic_read(&kvm->nr_memslots_dirty_logging))
7745 		return false;
7746 
7747 	slot = __gfn_to_memslot(kvm_memslots_for_spte_role(kvm, sp->role), sp->gfn);
7748 	if (WARN_ON_ONCE(!slot))
7749 		return false;
7750 
7751 	return kvm_slot_dirty_track_enabled(slot);
7752 }
7753 
kvm_recover_nx_huge_pages(struct kvm * kvm,const enum kvm_mmu_type mmu_type)7754 static void kvm_recover_nx_huge_pages(struct kvm *kvm,
7755 				      const enum kvm_mmu_type mmu_type)
7756 {
7757 #ifdef CONFIG_X86_64
7758 	const bool is_tdp_mmu = mmu_type == KVM_TDP_MMU;
7759 	spinlock_t *tdp_mmu_pages_lock = &kvm->arch.tdp_mmu_pages_lock;
7760 #else
7761 	const bool is_tdp_mmu = false;
7762 	spinlock_t *tdp_mmu_pages_lock = NULL;
7763 #endif
7764 	unsigned long to_zap = nx_huge_pages_to_zap(kvm, mmu_type);
7765 	struct list_head *nx_huge_pages;
7766 	struct kvm_mmu_page *sp;
7767 	LIST_HEAD(invalid_list);
7768 	bool flush = false;
7769 	int rcu_idx;
7770 
7771 	nx_huge_pages = &kvm->arch.possible_nx_huge_pages[mmu_type].pages;
7772 
7773 	rcu_idx = srcu_read_lock(&kvm->srcu);
7774 	if (is_tdp_mmu)
7775 		read_lock(&kvm->mmu_lock);
7776 	else
7777 		write_lock(&kvm->mmu_lock);
7778 
7779 	/*
7780 	 * Zapping TDP MMU shadow pages, including the remote TLB flush, must
7781 	 * be done under RCU protection, because the pages are freed via RCU
7782 	 * callback.
7783 	 */
7784 	rcu_read_lock();
7785 
7786 	for ( ; to_zap; --to_zap) {
7787 		if (is_tdp_mmu)
7788 			spin_lock(tdp_mmu_pages_lock);
7789 
7790 		if (list_empty(nx_huge_pages)) {
7791 			if (is_tdp_mmu)
7792 				spin_unlock(tdp_mmu_pages_lock);
7793 			break;
7794 		}
7795 
7796 		/*
7797 		 * We use a separate list instead of just using active_mmu_pages
7798 		 * because the number of shadow pages that be replaced with an
7799 		 * NX huge page is expected to be relatively small compared to
7800 		 * the total number of shadow pages.  And because the TDP MMU
7801 		 * doesn't use active_mmu_pages.
7802 		 */
7803 		sp = list_first_entry(nx_huge_pages,
7804 				      struct kvm_mmu_page,
7805 				      possible_nx_huge_page_link);
7806 		WARN_ON_ONCE(!sp->nx_huge_page_disallowed);
7807 		WARN_ON_ONCE(!sp->role.direct);
7808 
7809 		unaccount_nx_huge_page(kvm, sp);
7810 
7811 		if (is_tdp_mmu)
7812 			spin_unlock(tdp_mmu_pages_lock);
7813 
7814 		/*
7815 		 * Do not attempt to recover any NX Huge Pages that are being
7816 		 * dirty tracked, as they would just be faulted back in as 4KiB
7817 		 * pages. The NX Huge Pages in this slot will be recovered,
7818 		 * along with all the other huge pages in the slot, when dirty
7819 		 * logging is disabled.
7820 		 */
7821 		if (!kvm_mmu_sp_dirty_logging_enabled(kvm, sp)) {
7822 			if (is_tdp_mmu)
7823 				flush |= kvm_tdp_mmu_zap_possible_nx_huge_page(kvm, sp);
7824 			else
7825 				kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7826 
7827 		}
7828 
7829 		WARN_ON_ONCE(sp->nx_huge_page_disallowed);
7830 
7831 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7832 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7833 			rcu_read_unlock();
7834 
7835 			if (is_tdp_mmu)
7836 				cond_resched_rwlock_read(&kvm->mmu_lock);
7837 			else
7838 				cond_resched_rwlock_write(&kvm->mmu_lock);
7839 
7840 			flush = false;
7841 			rcu_read_lock();
7842 		}
7843 	}
7844 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7845 
7846 	rcu_read_unlock();
7847 
7848 	if (is_tdp_mmu)
7849 		read_unlock(&kvm->mmu_lock);
7850 	else
7851 		write_unlock(&kvm->mmu_lock);
7852 	srcu_read_unlock(&kvm->srcu, rcu_idx);
7853 }
7854 
kvm_nx_huge_page_recovery_worker_kill(void * data)7855 static void kvm_nx_huge_page_recovery_worker_kill(void *data)
7856 {
7857 }
7858 
kvm_nx_huge_page_recovery_worker(void * data)7859 static bool kvm_nx_huge_page_recovery_worker(void *data)
7860 {
7861 	struct kvm *kvm = data;
7862 	long remaining_time;
7863 	bool enabled;
7864 	uint period;
7865 	int i;
7866 
7867 	enabled = calc_nx_huge_pages_recovery_period(&period);
7868 	if (!enabled)
7869 		return false;
7870 
7871 	remaining_time = kvm->arch.nx_huge_page_last + msecs_to_jiffies(period)
7872 		- get_jiffies_64();
7873 	if (remaining_time > 0) {
7874 		schedule_timeout(remaining_time);
7875 		/* check for signals and come back */
7876 		return true;
7877 	}
7878 
7879 	__set_current_state(TASK_RUNNING);
7880 	for (i = 0; i < KVM_NR_MMU_TYPES; ++i)
7881 		kvm_recover_nx_huge_pages(kvm, i);
7882 	kvm->arch.nx_huge_page_last = get_jiffies_64();
7883 	return true;
7884 }
7885 
kvm_mmu_start_lpage_recovery(struct once * once)7886 static int kvm_mmu_start_lpage_recovery(struct once *once)
7887 {
7888 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
7889 	struct kvm *kvm = container_of(ka, struct kvm, arch);
7890 	struct vhost_task *nx_thread;
7891 
7892 	kvm->arch.nx_huge_page_last = get_jiffies_64();
7893 	nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
7894 				      kvm_nx_huge_page_recovery_worker_kill,
7895 				      kvm, "kvm-nx-lpage-recovery");
7896 
7897 	if (IS_ERR(nx_thread))
7898 		return PTR_ERR(nx_thread);
7899 
7900 	vhost_task_start(nx_thread);
7901 
7902 	/* Make the task visible only once it is fully started. */
7903 	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
7904 	return 0;
7905 }
7906 
kvm_mmu_post_init_vm(struct kvm * kvm)7907 int kvm_mmu_post_init_vm(struct kvm *kvm)
7908 {
7909 	if (nx_hugepage_mitigation_hard_disabled)
7910 		return 0;
7911 
7912 	return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
7913 }
7914 
kvm_mmu_pre_destroy_vm(struct kvm * kvm)7915 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
7916 {
7917 	if (kvm->arch.nx_huge_page_recovery_thread)
7918 		vhost_task_stop(kvm->arch.nx_huge_page_recovery_thread);
7919 }
7920 
7921 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
hugepage_test_mixed(struct kvm_memory_slot * slot,gfn_t gfn,int level)7922 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7923 				int level)
7924 {
7925 	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
7926 }
7927 
hugepage_clear_mixed(struct kvm_memory_slot * slot,gfn_t gfn,int level)7928 static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7929 				 int level)
7930 {
7931 	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
7932 }
7933 
hugepage_set_mixed(struct kvm_memory_slot * slot,gfn_t gfn,int level)7934 static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7935 			       int level)
7936 {
7937 	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
7938 }
7939 
kvm_arch_pre_set_memory_attributes(struct kvm * kvm,struct kvm_gfn_range * range)7940 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
7941 					struct kvm_gfn_range *range)
7942 {
7943 	struct kvm_memory_slot *slot = range->slot;
7944 	int level;
7945 
7946 	/*
7947 	 * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
7948 	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
7949 	 * can simply ignore such slots.  But if userspace is making memory
7950 	 * PRIVATE, then KVM must prevent the guest from accessing the memory
7951 	 * as shared.  And if userspace is making memory SHARED and this point
7952 	 * is reached, then at least one page within the range was previously
7953 	 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
7954 	 * Zapping SPTEs in this case ensures KVM will reassess whether or not
7955 	 * a hugepage can be used for affected ranges.
7956 	 */
7957 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7958 		return false;
7959 
7960 	if (WARN_ON_ONCE(range->end <= range->start))
7961 		return false;
7962 
7963 	/*
7964 	 * If the head and tail pages of the range currently allow a hugepage,
7965 	 * i.e. reside fully in the slot and don't have mixed attributes, then
7966 	 * add each corresponding hugepage range to the ongoing invalidation,
7967 	 * e.g. to prevent KVM from creating a hugepage in response to a fault
7968 	 * for a gfn whose attributes aren't changing.  Note, only the range
7969 	 * of gfns whose attributes are being modified needs to be explicitly
7970 	 * unmapped, as that will unmap any existing hugepages.
7971 	 */
7972 	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7973 		gfn_t start = gfn_round_for_level(range->start, level);
7974 		gfn_t end = gfn_round_for_level(range->end - 1, level);
7975 		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7976 
7977 		if ((start != range->start || start + nr_pages > range->end) &&
7978 		    start >= slot->base_gfn &&
7979 		    start + nr_pages <= slot->base_gfn + slot->npages &&
7980 		    !hugepage_test_mixed(slot, start, level))
7981 			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
7982 
7983 		if (end == start)
7984 			continue;
7985 
7986 		if ((end + nr_pages) > range->end &&
7987 		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
7988 		    !hugepage_test_mixed(slot, end, level))
7989 			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
7990 	}
7991 
7992 	/* Unmap the old attribute page. */
7993 	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
7994 		range->attr_filter = KVM_FILTER_SHARED;
7995 	else
7996 		range->attr_filter = KVM_FILTER_PRIVATE;
7997 
7998 	return kvm_unmap_gfn_range(kvm, range);
7999 }
8000 
8001 
8002 
hugepage_has_attrs(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn,int level,unsigned long attrs)8003 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
8004 			       gfn_t gfn, int level, unsigned long attrs)
8005 {
8006 	const unsigned long start = gfn;
8007 	const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
8008 
8009 	if (level == PG_LEVEL_2M)
8010 		return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
8011 
8012 	for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
8013 		if (hugepage_test_mixed(slot, gfn, level - 1) ||
8014 		    attrs != kvm_get_memory_attributes(kvm, gfn))
8015 			return false;
8016 	}
8017 	return true;
8018 }
8019 
kvm_arch_post_set_memory_attributes(struct kvm * kvm,struct kvm_gfn_range * range)8020 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
8021 					 struct kvm_gfn_range *range)
8022 {
8023 	unsigned long attrs = range->arg.attributes;
8024 	struct kvm_memory_slot *slot = range->slot;
8025 	int level;
8026 
8027 	lockdep_assert_held_write(&kvm->mmu_lock);
8028 	lockdep_assert_held(&kvm->slots_lock);
8029 
8030 	/*
8031 	 * Calculate which ranges can be mapped with hugepages even if the slot
8032 	 * can't map memory PRIVATE.  KVM mustn't create a SHARED hugepage over
8033 	 * a range that has PRIVATE GFNs, and conversely converting a range to
8034 	 * SHARED may now allow hugepages.
8035 	 */
8036 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
8037 		return false;
8038 
8039 	/*
8040 	 * The sequence matters here: upper levels consume the result of lower
8041 	 * level's scanning.
8042 	 */
8043 	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
8044 		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
8045 		gfn_t gfn = gfn_round_for_level(range->start, level);
8046 
8047 		/* Process the head page if it straddles the range. */
8048 		if (gfn != range->start || gfn + nr_pages > range->end) {
8049 			/*
8050 			 * Skip mixed tracking if the aligned gfn isn't covered
8051 			 * by the memslot, KVM can't use a hugepage due to the
8052 			 * misaligned address regardless of memory attributes.
8053 			 */
8054 			if (gfn >= slot->base_gfn &&
8055 			    gfn + nr_pages <= slot->base_gfn + slot->npages) {
8056 				if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
8057 					hugepage_clear_mixed(slot, gfn, level);
8058 				else
8059 					hugepage_set_mixed(slot, gfn, level);
8060 			}
8061 			gfn += nr_pages;
8062 		}
8063 
8064 		/*
8065 		 * Pages entirely covered by the range are guaranteed to have
8066 		 * only the attributes which were just set.
8067 		 */
8068 		for ( ; gfn + nr_pages <= range->end; gfn += nr_pages)
8069 			hugepage_clear_mixed(slot, gfn, level);
8070 
8071 		/*
8072 		 * Process the last tail page if it straddles the range and is
8073 		 * contained by the memslot.  Like the head page, KVM can't
8074 		 * create a hugepage if the slot size is misaligned.
8075 		 */
8076 		if (gfn < range->end &&
8077 		    (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
8078 			if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
8079 				hugepage_clear_mixed(slot, gfn, level);
8080 			else
8081 				hugepage_set_mixed(slot, gfn, level);
8082 		}
8083 	}
8084 	return false;
8085 }
8086 
kvm_mmu_init_memslot_memory_attributes(struct kvm * kvm,struct kvm_memory_slot * slot)8087 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
8088 					    struct kvm_memory_slot *slot)
8089 {
8090 	int level;
8091 
8092 	if (!kvm_arch_has_private_mem(kvm))
8093 		return;
8094 
8095 	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
8096 		/*
8097 		 * Don't bother tracking mixed attributes for pages that can't
8098 		 * be huge due to alignment, i.e. process only pages that are
8099 		 * entirely contained by the memslot.
8100 		 */
8101 		gfn_t end = gfn_round_for_level(slot->base_gfn + slot->npages, level);
8102 		gfn_t start = gfn_round_for_level(slot->base_gfn, level);
8103 		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
8104 		gfn_t gfn;
8105 
8106 		if (start < slot->base_gfn)
8107 			start += nr_pages;
8108 
8109 		/*
8110 		 * Unlike setting attributes, every potential hugepage needs to
8111 		 * be manually checked as the attributes may already be mixed.
8112 		 */
8113 		for (gfn = start; gfn < end; gfn += nr_pages) {
8114 			unsigned long attrs = kvm_get_memory_attributes(kvm, gfn);
8115 
8116 			if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
8117 				hugepage_clear_mixed(slot, gfn, level);
8118 			else
8119 				hugepage_set_mixed(slot, gfn, level);
8120 		}
8121 	}
8122 }
8123 #endif
8124