xref: /linux/arch/x86/kvm/mmu/mmu.c (revision dde63797055cf3615bdac744d641e19e165467bb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * MMU support
9  *
10  * Copyright (C) 2006 Qumranet, Inc.
11  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12  *
13  * Authors:
14  *   Yaniv Kamay  <yaniv@qumranet.com>
15  *   Avi Kivity   <avi@qumranet.com>
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include "irq.h"
20 #include "ioapic.h"
21 #include "mmu.h"
22 #include "mmu_internal.h"
23 #include "tdp_mmu.h"
24 #include "x86.h"
25 #include "kvm_cache_regs.h"
26 #include "smm.h"
27 #include "kvm_emulate.h"
28 #include "page_track.h"
29 #include "cpuid.h"
30 #include "spte.h"
31 
32 #include <linux/kvm_host.h>
33 #include <linux/types.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/highmem.h>
37 #include <linux/moduleparam.h>
38 #include <linux/export.h>
39 #include <linux/swap.h>
40 #include <linux/hugetlb.h>
41 #include <linux/compiler.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/sched/signal.h>
45 #include <linux/uaccess.h>
46 #include <linux/hash.h>
47 #include <linux/kern_levels.h>
48 #include <linux/kstrtox.h>
49 #include <linux/kthread.h>
50 #include <linux/wordpart.h>
51 
52 #include <asm/page.h>
53 #include <asm/memtype.h>
54 #include <asm/cmpxchg.h>
55 #include <asm/io.h>
56 #include <asm/set_memory.h>
57 #include <asm/spec-ctrl.h>
58 #include <asm/vmx.h>
59 
60 #include "trace.h"
61 
62 static bool nx_hugepage_mitigation_hard_disabled;
63 
64 int __read_mostly nx_huge_pages = -1;
65 static uint __read_mostly nx_huge_pages_recovery_period_ms;
66 #ifdef CONFIG_PREEMPT_RT
67 /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
68 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
69 #else
70 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
71 #endif
72 
73 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
74 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
75 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
76 
77 static const struct kernel_param_ops nx_huge_pages_ops = {
78 	.set = set_nx_huge_pages,
79 	.get = get_nx_huge_pages,
80 };
81 
82 static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
83 	.set = set_nx_huge_pages_recovery_param,
84 	.get = param_get_uint,
85 };
86 
87 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
88 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
89 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops,
90 		&nx_huge_pages_recovery_ratio, 0644);
91 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
92 module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops,
93 		&nx_huge_pages_recovery_period_ms, 0644);
94 __MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint");
95 
96 static bool __read_mostly force_flush_and_sync_on_reuse;
97 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
98 
99 /*
100  * When setting this variable to true it enables Two-Dimensional-Paging
101  * where the hardware walks 2 page tables:
102  * 1. the guest-virtual to guest-physical
103  * 2. while doing 1. it walks guest-physical to host-physical
104  * If the hardware supports that we don't need to do shadow paging.
105  */
106 bool tdp_enabled = false;
107 
108 static bool __ro_after_init tdp_mmu_allowed;
109 
110 #ifdef CONFIG_X86_64
111 bool __read_mostly tdp_mmu_enabled = true;
112 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
113 EXPORT_SYMBOL_GPL(tdp_mmu_enabled);
114 #endif
115 
116 static int max_huge_page_level __read_mostly;
117 static int tdp_root_level __read_mostly;
118 static int max_tdp_level __read_mostly;
119 
120 #define PTE_PREFETCH_NUM		8
121 
122 #include <trace/events/kvm.h>
123 
124 /* make pte_list_desc fit well in cache lines */
125 #define PTE_LIST_EXT 14
126 
127 /*
128  * struct pte_list_desc is the core data structure used to implement a custom
129  * list for tracking a set of related SPTEs, e.g. all the SPTEs that map a
130  * given GFN when used in the context of rmaps.  Using a custom list allows KVM
131  * to optimize for the common case where many GFNs will have at most a handful
132  * of SPTEs pointing at them, i.e. allows packing multiple SPTEs into a small
133  * memory footprint, which in turn improves runtime performance by exploiting
134  * cache locality.
135  *
136  * A list is comprised of one or more pte_list_desc objects (descriptors).
137  * Each individual descriptor stores up to PTE_LIST_EXT SPTEs.  If a descriptor
138  * is full and a new SPTEs needs to be added, a new descriptor is allocated and
139  * becomes the head of the list.  This means that by definitions, all tail
140  * descriptors are full.
141  *
142  * Note, the meta data fields are deliberately placed at the start of the
143  * structure to optimize the cacheline layout; accessing the descriptor will
144  * touch only a single cacheline so long as @spte_count<=6 (or if only the
145  * descriptors metadata is accessed).
146  */
147 struct pte_list_desc {
148 	struct pte_list_desc *more;
149 	/* The number of PTEs stored in _this_ descriptor. */
150 	u32 spte_count;
151 	/* The number of PTEs stored in all tails of this descriptor. */
152 	u32 tail_count;
153 	u64 *sptes[PTE_LIST_EXT];
154 };
155 
156 struct kvm_shadow_walk_iterator {
157 	u64 addr;
158 	hpa_t shadow_addr;
159 	u64 *sptep;
160 	int level;
161 	unsigned index;
162 };
163 
164 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
165 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
166 					 (_root), (_addr));                \
167 	     shadow_walk_okay(&(_walker));			           \
168 	     shadow_walk_next(&(_walker)))
169 
170 #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
171 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
172 	     shadow_walk_okay(&(_walker));			\
173 	     shadow_walk_next(&(_walker)))
174 
175 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
176 	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
177 	     shadow_walk_okay(&(_walker)) &&				\
178 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
179 	     __shadow_walk_next(&(_walker), spte))
180 
181 static struct kmem_cache *pte_list_desc_cache;
182 struct kmem_cache *mmu_page_header_cache;
183 
184 static void mmu_spte_set(u64 *sptep, u64 spte);
185 
186 struct kvm_mmu_role_regs {
187 	const unsigned long cr0;
188 	const unsigned long cr4;
189 	const u64 efer;
190 };
191 
192 #define CREATE_TRACE_POINTS
193 #include "mmutrace.h"
194 
195 /*
196  * Yes, lot's of underscores.  They're a hint that you probably shouldn't be
197  * reading from the role_regs.  Once the root_role is constructed, it becomes
198  * the single source of truth for the MMU's state.
199  */
200 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
201 static inline bool __maybe_unused					\
202 ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs)		\
203 {									\
204 	return !!(regs->reg & flag);					\
205 }
206 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
207 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
208 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
209 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
210 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
211 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
212 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
213 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
214 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
215 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
216 
217 /*
218  * The MMU itself (with a valid role) is the single source of truth for the
219  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
220  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
221  * and the vCPU may be incorrect/irrelevant.
222  */
223 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
224 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
225 {								\
226 	return !!(mmu->cpu_role. base_or_ext . reg##_##name);	\
227 }
228 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
229 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
230 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
231 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
232 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
233 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
234 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
235 BUILD_MMU_ROLE_ACCESSOR(ext,  efer, lma);
236 
is_cr0_pg(struct kvm_mmu * mmu)237 static inline bool is_cr0_pg(struct kvm_mmu *mmu)
238 {
239         return mmu->cpu_role.base.level > 0;
240 }
241 
is_cr4_pae(struct kvm_mmu * mmu)242 static inline bool is_cr4_pae(struct kvm_mmu *mmu)
243 {
244         return !mmu->cpu_role.base.has_4_byte_gpte;
245 }
246 
vcpu_to_role_regs(struct kvm_vcpu * vcpu)247 static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
248 {
249 	struct kvm_mmu_role_regs regs = {
250 		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
251 		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
252 		.efer = vcpu->arch.efer,
253 	};
254 
255 	return regs;
256 }
257 
get_guest_cr3(struct kvm_vcpu * vcpu)258 static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
259 {
260 	return kvm_read_cr3(vcpu);
261 }
262 
kvm_mmu_get_guest_pgd(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu)263 static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
264 						  struct kvm_mmu *mmu)
265 {
266 	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
267 		return kvm_read_cr3(vcpu);
268 
269 	return mmu->get_guest_pgd(vcpu);
270 }
271 
kvm_available_flush_remote_tlbs_range(void)272 static inline bool kvm_available_flush_remote_tlbs_range(void)
273 {
274 #if IS_ENABLED(CONFIG_HYPERV)
275 	return kvm_x86_ops.flush_remote_tlbs_range;
276 #else
277 	return false;
278 #endif
279 }
280 
281 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
282 
283 /* Flush the range of guest memory mapped by the given SPTE. */
kvm_flush_remote_tlbs_sptep(struct kvm * kvm,u64 * sptep)284 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
285 {
286 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
287 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
288 
289 	kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
290 }
291 
mark_mmio_spte(struct kvm_vcpu * vcpu,u64 * sptep,u64 gfn,unsigned int access)292 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
293 			   unsigned int access)
294 {
295 	u64 spte = make_mmio_spte(vcpu, gfn, access);
296 
297 	trace_mark_mmio_spte(sptep, gfn, spte);
298 	mmu_spte_set(sptep, spte);
299 }
300 
get_mmio_spte_gfn(u64 spte)301 static gfn_t get_mmio_spte_gfn(u64 spte)
302 {
303 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
304 
305 	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
306 	       & shadow_nonpresent_or_rsvd_mask;
307 
308 	return gpa >> PAGE_SHIFT;
309 }
310 
get_mmio_spte_access(u64 spte)311 static unsigned get_mmio_spte_access(u64 spte)
312 {
313 	return spte & shadow_mmio_access_mask;
314 }
315 
check_mmio_spte(struct kvm_vcpu * vcpu,u64 spte)316 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
317 {
318 	u64 kvm_gen, spte_gen, gen;
319 
320 	gen = kvm_vcpu_memslots(vcpu)->generation;
321 	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
322 		return false;
323 
324 	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
325 	spte_gen = get_mmio_spte_generation(spte);
326 
327 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
328 	return likely(kvm_gen == spte_gen);
329 }
330 
is_cpuid_PSE36(void)331 static int is_cpuid_PSE36(void)
332 {
333 	return 1;
334 }
335 
336 #ifdef CONFIG_X86_64
__set_spte(u64 * sptep,u64 spte)337 static void __set_spte(u64 *sptep, u64 spte)
338 {
339 	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
340 	WRITE_ONCE(*sptep, spte);
341 }
342 
__update_clear_spte_fast(u64 * sptep,u64 spte)343 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
344 {
345 	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
346 	WRITE_ONCE(*sptep, spte);
347 }
348 
__update_clear_spte_slow(u64 * sptep,u64 spte)349 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
350 {
351 	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
352 	return xchg(sptep, spte);
353 }
354 
__get_spte_lockless(u64 * sptep)355 static u64 __get_spte_lockless(u64 *sptep)
356 {
357 	return READ_ONCE(*sptep);
358 }
359 #else
360 union split_spte {
361 	struct {
362 		u32 spte_low;
363 		u32 spte_high;
364 	};
365 	u64 spte;
366 };
367 
count_spte_clear(u64 * sptep,u64 spte)368 static void count_spte_clear(u64 *sptep, u64 spte)
369 {
370 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
371 
372 	if (is_shadow_present_pte(spte))
373 		return;
374 
375 	/* Ensure the spte is completely set before we increase the count */
376 	smp_wmb();
377 	sp->clear_spte_count++;
378 }
379 
__set_spte(u64 * sptep,u64 spte)380 static void __set_spte(u64 *sptep, u64 spte)
381 {
382 	union split_spte *ssptep, sspte;
383 
384 	ssptep = (union split_spte *)sptep;
385 	sspte = (union split_spte)spte;
386 
387 	ssptep->spte_high = sspte.spte_high;
388 
389 	/*
390 	 * If we map the spte from nonpresent to present, We should store
391 	 * the high bits firstly, then set present bit, so cpu can not
392 	 * fetch this spte while we are setting the spte.
393 	 */
394 	smp_wmb();
395 
396 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
397 }
398 
__update_clear_spte_fast(u64 * sptep,u64 spte)399 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
400 {
401 	union split_spte *ssptep, sspte;
402 
403 	ssptep = (union split_spte *)sptep;
404 	sspte = (union split_spte)spte;
405 
406 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
407 
408 	/*
409 	 * If we map the spte from present to nonpresent, we should clear
410 	 * present bit firstly to avoid vcpu fetch the old high bits.
411 	 */
412 	smp_wmb();
413 
414 	ssptep->spte_high = sspte.spte_high;
415 	count_spte_clear(sptep, spte);
416 }
417 
__update_clear_spte_slow(u64 * sptep,u64 spte)418 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
419 {
420 	union split_spte *ssptep, sspte, orig;
421 
422 	ssptep = (union split_spte *)sptep;
423 	sspte = (union split_spte)spte;
424 
425 	/* xchg acts as a barrier before the setting of the high bits */
426 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
427 	orig.spte_high = ssptep->spte_high;
428 	ssptep->spte_high = sspte.spte_high;
429 	count_spte_clear(sptep, spte);
430 
431 	return orig.spte;
432 }
433 
434 /*
435  * The idea using the light way get the spte on x86_32 guest is from
436  * gup_get_pte (mm/gup.c).
437  *
438  * An spte tlb flush may be pending, because they are coalesced and
439  * we are running out of the MMU lock.  Therefore
440  * we need to protect against in-progress updates of the spte.
441  *
442  * Reading the spte while an update is in progress may get the old value
443  * for the high part of the spte.  The race is fine for a present->non-present
444  * change (because the high part of the spte is ignored for non-present spte),
445  * but for a present->present change we must reread the spte.
446  *
447  * All such changes are done in two steps (present->non-present and
448  * non-present->present), hence it is enough to count the number of
449  * present->non-present updates: if it changed while reading the spte,
450  * we might have hit the race.  This is done using clear_spte_count.
451  */
__get_spte_lockless(u64 * sptep)452 static u64 __get_spte_lockless(u64 *sptep)
453 {
454 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
455 	union split_spte spte, *orig = (union split_spte *)sptep;
456 	int count;
457 
458 retry:
459 	count = sp->clear_spte_count;
460 	smp_rmb();
461 
462 	spte.spte_low = orig->spte_low;
463 	smp_rmb();
464 
465 	spte.spte_high = orig->spte_high;
466 	smp_rmb();
467 
468 	if (unlikely(spte.spte_low != orig->spte_low ||
469 	      count != sp->clear_spte_count))
470 		goto retry;
471 
472 	return spte.spte;
473 }
474 #endif
475 
476 /* Rules for using mmu_spte_set:
477  * Set the sptep from nonpresent to present.
478  * Note: the sptep being assigned *must* be either not present
479  * or in a state where the hardware will not attempt to update
480  * the spte.
481  */
mmu_spte_set(u64 * sptep,u64 new_spte)482 static void mmu_spte_set(u64 *sptep, u64 new_spte)
483 {
484 	WARN_ON_ONCE(is_shadow_present_pte(*sptep));
485 	__set_spte(sptep, new_spte);
486 }
487 
488 /* Rules for using mmu_spte_update:
489  * Update the state bits, it means the mapped pfn is not changed.
490  *
491  * Returns true if the TLB needs to be flushed
492  */
mmu_spte_update(u64 * sptep,u64 new_spte)493 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
494 {
495 	u64 old_spte = *sptep;
496 
497 	WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
498 	check_spte_writable_invariants(new_spte);
499 
500 	if (!is_shadow_present_pte(old_spte)) {
501 		mmu_spte_set(sptep, new_spte);
502 		return false;
503 	}
504 
505 	if (!spte_needs_atomic_update(old_spte))
506 		__update_clear_spte_fast(sptep, new_spte);
507 	else
508 		old_spte = __update_clear_spte_slow(sptep, new_spte);
509 
510 	WARN_ON_ONCE(!is_shadow_present_pte(old_spte) ||
511 		     spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
512 
513 	return leaf_spte_change_needs_tlb_flush(old_spte, new_spte);
514 }
515 
516 /*
517  * Rules for using mmu_spte_clear_track_bits:
518  * It sets the sptep from present to nonpresent, and track the
519  * state bits, it is used to clear the last level sptep.
520  * Returns the old PTE.
521  */
mmu_spte_clear_track_bits(struct kvm * kvm,u64 * sptep)522 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
523 {
524 	u64 old_spte = *sptep;
525 	int level = sptep_to_sp(sptep)->role.level;
526 
527 	if (!is_shadow_present_pte(old_spte) ||
528 	    !spte_needs_atomic_update(old_spte))
529 		__update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
530 	else
531 		old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
532 
533 	if (!is_shadow_present_pte(old_spte))
534 		return old_spte;
535 
536 	kvm_update_page_stats(kvm, level, -1);
537 	return old_spte;
538 }
539 
540 /*
541  * Rules for using mmu_spte_clear_no_track:
542  * Directly clear spte without caring the state bits of sptep,
543  * it is used to set the upper level spte.
544  */
mmu_spte_clear_no_track(u64 * sptep)545 static void mmu_spte_clear_no_track(u64 *sptep)
546 {
547 	__update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
548 }
549 
mmu_spte_get_lockless(u64 * sptep)550 static u64 mmu_spte_get_lockless(u64 *sptep)
551 {
552 	return __get_spte_lockless(sptep);
553 }
554 
is_tdp_mmu_active(struct kvm_vcpu * vcpu)555 static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
556 {
557 	return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
558 }
559 
walk_shadow_page_lockless_begin(struct kvm_vcpu * vcpu)560 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
561 {
562 	if (is_tdp_mmu_active(vcpu)) {
563 		kvm_tdp_mmu_walk_lockless_begin();
564 	} else {
565 		/*
566 		 * Prevent page table teardown by making any free-er wait during
567 		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
568 		 */
569 		local_irq_disable();
570 
571 		/*
572 		 * Make sure a following spte read is not reordered ahead of the write
573 		 * to vcpu->mode.
574 		 */
575 		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
576 	}
577 }
578 
walk_shadow_page_lockless_end(struct kvm_vcpu * vcpu)579 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
580 {
581 	if (is_tdp_mmu_active(vcpu)) {
582 		kvm_tdp_mmu_walk_lockless_end();
583 	} else {
584 		/*
585 		 * Make sure the write to vcpu->mode is not reordered in front of
586 		 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
587 		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
588 		 */
589 		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
590 		local_irq_enable();
591 	}
592 }
593 
mmu_topup_memory_caches(struct kvm_vcpu * vcpu,bool maybe_indirect)594 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
595 {
596 	int r;
597 
598 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
599 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
600 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
601 	if (r)
602 		return r;
603 	if (kvm_has_mirrored_tdp(vcpu->kvm)) {
604 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_external_spt_cache,
605 					       PT64_ROOT_MAX_LEVEL);
606 		if (r)
607 			return r;
608 	}
609 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
610 				       PT64_ROOT_MAX_LEVEL);
611 	if (r)
612 		return r;
613 	if (maybe_indirect) {
614 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
615 					       PT64_ROOT_MAX_LEVEL);
616 		if (r)
617 			return r;
618 	}
619 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
620 					  PT64_ROOT_MAX_LEVEL);
621 }
622 
mmu_free_memory_caches(struct kvm_vcpu * vcpu)623 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
624 {
625 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
626 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
627 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
628 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_external_spt_cache);
629 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
630 }
631 
mmu_free_pte_list_desc(struct pte_list_desc * pte_list_desc)632 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
633 {
634 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
635 }
636 
637 static bool sp_has_gptes(struct kvm_mmu_page *sp);
638 
kvm_mmu_page_get_gfn(struct kvm_mmu_page * sp,int index)639 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
640 {
641 	if (sp->role.passthrough)
642 		return sp->gfn;
643 
644 	if (sp->shadowed_translation)
645 		return sp->shadowed_translation[index] >> PAGE_SHIFT;
646 
647 	return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
648 }
649 
650 /*
651  * For leaf SPTEs, fetch the *guest* access permissions being shadowed. Note
652  * that the SPTE itself may have a more constrained access permissions that
653  * what the guest enforces. For example, a guest may create an executable
654  * huge PTE but KVM may disallow execution to mitigate iTLB multihit.
655  */
kvm_mmu_page_get_access(struct kvm_mmu_page * sp,int index)656 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
657 {
658 	if (sp->shadowed_translation)
659 		return sp->shadowed_translation[index] & ACC_ALL;
660 
661 	/*
662 	 * For direct MMUs (e.g. TDP or non-paging guests) or passthrough SPs,
663 	 * KVM is not shadowing any guest page tables, so the "guest access
664 	 * permissions" are just ACC_ALL.
665 	 *
666 	 * For direct SPs in indirect MMUs (shadow paging), i.e. when KVM
667 	 * is shadowing a guest huge page with small pages, the guest access
668 	 * permissions being shadowed are the access permissions of the huge
669 	 * page.
670 	 *
671 	 * In both cases, sp->role.access contains the correct access bits.
672 	 */
673 	return sp->role.access;
674 }
675 
kvm_mmu_page_set_translation(struct kvm_mmu_page * sp,int index,gfn_t gfn,unsigned int access)676 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
677 					 gfn_t gfn, unsigned int access)
678 {
679 	if (sp->shadowed_translation) {
680 		sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
681 		return;
682 	}
683 
684 	WARN_ONCE(access != kvm_mmu_page_get_access(sp, index),
685 	          "access mismatch under %s page %llx (expected %u, got %u)\n",
686 	          sp->role.passthrough ? "passthrough" : "direct",
687 	          sp->gfn, kvm_mmu_page_get_access(sp, index), access);
688 
689 	WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index),
690 	          "gfn mismatch under %s page %llx (expected %llx, got %llx)\n",
691 	          sp->role.passthrough ? "passthrough" : "direct",
692 	          sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);
693 }
694 
kvm_mmu_page_set_access(struct kvm_mmu_page * sp,int index,unsigned int access)695 static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
696 				    unsigned int access)
697 {
698 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, index);
699 
700 	kvm_mmu_page_set_translation(sp, index, gfn, access);
701 }
702 
703 /*
704  * Return the pointer to the large page information for a given gfn,
705  * handling slots that are not large page aligned.
706  */
lpage_info_slot(gfn_t gfn,const struct kvm_memory_slot * slot,int level)707 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
708 		const struct kvm_memory_slot *slot, int level)
709 {
710 	unsigned long idx;
711 
712 	idx = gfn_to_index(gfn, slot->base_gfn, level);
713 	return &slot->arch.lpage_info[level - 2][idx];
714 }
715 
716 /*
717  * The most significant bit in disallow_lpage tracks whether or not memory
718  * attributes are mixed, i.e. not identical for all gfns at the current level.
719  * The lower order bits are used to refcount other cases where a hugepage is
720  * disallowed, e.g. if KVM has shadow a page table at the gfn.
721  */
722 #define KVM_LPAGE_MIXED_FLAG	BIT(31)
723 
update_gfn_disallow_lpage_count(const struct kvm_memory_slot * slot,gfn_t gfn,int count)724 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
725 					    gfn_t gfn, int count)
726 {
727 	struct kvm_lpage_info *linfo;
728 	int old, i;
729 
730 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
731 		linfo = lpage_info_slot(gfn, slot, i);
732 
733 		old = linfo->disallow_lpage;
734 		linfo->disallow_lpage += count;
735 		WARN_ON_ONCE((old ^ linfo->disallow_lpage) & KVM_LPAGE_MIXED_FLAG);
736 	}
737 }
738 
kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot * slot,gfn_t gfn)739 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
740 {
741 	update_gfn_disallow_lpage_count(slot, gfn, 1);
742 }
743 
kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot * slot,gfn_t gfn)744 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
745 {
746 	update_gfn_disallow_lpage_count(slot, gfn, -1);
747 }
748 
account_shadowed(struct kvm * kvm,struct kvm_mmu_page * sp)749 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
750 {
751 	struct kvm_memslots *slots;
752 	struct kvm_memory_slot *slot;
753 	gfn_t gfn;
754 
755 	kvm->arch.indirect_shadow_pages++;
756 	/*
757 	 * Ensure indirect_shadow_pages is elevated prior to re-reading guest
758 	 * child PTEs in FNAME(gpte_changed), i.e. guarantee either in-flight
759 	 * emulated writes are visible before re-reading guest PTEs, or that
760 	 * an emulated write will see the elevated count and acquire mmu_lock
761 	 * to update SPTEs.  Pairs with the smp_mb() in kvm_mmu_track_write().
762 	 */
763 	smp_mb();
764 
765 	gfn = sp->gfn;
766 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
767 	slot = __gfn_to_memslot(slots, gfn);
768 
769 	/* the non-leaf shadow pages are keeping readonly. */
770 	if (sp->role.level > PG_LEVEL_4K)
771 		return __kvm_write_track_add_gfn(kvm, slot, gfn);
772 
773 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
774 
775 	if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
776 		kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);
777 }
778 
track_possible_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp)779 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
780 {
781 	/*
782 	 * If it's possible to replace the shadow page with an NX huge page,
783 	 * i.e. if the shadow page is the only thing currently preventing KVM
784 	 * from using a huge page, add the shadow page to the list of "to be
785 	 * zapped for NX recovery" pages.  Note, the shadow page can already be
786 	 * on the list if KVM is reusing an existing shadow page, i.e. if KVM
787 	 * links a shadow page at multiple points.
788 	 */
789 	if (!list_empty(&sp->possible_nx_huge_page_link))
790 		return;
791 
792 	++kvm->stat.nx_lpage_splits;
793 	list_add_tail(&sp->possible_nx_huge_page_link,
794 		      &kvm->arch.possible_nx_huge_pages);
795 }
796 
account_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp,bool nx_huge_page_possible)797 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
798 				 bool nx_huge_page_possible)
799 {
800 	sp->nx_huge_page_disallowed = true;
801 
802 	if (nx_huge_page_possible)
803 		track_possible_nx_huge_page(kvm, sp);
804 }
805 
unaccount_shadowed(struct kvm * kvm,struct kvm_mmu_page * sp)806 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
807 {
808 	struct kvm_memslots *slots;
809 	struct kvm_memory_slot *slot;
810 	gfn_t gfn;
811 
812 	kvm->arch.indirect_shadow_pages--;
813 	gfn = sp->gfn;
814 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
815 	slot = __gfn_to_memslot(slots, gfn);
816 	if (sp->role.level > PG_LEVEL_4K)
817 		return __kvm_write_track_remove_gfn(kvm, slot, gfn);
818 
819 	kvm_mmu_gfn_allow_lpage(slot, gfn);
820 }
821 
untrack_possible_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp)822 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
823 {
824 	if (list_empty(&sp->possible_nx_huge_page_link))
825 		return;
826 
827 	--kvm->stat.nx_lpage_splits;
828 	list_del_init(&sp->possible_nx_huge_page_link);
829 }
830 
unaccount_nx_huge_page(struct kvm * kvm,struct kvm_mmu_page * sp)831 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
832 {
833 	sp->nx_huge_page_disallowed = false;
834 
835 	untrack_possible_nx_huge_page(kvm, sp);
836 }
837 
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu * vcpu,gfn_t gfn,bool no_dirty_log)838 static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
839 							   gfn_t gfn,
840 							   bool no_dirty_log)
841 {
842 	struct kvm_memory_slot *slot;
843 
844 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
845 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
846 		return NULL;
847 	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
848 		return NULL;
849 
850 	return slot;
851 }
852 
853 /*
854  * About rmap_head encoding:
855  *
856  * If the bit zero of rmap_head->val is clear, then it points to the only spte
857  * in this rmap chain. Otherwise, (rmap_head->val & ~3) points to a struct
858  * pte_list_desc containing more mappings.
859  */
860 #define KVM_RMAP_MANY	BIT(0)
861 
862 /*
863  * rmaps and PTE lists are mostly protected by mmu_lock (the shadow MMU always
864  * operates with mmu_lock held for write), but rmaps can be walked without
865  * holding mmu_lock so long as the caller can tolerate SPTEs in the rmap chain
866  * being zapped/dropped _while the rmap is locked_.
867  *
868  * Other than the KVM_RMAP_LOCKED flag, modifications to rmap entries must be
869  * done while holding mmu_lock for write.  This allows a task walking rmaps
870  * without holding mmu_lock to concurrently walk the same entries as a task
871  * that is holding mmu_lock but _not_ the rmap lock.  Neither task will modify
872  * the rmaps, thus the walks are stable.
873  *
874  * As alluded to above, SPTEs in rmaps are _not_ protected by KVM_RMAP_LOCKED,
875  * only the rmap chains themselves are protected.  E.g. holding an rmap's lock
876  * ensures all "struct pte_list_desc" fields are stable.
877  */
878 #define KVM_RMAP_LOCKED	BIT(1)
879 
__kvm_rmap_lock(struct kvm_rmap_head * rmap_head)880 static unsigned long __kvm_rmap_lock(struct kvm_rmap_head *rmap_head)
881 {
882 	unsigned long old_val, new_val;
883 
884 	lockdep_assert_preemption_disabled();
885 
886 	/*
887 	 * Elide the lock if the rmap is empty, as lockless walkers (read-only
888 	 * mode) don't need to (and can't) walk an empty rmap, nor can they add
889 	 * entries to the rmap.  I.e. the only paths that process empty rmaps
890 	 * do so while holding mmu_lock for write, and are mutually exclusive.
891 	 */
892 	old_val = atomic_long_read(&rmap_head->val);
893 	if (!old_val)
894 		return 0;
895 
896 	do {
897 		/*
898 		 * If the rmap is locked, wait for it to be unlocked before
899 		 * trying acquire the lock, e.g. to avoid bouncing the cache
900 		 * line.
901 		 */
902 		while (old_val & KVM_RMAP_LOCKED) {
903 			cpu_relax();
904 			old_val = atomic_long_read(&rmap_head->val);
905 		}
906 
907 		/*
908 		 * Recheck for an empty rmap, it may have been purged by the
909 		 * task that held the lock.
910 		 */
911 		if (!old_val)
912 			return 0;
913 
914 		new_val = old_val | KVM_RMAP_LOCKED;
915 	/*
916 	 * Use try_cmpxchg_acquire() to prevent reads and writes to the rmap
917 	 * from being reordered outside of the critical section created by
918 	 * __kvm_rmap_lock().
919 	 *
920 	 * Pairs with the atomic_long_set_release() in kvm_rmap_unlock().
921 	 *
922 	 * For the !old_val case, no ordering is needed, as there is no rmap
923 	 * to walk.
924 	 */
925 	} while (!atomic_long_try_cmpxchg_acquire(&rmap_head->val, &old_val, new_val));
926 
927 	/*
928 	 * Return the old value, i.e. _without_ the LOCKED bit set.  It's
929 	 * impossible for the return value to be 0 (see above), i.e. the read-
930 	 * only unlock flow can't get a false positive and fail to unlock.
931 	 */
932 	return old_val;
933 }
934 
kvm_rmap_lock(struct kvm * kvm,struct kvm_rmap_head * rmap_head)935 static unsigned long kvm_rmap_lock(struct kvm *kvm,
936 				   struct kvm_rmap_head *rmap_head)
937 {
938 	lockdep_assert_held_write(&kvm->mmu_lock);
939 
940 	return __kvm_rmap_lock(rmap_head);
941 }
942 
__kvm_rmap_unlock(struct kvm_rmap_head * rmap_head,unsigned long val)943 static void __kvm_rmap_unlock(struct kvm_rmap_head *rmap_head,
944 			      unsigned long val)
945 {
946 	KVM_MMU_WARN_ON(val & KVM_RMAP_LOCKED);
947 	/*
948 	 * Ensure that all accesses to the rmap have completed before unlocking
949 	 * the rmap.
950 	 *
951 	 * Pairs with the atomic_long_try_cmpxchg_acquire() in __kvm_rmap_lock().
952 	 */
953 	atomic_long_set_release(&rmap_head->val, val);
954 }
955 
kvm_rmap_unlock(struct kvm * kvm,struct kvm_rmap_head * rmap_head,unsigned long new_val)956 static void kvm_rmap_unlock(struct kvm *kvm,
957 			    struct kvm_rmap_head *rmap_head,
958 			    unsigned long new_val)
959 {
960 	lockdep_assert_held_write(&kvm->mmu_lock);
961 
962 	__kvm_rmap_unlock(rmap_head, new_val);
963 }
964 
kvm_rmap_get(struct kvm_rmap_head * rmap_head)965 static unsigned long kvm_rmap_get(struct kvm_rmap_head *rmap_head)
966 {
967 	return atomic_long_read(&rmap_head->val) & ~KVM_RMAP_LOCKED;
968 }
969 
970 /*
971  * If mmu_lock isn't held, rmaps can only be locked in read-only mode.  The
972  * actual locking is the same, but the caller is disallowed from modifying the
973  * rmap, and so the unlock flow is a nop if the rmap is/was empty.
974  */
kvm_rmap_lock_readonly(struct kvm_rmap_head * rmap_head)975 static unsigned long kvm_rmap_lock_readonly(struct kvm_rmap_head *rmap_head)
976 {
977 	unsigned long rmap_val;
978 
979 	preempt_disable();
980 	rmap_val = __kvm_rmap_lock(rmap_head);
981 
982 	if (!rmap_val)
983 		preempt_enable();
984 
985 	return rmap_val;
986 }
987 
kvm_rmap_unlock_readonly(struct kvm_rmap_head * rmap_head,unsigned long old_val)988 static void kvm_rmap_unlock_readonly(struct kvm_rmap_head *rmap_head,
989 				     unsigned long old_val)
990 {
991 	if (!old_val)
992 		return;
993 
994 	KVM_MMU_WARN_ON(old_val != kvm_rmap_get(rmap_head));
995 
996 	__kvm_rmap_unlock(rmap_head, old_val);
997 	preempt_enable();
998 }
999 
1000 /*
1001  * Returns the number of pointers in the rmap chain, not counting the new one.
1002  */
pte_list_add(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,u64 * spte,struct kvm_rmap_head * rmap_head)1003 static int pte_list_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1004 			u64 *spte, struct kvm_rmap_head *rmap_head)
1005 {
1006 	unsigned long old_val, new_val;
1007 	struct pte_list_desc *desc;
1008 	int count = 0;
1009 
1010 	old_val = kvm_rmap_lock(kvm, rmap_head);
1011 
1012 	if (!old_val) {
1013 		new_val = (unsigned long)spte;
1014 	} else if (!(old_val & KVM_RMAP_MANY)) {
1015 		desc = kvm_mmu_memory_cache_alloc(cache);
1016 		desc->sptes[0] = (u64 *)old_val;
1017 		desc->sptes[1] = spte;
1018 		desc->spte_count = 2;
1019 		desc->tail_count = 0;
1020 		new_val = (unsigned long)desc | KVM_RMAP_MANY;
1021 		++count;
1022 	} else {
1023 		desc = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY);
1024 		count = desc->tail_count + desc->spte_count;
1025 
1026 		/*
1027 		 * If the previous head is full, allocate a new head descriptor
1028 		 * as tail descriptors are always kept full.
1029 		 */
1030 		if (desc->spte_count == PTE_LIST_EXT) {
1031 			desc = kvm_mmu_memory_cache_alloc(cache);
1032 			desc->more = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY);
1033 			desc->spte_count = 0;
1034 			desc->tail_count = count;
1035 			new_val = (unsigned long)desc | KVM_RMAP_MANY;
1036 		} else {
1037 			new_val = old_val;
1038 		}
1039 		desc->sptes[desc->spte_count++] = spte;
1040 	}
1041 
1042 	kvm_rmap_unlock(kvm, rmap_head, new_val);
1043 
1044 	return count;
1045 }
1046 
pte_list_desc_remove_entry(struct kvm * kvm,unsigned long * rmap_val,struct pte_list_desc * desc,int i)1047 static void pte_list_desc_remove_entry(struct kvm *kvm, unsigned long *rmap_val,
1048 				       struct pte_list_desc *desc, int i)
1049 {
1050 	struct pte_list_desc *head_desc = (struct pte_list_desc *)(*rmap_val & ~KVM_RMAP_MANY);
1051 	int j = head_desc->spte_count - 1;
1052 
1053 	/*
1054 	 * The head descriptor should never be empty.  A new head is added only
1055 	 * when adding an entry and the previous head is full, and heads are
1056 	 * removed (this flow) when they become empty.
1057 	 */
1058 	KVM_BUG_ON_DATA_CORRUPTION(j < 0, kvm);
1059 
1060 	/*
1061 	 * Replace the to-be-freed SPTE with the last valid entry from the head
1062 	 * descriptor to ensure that tail descriptors are full at all times.
1063 	 * Note, this also means that tail_count is stable for each descriptor.
1064 	 */
1065 	desc->sptes[i] = head_desc->sptes[j];
1066 	head_desc->sptes[j] = NULL;
1067 	head_desc->spte_count--;
1068 	if (head_desc->spte_count)
1069 		return;
1070 
1071 	/*
1072 	 * The head descriptor is empty.  If there are no tail descriptors,
1073 	 * nullify the rmap head to mark the list as empty, else point the rmap
1074 	 * head at the next descriptor, i.e. the new head.
1075 	 */
1076 	if (!head_desc->more)
1077 		*rmap_val = 0;
1078 	else
1079 		*rmap_val = (unsigned long)head_desc->more | KVM_RMAP_MANY;
1080 	mmu_free_pte_list_desc(head_desc);
1081 }
1082 
pte_list_remove(struct kvm * kvm,u64 * spte,struct kvm_rmap_head * rmap_head)1083 static void pte_list_remove(struct kvm *kvm, u64 *spte,
1084 			    struct kvm_rmap_head *rmap_head)
1085 {
1086 	struct pte_list_desc *desc;
1087 	unsigned long rmap_val;
1088 	int i;
1089 
1090 	rmap_val = kvm_rmap_lock(kvm, rmap_head);
1091 	if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_val, kvm))
1092 		goto out;
1093 
1094 	if (!(rmap_val & KVM_RMAP_MANY)) {
1095 		if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_val != spte, kvm))
1096 			goto out;
1097 
1098 		rmap_val = 0;
1099 	} else {
1100 		desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1101 		while (desc) {
1102 			for (i = 0; i < desc->spte_count; ++i) {
1103 				if (desc->sptes[i] == spte) {
1104 					pte_list_desc_remove_entry(kvm, &rmap_val,
1105 								   desc, i);
1106 					goto out;
1107 				}
1108 			}
1109 			desc = desc->more;
1110 		}
1111 
1112 		KVM_BUG_ON_DATA_CORRUPTION(true, kvm);
1113 	}
1114 
1115 out:
1116 	kvm_rmap_unlock(kvm, rmap_head, rmap_val);
1117 }
1118 
kvm_zap_one_rmap_spte(struct kvm * kvm,struct kvm_rmap_head * rmap_head,u64 * sptep)1119 static void kvm_zap_one_rmap_spte(struct kvm *kvm,
1120 				  struct kvm_rmap_head *rmap_head, u64 *sptep)
1121 {
1122 	mmu_spte_clear_track_bits(kvm, sptep);
1123 	pte_list_remove(kvm, sptep, rmap_head);
1124 }
1125 
1126 /* Return true if at least one SPTE was zapped, false otherwise */
kvm_zap_all_rmap_sptes(struct kvm * kvm,struct kvm_rmap_head * rmap_head)1127 static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
1128 				   struct kvm_rmap_head *rmap_head)
1129 {
1130 	struct pte_list_desc *desc, *next;
1131 	unsigned long rmap_val;
1132 	int i;
1133 
1134 	rmap_val = kvm_rmap_lock(kvm, rmap_head);
1135 	if (!rmap_val)
1136 		return false;
1137 
1138 	if (!(rmap_val & KVM_RMAP_MANY)) {
1139 		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_val);
1140 		goto out;
1141 	}
1142 
1143 	desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1144 
1145 	for (; desc; desc = next) {
1146 		for (i = 0; i < desc->spte_count; i++)
1147 			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
1148 		next = desc->more;
1149 		mmu_free_pte_list_desc(desc);
1150 	}
1151 out:
1152 	/* rmap_head is meaningless now, remember to reset it */
1153 	kvm_rmap_unlock(kvm, rmap_head, 0);
1154 	return true;
1155 }
1156 
pte_list_count(struct kvm_rmap_head * rmap_head)1157 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
1158 {
1159 	unsigned long rmap_val = kvm_rmap_get(rmap_head);
1160 	struct pte_list_desc *desc;
1161 
1162 	if (!rmap_val)
1163 		return 0;
1164 	else if (!(rmap_val & KVM_RMAP_MANY))
1165 		return 1;
1166 
1167 	desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1168 	return desc->tail_count + desc->spte_count;
1169 }
1170 
gfn_to_rmap(gfn_t gfn,int level,const struct kvm_memory_slot * slot)1171 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1172 					 const struct kvm_memory_slot *slot)
1173 {
1174 	unsigned long idx;
1175 
1176 	idx = gfn_to_index(gfn, slot->base_gfn, level);
1177 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1178 }
1179 
rmap_remove(struct kvm * kvm,u64 * spte)1180 static void rmap_remove(struct kvm *kvm, u64 *spte)
1181 {
1182 	struct kvm_memslots *slots;
1183 	struct kvm_memory_slot *slot;
1184 	struct kvm_mmu_page *sp;
1185 	gfn_t gfn;
1186 	struct kvm_rmap_head *rmap_head;
1187 
1188 	sp = sptep_to_sp(spte);
1189 	gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
1190 
1191 	/*
1192 	 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
1193 	 * so we have to determine which memslots to use based on context
1194 	 * information in sp->role.
1195 	 */
1196 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
1197 
1198 	slot = __gfn_to_memslot(slots, gfn);
1199 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1200 
1201 	pte_list_remove(kvm, spte, rmap_head);
1202 }
1203 
1204 /*
1205  * Used by the following functions to iterate through the sptes linked by a
1206  * rmap.  All fields are private and not assumed to be used outside.
1207  */
1208 struct rmap_iterator {
1209 	/* private fields */
1210 	struct rmap_head *head;
1211 	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
1212 	int pos;			/* index of the sptep */
1213 };
1214 
1215 /*
1216  * Iteration must be started by this function.  This should also be used after
1217  * removing/dropping sptes from the rmap link because in such cases the
1218  * information in the iterator may not be valid.
1219  *
1220  * Returns sptep if found, NULL otherwise.
1221  */
rmap_get_first(struct kvm_rmap_head * rmap_head,struct rmap_iterator * iter)1222 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1223 			   struct rmap_iterator *iter)
1224 {
1225 	unsigned long rmap_val = kvm_rmap_get(rmap_head);
1226 
1227 	if (!rmap_val)
1228 		return NULL;
1229 
1230 	if (!(rmap_val & KVM_RMAP_MANY)) {
1231 		iter->desc = NULL;
1232 		return (u64 *)rmap_val;
1233 	}
1234 
1235 	iter->desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
1236 	iter->pos = 0;
1237 	return iter->desc->sptes[iter->pos];
1238 }
1239 
1240 /*
1241  * Must be used with a valid iterator: e.g. after rmap_get_first().
1242  *
1243  * Returns sptep if found, NULL otherwise.
1244  */
rmap_get_next(struct rmap_iterator * iter)1245 static u64 *rmap_get_next(struct rmap_iterator *iter)
1246 {
1247 	if (iter->desc) {
1248 		if (iter->pos < PTE_LIST_EXT - 1) {
1249 			++iter->pos;
1250 			if (iter->desc->sptes[iter->pos])
1251 				return iter->desc->sptes[iter->pos];
1252 		}
1253 
1254 		iter->desc = iter->desc->more;
1255 
1256 		if (iter->desc) {
1257 			iter->pos = 0;
1258 			/* desc->sptes[0] cannot be NULL */
1259 			return iter->desc->sptes[iter->pos];
1260 		}
1261 	}
1262 
1263 	return NULL;
1264 }
1265 
1266 #define __for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)	\
1267 	for (_sptep_ = rmap_get_first(_rmap_head_, _iter_);	\
1268 	     _sptep_; _sptep_ = rmap_get_next(_iter_))
1269 
1270 #define for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)			\
1271 	__for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)			\
1272 		if (!WARN_ON_ONCE(!is_shadow_present_pte(*(_sptep_))))	\
1273 
1274 #define for_each_rmap_spte_lockless(_rmap_head_, _iter_, _sptep_, _spte_)	\
1275 	__for_each_rmap_spte(_rmap_head_, _iter_, _sptep_)			\
1276 		if (is_shadow_present_pte(_spte_ = mmu_spte_get_lockless(sptep)))
1277 
drop_spte(struct kvm * kvm,u64 * sptep)1278 static void drop_spte(struct kvm *kvm, u64 *sptep)
1279 {
1280 	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1281 
1282 	if (is_shadow_present_pte(old_spte))
1283 		rmap_remove(kvm, sptep);
1284 }
1285 
drop_large_spte(struct kvm * kvm,u64 * sptep,bool flush)1286 static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
1287 {
1288 	struct kvm_mmu_page *sp;
1289 
1290 	sp = sptep_to_sp(sptep);
1291 	WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);
1292 
1293 	drop_spte(kvm, sptep);
1294 
1295 	if (flush)
1296 		kvm_flush_remote_tlbs_sptep(kvm, sptep);
1297 }
1298 
1299 /*
1300  * Write-protect on the specified @sptep, @pt_protect indicates whether
1301  * spte write-protection is caused by protecting shadow page table.
1302  *
1303  * Note: write protection is difference between dirty logging and spte
1304  * protection:
1305  * - for dirty logging, the spte can be set to writable at anytime if
1306  *   its dirty bitmap is properly set.
1307  * - for spte protection, the spte can be writable only after unsync-ing
1308  *   shadow page.
1309  *
1310  * Return true if tlb need be flushed.
1311  */
spte_write_protect(u64 * sptep,bool pt_protect)1312 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1313 {
1314 	u64 spte = *sptep;
1315 
1316 	if (!is_writable_pte(spte) &&
1317 	    !(pt_protect && is_mmu_writable_spte(spte)))
1318 		return false;
1319 
1320 	if (pt_protect)
1321 		spte &= ~shadow_mmu_writable_mask;
1322 	spte = spte & ~PT_WRITABLE_MASK;
1323 
1324 	return mmu_spte_update(sptep, spte);
1325 }
1326 
rmap_write_protect(struct kvm_rmap_head * rmap_head,bool pt_protect)1327 static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
1328 			       bool pt_protect)
1329 {
1330 	u64 *sptep;
1331 	struct rmap_iterator iter;
1332 	bool flush = false;
1333 
1334 	for_each_rmap_spte(rmap_head, &iter, sptep)
1335 		flush |= spte_write_protect(sptep, pt_protect);
1336 
1337 	return flush;
1338 }
1339 
spte_clear_dirty(u64 * sptep)1340 static bool spte_clear_dirty(u64 *sptep)
1341 {
1342 	u64 spte = *sptep;
1343 
1344 	KVM_MMU_WARN_ON(!spte_ad_enabled(spte));
1345 	spte &= ~shadow_dirty_mask;
1346 	return mmu_spte_update(sptep, spte);
1347 }
1348 
1349 /*
1350  * Gets the GFN ready for another round of dirty logging by clearing the
1351  *	- D bit on ad-enabled SPTEs, and
1352  *	- W bit on ad-disabled SPTEs.
1353  * Returns true iff any D or W bits were cleared.
1354  */
__rmap_clear_dirty(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot)1355 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1356 			       const struct kvm_memory_slot *slot)
1357 {
1358 	u64 *sptep;
1359 	struct rmap_iterator iter;
1360 	bool flush = false;
1361 
1362 	for_each_rmap_spte(rmap_head, &iter, sptep) {
1363 		if (spte_ad_need_write_protect(*sptep))
1364 			flush |= test_and_clear_bit(PT_WRITABLE_SHIFT,
1365 						    (unsigned long *)sptep);
1366 		else
1367 			flush |= spte_clear_dirty(sptep);
1368 	}
1369 
1370 	return flush;
1371 }
1372 
kvm_mmu_write_protect_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask)1373 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1374 				     struct kvm_memory_slot *slot,
1375 				     gfn_t gfn_offset, unsigned long mask)
1376 {
1377 	struct kvm_rmap_head *rmap_head;
1378 
1379 	if (tdp_mmu_enabled)
1380 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1381 				slot->base_gfn + gfn_offset, mask, true);
1382 
1383 	if (!kvm_memslots_have_rmaps(kvm))
1384 		return;
1385 
1386 	while (mask) {
1387 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1388 					PG_LEVEL_4K, slot);
1389 		rmap_write_protect(rmap_head, false);
1390 
1391 		/* clear the first set bit */
1392 		mask &= mask - 1;
1393 	}
1394 }
1395 
kvm_mmu_clear_dirty_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask)1396 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1397 					 struct kvm_memory_slot *slot,
1398 					 gfn_t gfn_offset, unsigned long mask)
1399 {
1400 	struct kvm_rmap_head *rmap_head;
1401 
1402 	if (tdp_mmu_enabled)
1403 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1404 				slot->base_gfn + gfn_offset, mask, false);
1405 
1406 	if (!kvm_memslots_have_rmaps(kvm))
1407 		return;
1408 
1409 	while (mask) {
1410 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1411 					PG_LEVEL_4K, slot);
1412 		__rmap_clear_dirty(kvm, rmap_head, slot);
1413 
1414 		/* clear the first set bit */
1415 		mask &= mask - 1;
1416 	}
1417 }
1418 
kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask)1419 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1420 				struct kvm_memory_slot *slot,
1421 				gfn_t gfn_offset, unsigned long mask)
1422 {
1423 	/*
1424 	 * If the slot was assumed to be "initially all dirty", write-protect
1425 	 * huge pages to ensure they are split to 4KiB on the first write (KVM
1426 	 * dirty logs at 4KiB granularity). If eager page splitting is enabled,
1427 	 * immediately try to split huge pages, e.g. so that vCPUs don't get
1428 	 * saddled with the cost of splitting.
1429 	 *
1430 	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
1431 	 * of memslot has no such restriction, so the range can cross two large
1432 	 * pages.
1433 	 */
1434 	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1435 		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1436 		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1437 
1438 		if (READ_ONCE(eager_page_split))
1439 			kvm_mmu_try_split_huge_pages(kvm, slot, start, end + 1, PG_LEVEL_4K);
1440 
1441 		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1442 
1443 		/* Cross two large pages? */
1444 		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1445 		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1446 			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1447 						       PG_LEVEL_2M);
1448 	}
1449 
1450 	/*
1451 	 * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in
1452 	 * mask.  If PML is enabled and the GFN doesn't need to be write-
1453 	 * protected for other reasons, e.g. shadow paging, clear the Dirty bit.
1454 	 * Otherwise clear the Writable bit.
1455 	 *
1456 	 * Note that kvm_mmu_clear_dirty_pt_masked() is called whenever PML is
1457 	 * enabled but it chooses between clearing the Dirty bit and Writeable
1458 	 * bit based on the context.
1459 	 */
1460 	if (kvm->arch.cpu_dirty_log_size)
1461 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1462 	else
1463 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1464 }
1465 
kvm_cpu_dirty_log_size(struct kvm * kvm)1466 int kvm_cpu_dirty_log_size(struct kvm *kvm)
1467 {
1468 	return kvm->arch.cpu_dirty_log_size;
1469 }
1470 
kvm_mmu_slot_gfn_write_protect(struct kvm * kvm,struct kvm_memory_slot * slot,u64 gfn,int min_level)1471 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1472 				    struct kvm_memory_slot *slot, u64 gfn,
1473 				    int min_level)
1474 {
1475 	struct kvm_rmap_head *rmap_head;
1476 	int i;
1477 	bool write_protected = false;
1478 
1479 	if (kvm_memslots_have_rmaps(kvm)) {
1480 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1481 			rmap_head = gfn_to_rmap(gfn, i, slot);
1482 			write_protected |= rmap_write_protect(rmap_head, true);
1483 		}
1484 	}
1485 
1486 	if (tdp_mmu_enabled)
1487 		write_protected |=
1488 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1489 
1490 	return write_protected;
1491 }
1492 
kvm_vcpu_write_protect_gfn(struct kvm_vcpu * vcpu,u64 gfn)1493 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
1494 {
1495 	struct kvm_memory_slot *slot;
1496 
1497 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1498 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1499 }
1500 
kvm_zap_rmap(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot)1501 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1502 			 const struct kvm_memory_slot *slot)
1503 {
1504 	return kvm_zap_all_rmap_sptes(kvm, rmap_head);
1505 }
1506 
1507 struct slot_rmap_walk_iterator {
1508 	/* input fields. */
1509 	const struct kvm_memory_slot *slot;
1510 	gfn_t start_gfn;
1511 	gfn_t end_gfn;
1512 	int start_level;
1513 	int end_level;
1514 
1515 	/* output fields. */
1516 	gfn_t gfn;
1517 	struct kvm_rmap_head *rmap;
1518 	int level;
1519 
1520 	/* private field. */
1521 	struct kvm_rmap_head *end_rmap;
1522 };
1523 
rmap_walk_init_level(struct slot_rmap_walk_iterator * iterator,int level)1524 static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
1525 				 int level)
1526 {
1527 	iterator->level = level;
1528 	iterator->gfn = iterator->start_gfn;
1529 	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1530 	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1531 }
1532 
slot_rmap_walk_init(struct slot_rmap_walk_iterator * iterator,const struct kvm_memory_slot * slot,int start_level,int end_level,gfn_t start_gfn,gfn_t end_gfn)1533 static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1534 				const struct kvm_memory_slot *slot,
1535 				int start_level, int end_level,
1536 				gfn_t start_gfn, gfn_t end_gfn)
1537 {
1538 	iterator->slot = slot;
1539 	iterator->start_level = start_level;
1540 	iterator->end_level = end_level;
1541 	iterator->start_gfn = start_gfn;
1542 	iterator->end_gfn = end_gfn;
1543 
1544 	rmap_walk_init_level(iterator, iterator->start_level);
1545 }
1546 
slot_rmap_walk_okay(struct slot_rmap_walk_iterator * iterator)1547 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1548 {
1549 	return !!iterator->rmap;
1550 }
1551 
slot_rmap_walk_next(struct slot_rmap_walk_iterator * iterator)1552 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1553 {
1554 	while (++iterator->rmap <= iterator->end_rmap) {
1555 		iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level);
1556 
1557 		if (atomic_long_read(&iterator->rmap->val))
1558 			return;
1559 	}
1560 
1561 	if (++iterator->level > iterator->end_level) {
1562 		iterator->rmap = NULL;
1563 		return;
1564 	}
1565 
1566 	rmap_walk_init_level(iterator, iterator->level);
1567 }
1568 
1569 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
1570 	   _start_gfn, _end_gfn, _iter_)				\
1571 	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
1572 				 _end_level_, _start_gfn, _end_gfn);	\
1573 	     slot_rmap_walk_okay(_iter_);				\
1574 	     slot_rmap_walk_next(_iter_))
1575 
1576 /* The return value indicates if tlb flush on all vcpus is needed. */
1577 typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
1578 				    struct kvm_rmap_head *rmap_head,
1579 				    const struct kvm_memory_slot *slot);
1580 
__walk_slot_rmaps(struct kvm * kvm,const struct kvm_memory_slot * slot,slot_rmaps_handler fn,int start_level,int end_level,gfn_t start_gfn,gfn_t end_gfn,bool can_yield,bool flush_on_yield,bool flush)1581 static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
1582 					      const struct kvm_memory_slot *slot,
1583 					      slot_rmaps_handler fn,
1584 					      int start_level, int end_level,
1585 					      gfn_t start_gfn, gfn_t end_gfn,
1586 					      bool can_yield, bool flush_on_yield,
1587 					      bool flush)
1588 {
1589 	struct slot_rmap_walk_iterator iterator;
1590 
1591 	lockdep_assert_held_write(&kvm->mmu_lock);
1592 
1593 	for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
1594 			end_gfn, &iterator) {
1595 		if (iterator.rmap)
1596 			flush |= fn(kvm, iterator.rmap, slot);
1597 
1598 		if (!can_yield)
1599 			continue;
1600 
1601 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
1602 			if (flush && flush_on_yield) {
1603 				kvm_flush_remote_tlbs_range(kvm, start_gfn,
1604 							    iterator.gfn - start_gfn + 1);
1605 				flush = false;
1606 			}
1607 			cond_resched_rwlock_write(&kvm->mmu_lock);
1608 		}
1609 	}
1610 
1611 	return flush;
1612 }
1613 
walk_slot_rmaps(struct kvm * kvm,const struct kvm_memory_slot * slot,slot_rmaps_handler fn,int start_level,int end_level,bool flush_on_yield)1614 static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
1615 					    const struct kvm_memory_slot *slot,
1616 					    slot_rmaps_handler fn,
1617 					    int start_level, int end_level,
1618 					    bool flush_on_yield)
1619 {
1620 	return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
1621 				 slot->base_gfn, slot->base_gfn + slot->npages - 1,
1622 				 true, flush_on_yield, false);
1623 }
1624 
walk_slot_rmaps_4k(struct kvm * kvm,const struct kvm_memory_slot * slot,slot_rmaps_handler fn,bool flush_on_yield)1625 static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
1626 					       const struct kvm_memory_slot *slot,
1627 					       slot_rmaps_handler fn,
1628 					       bool flush_on_yield)
1629 {
1630 	return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
1631 }
1632 
__kvm_rmap_zap_gfn_range(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t start,gfn_t end,bool can_yield,bool flush)1633 static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm,
1634 				     const struct kvm_memory_slot *slot,
1635 				     gfn_t start, gfn_t end, bool can_yield,
1636 				     bool flush)
1637 {
1638 	return __walk_slot_rmaps(kvm, slot, kvm_zap_rmap,
1639 				 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1640 				 start, end - 1, can_yield, true, flush);
1641 }
1642 
kvm_unmap_gfn_range(struct kvm * kvm,struct kvm_gfn_range * range)1643 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1644 {
1645 	bool flush = false;
1646 
1647 	/*
1648 	 * To prevent races with vCPUs faulting in a gfn using stale data,
1649 	 * zapping a gfn range must be protected by mmu_invalidate_in_progress
1650 	 * (and mmu_invalidate_seq).  The only exception is memslot deletion;
1651 	 * in that case, SRCU synchronization ensures that SPTEs are zapped
1652 	 * after all vCPUs have unlocked SRCU, guaranteeing that vCPUs see the
1653 	 * invalid slot.
1654 	 */
1655 	lockdep_assert_once(kvm->mmu_invalidate_in_progress ||
1656 			    lockdep_is_held(&kvm->slots_lock));
1657 
1658 	if (kvm_memslots_have_rmaps(kvm))
1659 		flush = __kvm_rmap_zap_gfn_range(kvm, range->slot,
1660 						 range->start, range->end,
1661 						 range->may_block, flush);
1662 
1663 	if (tdp_mmu_enabled)
1664 		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1665 
1666 	if (kvm_x86_ops.set_apic_access_page_addr &&
1667 	    range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
1668 		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
1669 
1670 	return flush;
1671 }
1672 
1673 #define RMAP_RECYCLE_THRESHOLD 1000
1674 
__rmap_add(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,const struct kvm_memory_slot * slot,u64 * spte,gfn_t gfn,unsigned int access)1675 static void __rmap_add(struct kvm *kvm,
1676 		       struct kvm_mmu_memory_cache *cache,
1677 		       const struct kvm_memory_slot *slot,
1678 		       u64 *spte, gfn_t gfn, unsigned int access)
1679 {
1680 	struct kvm_mmu_page *sp;
1681 	struct kvm_rmap_head *rmap_head;
1682 	int rmap_count;
1683 
1684 	sp = sptep_to_sp(spte);
1685 	kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1686 	kvm_update_page_stats(kvm, sp->role.level, 1);
1687 
1688 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1689 	rmap_count = pte_list_add(kvm, cache, spte, rmap_head);
1690 
1691 	if (rmap_count > kvm->stat.max_mmu_rmap_size)
1692 		kvm->stat.max_mmu_rmap_size = rmap_count;
1693 	if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
1694 		kvm_zap_all_rmap_sptes(kvm, rmap_head);
1695 		kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1696 	}
1697 }
1698 
rmap_add(struct kvm_vcpu * vcpu,const struct kvm_memory_slot * slot,u64 * spte,gfn_t gfn,unsigned int access)1699 static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
1700 		     u64 *spte, gfn_t gfn, unsigned int access)
1701 {
1702 	struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
1703 
1704 	__rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1705 }
1706 
kvm_rmap_age_gfn_range(struct kvm * kvm,struct kvm_gfn_range * range,bool test_only)1707 static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
1708 				   struct kvm_gfn_range *range,
1709 				   bool test_only)
1710 {
1711 	struct kvm_rmap_head *rmap_head;
1712 	struct rmap_iterator iter;
1713 	unsigned long rmap_val;
1714 	bool young = false;
1715 	u64 *sptep;
1716 	gfn_t gfn;
1717 	int level;
1718 	u64 spte;
1719 
1720 	for (level = PG_LEVEL_4K; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
1721 		for (gfn = range->start; gfn < range->end;
1722 		     gfn += KVM_PAGES_PER_HPAGE(level)) {
1723 			rmap_head = gfn_to_rmap(gfn, level, range->slot);
1724 			rmap_val = kvm_rmap_lock_readonly(rmap_head);
1725 
1726 			for_each_rmap_spte_lockless(rmap_head, &iter, sptep, spte) {
1727 				if (!is_accessed_spte(spte))
1728 					continue;
1729 
1730 				if (test_only) {
1731 					kvm_rmap_unlock_readonly(rmap_head, rmap_val);
1732 					return true;
1733 				}
1734 
1735 				if (spte_ad_enabled(spte))
1736 					clear_bit((ffs(shadow_accessed_mask) - 1),
1737 						  (unsigned long *)sptep);
1738 				else
1739 					/*
1740 					 * If the following cmpxchg fails, the
1741 					 * spte is being concurrently modified
1742 					 * and should most likely stay young.
1743 					 */
1744 					cmpxchg64(sptep, spte,
1745 					      mark_spte_for_access_track(spte));
1746 				young = true;
1747 			}
1748 
1749 			kvm_rmap_unlock_readonly(rmap_head, rmap_val);
1750 		}
1751 	}
1752 	return young;
1753 }
1754 
kvm_may_have_shadow_mmu_sptes(struct kvm * kvm)1755 static bool kvm_may_have_shadow_mmu_sptes(struct kvm *kvm)
1756 {
1757 	return !tdp_mmu_enabled || READ_ONCE(kvm->arch.indirect_shadow_pages);
1758 }
1759 
kvm_age_gfn(struct kvm * kvm,struct kvm_gfn_range * range)1760 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1761 {
1762 	bool young = false;
1763 
1764 	if (tdp_mmu_enabled)
1765 		young = kvm_tdp_mmu_age_gfn_range(kvm, range);
1766 
1767 	if (kvm_may_have_shadow_mmu_sptes(kvm))
1768 		young |= kvm_rmap_age_gfn_range(kvm, range, false);
1769 
1770 	return young;
1771 }
1772 
kvm_test_age_gfn(struct kvm * kvm,struct kvm_gfn_range * range)1773 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1774 {
1775 	bool young = false;
1776 
1777 	if (tdp_mmu_enabled)
1778 		young = kvm_tdp_mmu_test_age_gfn(kvm, range);
1779 
1780 	if (young)
1781 		return young;
1782 
1783 	if (kvm_may_have_shadow_mmu_sptes(kvm))
1784 		young |= kvm_rmap_age_gfn_range(kvm, range, true);
1785 
1786 	return young;
1787 }
1788 
kvm_mmu_check_sptes_at_free(struct kvm_mmu_page * sp)1789 static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
1790 {
1791 #ifdef CONFIG_KVM_PROVE_MMU
1792 	int i;
1793 
1794 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
1795 		if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i])))
1796 			pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free",
1797 					   sp->spt[i], &sp->spt[i],
1798 					   kvm_mmu_page_get_gfn(sp, i));
1799 	}
1800 #endif
1801 }
1802 
kvm_account_mmu_page(struct kvm * kvm,struct kvm_mmu_page * sp)1803 static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1804 {
1805 	kvm->arch.n_used_mmu_pages++;
1806 	kvm_account_pgtable_pages((void *)sp->spt, +1);
1807 }
1808 
kvm_unaccount_mmu_page(struct kvm * kvm,struct kvm_mmu_page * sp)1809 static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1810 {
1811 	kvm->arch.n_used_mmu_pages--;
1812 	kvm_account_pgtable_pages((void *)sp->spt, -1);
1813 }
1814 
kvm_mmu_free_shadow_page(struct kvm_mmu_page * sp)1815 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
1816 {
1817 	kvm_mmu_check_sptes_at_free(sp);
1818 
1819 	hlist_del(&sp->hash_link);
1820 	list_del(&sp->link);
1821 	free_page((unsigned long)sp->spt);
1822 	free_page((unsigned long)sp->shadowed_translation);
1823 	kmem_cache_free(mmu_page_header_cache, sp);
1824 }
1825 
kvm_page_table_hashfn(gfn_t gfn)1826 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1827 {
1828 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1829 }
1830 
mmu_page_add_parent_pte(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,struct kvm_mmu_page * sp,u64 * parent_pte)1831 static void mmu_page_add_parent_pte(struct kvm *kvm,
1832 				    struct kvm_mmu_memory_cache *cache,
1833 				    struct kvm_mmu_page *sp, u64 *parent_pte)
1834 {
1835 	if (!parent_pte)
1836 		return;
1837 
1838 	pte_list_add(kvm, cache, parent_pte, &sp->parent_ptes);
1839 }
1840 
mmu_page_remove_parent_pte(struct kvm * kvm,struct kvm_mmu_page * sp,u64 * parent_pte)1841 static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1842 				       u64 *parent_pte)
1843 {
1844 	pte_list_remove(kvm, parent_pte, &sp->parent_ptes);
1845 }
1846 
drop_parent_pte(struct kvm * kvm,struct kvm_mmu_page * sp,u64 * parent_pte)1847 static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1848 			    u64 *parent_pte)
1849 {
1850 	mmu_page_remove_parent_pte(kvm, sp, parent_pte);
1851 	mmu_spte_clear_no_track(parent_pte);
1852 }
1853 
1854 static void mark_unsync(u64 *spte);
kvm_mmu_mark_parents_unsync(struct kvm_mmu_page * sp)1855 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1856 {
1857 	u64 *sptep;
1858 	struct rmap_iterator iter;
1859 
1860 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1861 		mark_unsync(sptep);
1862 	}
1863 }
1864 
mark_unsync(u64 * spte)1865 static void mark_unsync(u64 *spte)
1866 {
1867 	struct kvm_mmu_page *sp;
1868 
1869 	sp = sptep_to_sp(spte);
1870 	if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap))
1871 		return;
1872 	if (sp->unsync_children++)
1873 		return;
1874 	kvm_mmu_mark_parents_unsync(sp);
1875 }
1876 
1877 #define KVM_PAGE_ARRAY_NR 16
1878 
1879 struct kvm_mmu_pages {
1880 	struct mmu_page_and_offset {
1881 		struct kvm_mmu_page *sp;
1882 		unsigned int idx;
1883 	} page[KVM_PAGE_ARRAY_NR];
1884 	unsigned int nr;
1885 };
1886 
mmu_pages_add(struct kvm_mmu_pages * pvec,struct kvm_mmu_page * sp,int idx)1887 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1888 			 int idx)
1889 {
1890 	int i;
1891 
1892 	if (sp->unsync)
1893 		for (i=0; i < pvec->nr; i++)
1894 			if (pvec->page[i].sp == sp)
1895 				return 0;
1896 
1897 	pvec->page[pvec->nr].sp = sp;
1898 	pvec->page[pvec->nr].idx = idx;
1899 	pvec->nr++;
1900 	return (pvec->nr == KVM_PAGE_ARRAY_NR);
1901 }
1902 
clear_unsync_child_bit(struct kvm_mmu_page * sp,int idx)1903 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1904 {
1905 	--sp->unsync_children;
1906 	WARN_ON_ONCE((int)sp->unsync_children < 0);
1907 	__clear_bit(idx, sp->unsync_child_bitmap);
1908 }
1909 
__mmu_unsync_walk(struct kvm_mmu_page * sp,struct kvm_mmu_pages * pvec)1910 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1911 			   struct kvm_mmu_pages *pvec)
1912 {
1913 	int i, ret, nr_unsync_leaf = 0;
1914 
1915 	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1916 		struct kvm_mmu_page *child;
1917 		u64 ent = sp->spt[i];
1918 
1919 		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1920 			clear_unsync_child_bit(sp, i);
1921 			continue;
1922 		}
1923 
1924 		child = spte_to_child_sp(ent);
1925 
1926 		if (child->unsync_children) {
1927 			if (mmu_pages_add(pvec, child, i))
1928 				return -ENOSPC;
1929 
1930 			ret = __mmu_unsync_walk(child, pvec);
1931 			if (!ret) {
1932 				clear_unsync_child_bit(sp, i);
1933 				continue;
1934 			} else if (ret > 0) {
1935 				nr_unsync_leaf += ret;
1936 			} else
1937 				return ret;
1938 		} else if (child->unsync) {
1939 			nr_unsync_leaf++;
1940 			if (mmu_pages_add(pvec, child, i))
1941 				return -ENOSPC;
1942 		} else
1943 			clear_unsync_child_bit(sp, i);
1944 	}
1945 
1946 	return nr_unsync_leaf;
1947 }
1948 
1949 #define INVALID_INDEX (-1)
1950 
mmu_unsync_walk(struct kvm_mmu_page * sp,struct kvm_mmu_pages * pvec)1951 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1952 			   struct kvm_mmu_pages *pvec)
1953 {
1954 	pvec->nr = 0;
1955 	if (!sp->unsync_children)
1956 		return 0;
1957 
1958 	mmu_pages_add(pvec, sp, INVALID_INDEX);
1959 	return __mmu_unsync_walk(sp, pvec);
1960 }
1961 
kvm_unlink_unsync_page(struct kvm * kvm,struct kvm_mmu_page * sp)1962 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1963 {
1964 	WARN_ON_ONCE(!sp->unsync);
1965 	trace_kvm_mmu_sync_page(sp);
1966 	sp->unsync = 0;
1967 	--kvm->stat.mmu_unsync;
1968 }
1969 
1970 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1971 				     struct list_head *invalid_list);
1972 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1973 				    struct list_head *invalid_list);
1974 
sp_has_gptes(struct kvm_mmu_page * sp)1975 static bool sp_has_gptes(struct kvm_mmu_page *sp)
1976 {
1977 	if (sp->role.direct)
1978 		return false;
1979 
1980 	if (sp->role.passthrough)
1981 		return false;
1982 
1983 	return true;
1984 }
1985 
1986 #define for_each_valid_sp(_kvm, _sp, _list)				\
1987 	hlist_for_each_entry(_sp, _list, hash_link)			\
1988 		if (is_obsolete_sp((_kvm), (_sp))) {			\
1989 		} else
1990 
1991 #define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)		\
1992 	for_each_valid_sp(_kvm, _sp,					\
1993 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
1994 		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
1995 
kvm_sync_page_check(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp)1996 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1997 {
1998 	union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
1999 
2000 	/*
2001 	 * Ignore various flags when verifying that it's safe to sync a shadow
2002 	 * page using the current MMU context.
2003 	 *
2004 	 *  - level: not part of the overall MMU role and will never match as the MMU's
2005 	 *           level tracks the root level
2006 	 *  - access: updated based on the new guest PTE
2007 	 *  - quadrant: not part of the overall MMU role (similar to level)
2008 	 */
2009 	const union kvm_mmu_page_role sync_role_ign = {
2010 		.level = 0xf,
2011 		.access = 0x7,
2012 		.quadrant = 0x3,
2013 		.passthrough = 0x1,
2014 	};
2015 
2016 	/*
2017 	 * Direct pages can never be unsync, and KVM should never attempt to
2018 	 * sync a shadow page for a different MMU context, e.g. if the role
2019 	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
2020 	 * reserved bits checks will be wrong, etc...
2021 	 */
2022 	if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
2023 			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
2024 		return false;
2025 
2026 	return true;
2027 }
2028 
kvm_sync_spte(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp,int i)2029 static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
2030 {
2031 	/* sp->spt[i] has initial value of shadow page table allocation */
2032 	if (sp->spt[i] == SHADOW_NONPRESENT_VALUE)
2033 		return 0;
2034 
2035 	return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
2036 }
2037 
__kvm_sync_page(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp)2038 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2039 {
2040 	int flush = 0;
2041 	int i;
2042 
2043 	if (!kvm_sync_page_check(vcpu, sp))
2044 		return -1;
2045 
2046 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
2047 		int ret = kvm_sync_spte(vcpu, sp, i);
2048 
2049 		if (ret < -1)
2050 			return -1;
2051 		flush |= ret;
2052 	}
2053 
2054 	/*
2055 	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
2056 	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
2057 	 * unmap or dirty logging event doesn't fail to flush.  The guest is
2058 	 * responsible for flushing the TLB to ensure any changes in protection
2059 	 * bits are recognized, i.e. until the guest flushes or page faults on
2060 	 * a relevant address, KVM is architecturally allowed to let vCPUs use
2061 	 * cached translations with the old protection bits.
2062 	 */
2063 	return flush;
2064 }
2065 
kvm_sync_page(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp,struct list_head * invalid_list)2066 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2067 			 struct list_head *invalid_list)
2068 {
2069 	int ret = __kvm_sync_page(vcpu, sp);
2070 
2071 	if (ret < 0)
2072 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2073 	return ret;
2074 }
2075 
kvm_mmu_remote_flush_or_zap(struct kvm * kvm,struct list_head * invalid_list,bool remote_flush)2076 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
2077 					struct list_head *invalid_list,
2078 					bool remote_flush)
2079 {
2080 	if (!remote_flush && list_empty(invalid_list))
2081 		return false;
2082 
2083 	if (!list_empty(invalid_list))
2084 		kvm_mmu_commit_zap_page(kvm, invalid_list);
2085 	else
2086 		kvm_flush_remote_tlbs(kvm);
2087 	return true;
2088 }
2089 
is_obsolete_sp(struct kvm * kvm,struct kvm_mmu_page * sp)2090 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
2091 {
2092 	if (sp->role.invalid)
2093 		return true;
2094 
2095 	/* TDP MMU pages do not use the MMU generation. */
2096 	return !is_tdp_mmu_page(sp) &&
2097 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2098 }
2099 
2100 struct mmu_page_path {
2101 	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
2102 	unsigned int idx[PT64_ROOT_MAX_LEVEL];
2103 };
2104 
2105 #define for_each_sp(pvec, sp, parents, i)			\
2106 		for (i = mmu_pages_first(&pvec, &parents);	\
2107 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
2108 			i = mmu_pages_next(&pvec, &parents, i))
2109 
mmu_pages_next(struct kvm_mmu_pages * pvec,struct mmu_page_path * parents,int i)2110 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
2111 			  struct mmu_page_path *parents,
2112 			  int i)
2113 {
2114 	int n;
2115 
2116 	for (n = i+1; n < pvec->nr; n++) {
2117 		struct kvm_mmu_page *sp = pvec->page[n].sp;
2118 		unsigned idx = pvec->page[n].idx;
2119 		int level = sp->role.level;
2120 
2121 		parents->idx[level-1] = idx;
2122 		if (level == PG_LEVEL_4K)
2123 			break;
2124 
2125 		parents->parent[level-2] = sp;
2126 	}
2127 
2128 	return n;
2129 }
2130 
mmu_pages_first(struct kvm_mmu_pages * pvec,struct mmu_page_path * parents)2131 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
2132 			   struct mmu_page_path *parents)
2133 {
2134 	struct kvm_mmu_page *sp;
2135 	int level;
2136 
2137 	if (pvec->nr == 0)
2138 		return 0;
2139 
2140 	WARN_ON_ONCE(pvec->page[0].idx != INVALID_INDEX);
2141 
2142 	sp = pvec->page[0].sp;
2143 	level = sp->role.level;
2144 	WARN_ON_ONCE(level == PG_LEVEL_4K);
2145 
2146 	parents->parent[level-2] = sp;
2147 
2148 	/* Also set up a sentinel.  Further entries in pvec are all
2149 	 * children of sp, so this element is never overwritten.
2150 	 */
2151 	parents->parent[level-1] = NULL;
2152 	return mmu_pages_next(pvec, parents, 0);
2153 }
2154 
mmu_pages_clear_parents(struct mmu_page_path * parents)2155 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2156 {
2157 	struct kvm_mmu_page *sp;
2158 	unsigned int level = 0;
2159 
2160 	do {
2161 		unsigned int idx = parents->idx[level];
2162 		sp = parents->parent[level];
2163 		if (!sp)
2164 			return;
2165 
2166 		WARN_ON_ONCE(idx == INVALID_INDEX);
2167 		clear_unsync_child_bit(sp, idx);
2168 		level++;
2169 	} while (!sp->unsync_children);
2170 }
2171 
mmu_sync_children(struct kvm_vcpu * vcpu,struct kvm_mmu_page * parent,bool can_yield)2172 static int mmu_sync_children(struct kvm_vcpu *vcpu,
2173 			     struct kvm_mmu_page *parent, bool can_yield)
2174 {
2175 	int i;
2176 	struct kvm_mmu_page *sp;
2177 	struct mmu_page_path parents;
2178 	struct kvm_mmu_pages pages;
2179 	LIST_HEAD(invalid_list);
2180 	bool flush = false;
2181 
2182 	while (mmu_unsync_walk(parent, &pages)) {
2183 		bool protected = false;
2184 
2185 		for_each_sp(pages, sp, parents, i)
2186 			protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
2187 
2188 		if (protected) {
2189 			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
2190 			flush = false;
2191 		}
2192 
2193 		for_each_sp(pages, sp, parents, i) {
2194 			kvm_unlink_unsync_page(vcpu->kvm, sp);
2195 			flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
2196 			mmu_pages_clear_parents(&parents);
2197 		}
2198 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2199 			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2200 			if (!can_yield) {
2201 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2202 				return -EINTR;
2203 			}
2204 
2205 			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2206 			flush = false;
2207 		}
2208 	}
2209 
2210 	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2211 	return 0;
2212 }
2213 
__clear_sp_write_flooding_count(struct kvm_mmu_page * sp)2214 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2215 {
2216 	atomic_set(&sp->write_flooding_count,  0);
2217 }
2218 
clear_sp_write_flooding_count(u64 * spte)2219 static void clear_sp_write_flooding_count(u64 *spte)
2220 {
2221 	__clear_sp_write_flooding_count(sptep_to_sp(spte));
2222 }
2223 
2224 /*
2225  * The vCPU is required when finding indirect shadow pages; the shadow
2226  * page may already exist and syncing it needs the vCPU pointer in
2227  * order to read guest page tables.  Direct shadow pages are never
2228  * unsync, thus @vcpu can be NULL if @role.direct is true.
2229  */
kvm_mmu_find_shadow_page(struct kvm * kvm,struct kvm_vcpu * vcpu,gfn_t gfn,struct hlist_head * sp_list,union kvm_mmu_page_role role)2230 static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
2231 						     struct kvm_vcpu *vcpu,
2232 						     gfn_t gfn,
2233 						     struct hlist_head *sp_list,
2234 						     union kvm_mmu_page_role role)
2235 {
2236 	struct kvm_mmu_page *sp;
2237 	int ret;
2238 	int collisions = 0;
2239 	LIST_HEAD(invalid_list);
2240 
2241 	for_each_valid_sp(kvm, sp, sp_list) {
2242 		if (sp->gfn != gfn) {
2243 			collisions++;
2244 			continue;
2245 		}
2246 
2247 		if (sp->role.word != role.word) {
2248 			/*
2249 			 * If the guest is creating an upper-level page, zap
2250 			 * unsync pages for the same gfn.  While it's possible
2251 			 * the guest is using recursive page tables, in all
2252 			 * likelihood the guest has stopped using the unsync
2253 			 * page and is installing a completely unrelated page.
2254 			 * Unsync pages must not be left as is, because the new
2255 			 * upper-level page will be write-protected.
2256 			 */
2257 			if (role.level > PG_LEVEL_4K && sp->unsync)
2258 				kvm_mmu_prepare_zap_page(kvm, sp,
2259 							 &invalid_list);
2260 			continue;
2261 		}
2262 
2263 		/* unsync and write-flooding only apply to indirect SPs. */
2264 		if (sp->role.direct)
2265 			goto out;
2266 
2267 		if (sp->unsync) {
2268 			if (KVM_BUG_ON(!vcpu, kvm))
2269 				break;
2270 
2271 			/*
2272 			 * The page is good, but is stale.  kvm_sync_page does
2273 			 * get the latest guest state, but (unlike mmu_unsync_children)
2274 			 * it doesn't write-protect the page or mark it synchronized!
2275 			 * This way the validity of the mapping is ensured, but the
2276 			 * overhead of write protection is not incurred until the
2277 			 * guest invalidates the TLB mapping.  This allows multiple
2278 			 * SPs for a single gfn to be unsync.
2279 			 *
2280 			 * If the sync fails, the page is zapped.  If so, break
2281 			 * in order to rebuild it.
2282 			 */
2283 			ret = kvm_sync_page(vcpu, sp, &invalid_list);
2284 			if (ret < 0)
2285 				break;
2286 
2287 			WARN_ON_ONCE(!list_empty(&invalid_list));
2288 			if (ret > 0)
2289 				kvm_flush_remote_tlbs(kvm);
2290 		}
2291 
2292 		__clear_sp_write_flooding_count(sp);
2293 
2294 		goto out;
2295 	}
2296 
2297 	sp = NULL;
2298 	++kvm->stat.mmu_cache_miss;
2299 
2300 out:
2301 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2302 
2303 	if (collisions > kvm->stat.max_mmu_page_hash_collisions)
2304 		kvm->stat.max_mmu_page_hash_collisions = collisions;
2305 	return sp;
2306 }
2307 
2308 /* Caches used when allocating a new shadow page. */
2309 struct shadow_page_caches {
2310 	struct kvm_mmu_memory_cache *page_header_cache;
2311 	struct kvm_mmu_memory_cache *shadow_page_cache;
2312 	struct kvm_mmu_memory_cache *shadowed_info_cache;
2313 };
2314 
kvm_mmu_alloc_shadow_page(struct kvm * kvm,struct shadow_page_caches * caches,gfn_t gfn,struct hlist_head * sp_list,union kvm_mmu_page_role role)2315 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
2316 						      struct shadow_page_caches *caches,
2317 						      gfn_t gfn,
2318 						      struct hlist_head *sp_list,
2319 						      union kvm_mmu_page_role role)
2320 {
2321 	struct kvm_mmu_page *sp;
2322 
2323 	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
2324 	sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
2325 	if (!role.direct && role.level <= KVM_MAX_HUGEPAGE_LEVEL)
2326 		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
2327 
2328 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
2329 
2330 	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
2331 
2332 	/*
2333 	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
2334 	 * depends on valid pages being added to the head of the list.  See
2335 	 * comments in kvm_zap_obsolete_pages().
2336 	 */
2337 	sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
2338 	list_add(&sp->link, &kvm->arch.active_mmu_pages);
2339 	kvm_account_mmu_page(kvm, sp);
2340 
2341 	sp->gfn = gfn;
2342 	sp->role = role;
2343 	hlist_add_head(&sp->hash_link, sp_list);
2344 	if (sp_has_gptes(sp))
2345 		account_shadowed(kvm, sp);
2346 
2347 	return sp;
2348 }
2349 
2350 /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
__kvm_mmu_get_shadow_page(struct kvm * kvm,struct kvm_vcpu * vcpu,struct shadow_page_caches * caches,gfn_t gfn,union kvm_mmu_page_role role)2351 static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
2352 						      struct kvm_vcpu *vcpu,
2353 						      struct shadow_page_caches *caches,
2354 						      gfn_t gfn,
2355 						      union kvm_mmu_page_role role)
2356 {
2357 	struct hlist_head *sp_list;
2358 	struct kvm_mmu_page *sp;
2359 	bool created = false;
2360 
2361 	sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2362 
2363 	sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2364 	if (!sp) {
2365 		created = true;
2366 		sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2367 	}
2368 
2369 	trace_kvm_mmu_get_page(sp, created);
2370 	return sp;
2371 }
2372 
kvm_mmu_get_shadow_page(struct kvm_vcpu * vcpu,gfn_t gfn,union kvm_mmu_page_role role)2373 static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
2374 						    gfn_t gfn,
2375 						    union kvm_mmu_page_role role)
2376 {
2377 	struct shadow_page_caches caches = {
2378 		.page_header_cache = &vcpu->arch.mmu_page_header_cache,
2379 		.shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
2380 		.shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
2381 	};
2382 
2383 	return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
2384 }
2385 
kvm_mmu_child_role(u64 * sptep,bool direct,unsigned int access)2386 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct,
2387 						  unsigned int access)
2388 {
2389 	struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);
2390 	union kvm_mmu_page_role role;
2391 
2392 	role = parent_sp->role;
2393 	role.level--;
2394 	role.access = access;
2395 	role.direct = direct;
2396 	role.passthrough = 0;
2397 
2398 	/*
2399 	 * If the guest has 4-byte PTEs then that means it's using 32-bit,
2400 	 * 2-level, non-PAE paging. KVM shadows such guests with PAE paging
2401 	 * (i.e. 8-byte PTEs). The difference in PTE size means that KVM must
2402 	 * shadow each guest page table with multiple shadow page tables, which
2403 	 * requires extra bookkeeping in the role.
2404 	 *
2405 	 * Specifically, to shadow the guest's page directory (which covers a
2406 	 * 4GiB address space), KVM uses 4 PAE page directories, each mapping
2407 	 * 1GiB of the address space. @role.quadrant encodes which quarter of
2408 	 * the address space each maps.
2409 	 *
2410 	 * To shadow the guest's page tables (which each map a 4MiB region), KVM
2411 	 * uses 2 PAE page tables, each mapping a 2MiB region. For these,
2412 	 * @role.quadrant encodes which half of the region they map.
2413 	 *
2414 	 * Concretely, a 4-byte PDE consumes bits 31:22, while an 8-byte PDE
2415 	 * consumes bits 29:21.  To consume bits 31:30, KVM's uses 4 shadow
2416 	 * PDPTEs; those 4 PAE page directories are pre-allocated and their
2417 	 * quadrant is assigned in mmu_alloc_root().   A 4-byte PTE consumes
2418 	 * bits 21:12, while an 8-byte PTE consumes bits 20:12.  To consume
2419 	 * bit 21 in the PTE (the child here), KVM propagates that bit to the
2420 	 * quadrant, i.e. sets quadrant to '0' or '1'.  The parent 8-byte PDE
2421 	 * covers bit 21 (see above), thus the quadrant is calculated from the
2422 	 * _least_ significant bit of the PDE index.
2423 	 */
2424 	if (role.has_4_byte_gpte) {
2425 		WARN_ON_ONCE(role.level != PG_LEVEL_4K);
2426 		role.quadrant = spte_index(sptep) & 1;
2427 	}
2428 
2429 	return role;
2430 }
2431 
kvm_mmu_get_child_sp(struct kvm_vcpu * vcpu,u64 * sptep,gfn_t gfn,bool direct,unsigned int access)2432 static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
2433 						 u64 *sptep, gfn_t gfn,
2434 						 bool direct, unsigned int access)
2435 {
2436 	union kvm_mmu_page_role role;
2437 
2438 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
2439 		return ERR_PTR(-EEXIST);
2440 
2441 	role = kvm_mmu_child_role(sptep, direct, access);
2442 	return kvm_mmu_get_shadow_page(vcpu, gfn, role);
2443 }
2444 
shadow_walk_init_using_root(struct kvm_shadow_walk_iterator * iterator,struct kvm_vcpu * vcpu,hpa_t root,u64 addr)2445 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2446 					struct kvm_vcpu *vcpu, hpa_t root,
2447 					u64 addr)
2448 {
2449 	iterator->addr = addr;
2450 	iterator->shadow_addr = root;
2451 	iterator->level = vcpu->arch.mmu->root_role.level;
2452 
2453 	if (iterator->level >= PT64_ROOT_4LEVEL &&
2454 	    vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
2455 	    !vcpu->arch.mmu->root_role.direct)
2456 		iterator->level = PT32E_ROOT_LEVEL;
2457 
2458 	if (iterator->level == PT32E_ROOT_LEVEL) {
2459 		/*
2460 		 * prev_root is currently only used for 64-bit hosts. So only
2461 		 * the active root_hpa is valid here.
2462 		 */
2463 		BUG_ON(root != vcpu->arch.mmu->root.hpa);
2464 
2465 		iterator->shadow_addr
2466 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2467 		iterator->shadow_addr &= SPTE_BASE_ADDR_MASK;
2468 		--iterator->level;
2469 		if (!iterator->shadow_addr)
2470 			iterator->level = 0;
2471 	}
2472 }
2473 
shadow_walk_init(struct kvm_shadow_walk_iterator * iterator,struct kvm_vcpu * vcpu,u64 addr)2474 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2475 			     struct kvm_vcpu *vcpu, u64 addr)
2476 {
2477 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2478 				    addr);
2479 }
2480 
shadow_walk_okay(struct kvm_shadow_walk_iterator * iterator)2481 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2482 {
2483 	if (iterator->level < PG_LEVEL_4K)
2484 		return false;
2485 
2486 	iterator->index = SPTE_INDEX(iterator->addr, iterator->level);
2487 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2488 	return true;
2489 }
2490 
__shadow_walk_next(struct kvm_shadow_walk_iterator * iterator,u64 spte)2491 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2492 			       u64 spte)
2493 {
2494 	if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2495 		iterator->level = 0;
2496 		return;
2497 	}
2498 
2499 	iterator->shadow_addr = spte & SPTE_BASE_ADDR_MASK;
2500 	--iterator->level;
2501 }
2502 
shadow_walk_next(struct kvm_shadow_walk_iterator * iterator)2503 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2504 {
2505 	__shadow_walk_next(iterator, *iterator->sptep);
2506 }
2507 
__link_shadow_page(struct kvm * kvm,struct kvm_mmu_memory_cache * cache,u64 * sptep,struct kvm_mmu_page * sp,bool flush)2508 static void __link_shadow_page(struct kvm *kvm,
2509 			       struct kvm_mmu_memory_cache *cache, u64 *sptep,
2510 			       struct kvm_mmu_page *sp, bool flush)
2511 {
2512 	u64 spte;
2513 
2514 	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2515 
2516 	/*
2517 	 * If an SPTE is present already, it must be a leaf and therefore
2518 	 * a large one.  Drop it, and flush the TLB if needed, before
2519 	 * installing sp.
2520 	 */
2521 	if (is_shadow_present_pte(*sptep))
2522 		drop_large_spte(kvm, sptep, flush);
2523 
2524 	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2525 
2526 	mmu_spte_set(sptep, spte);
2527 
2528 	mmu_page_add_parent_pte(kvm, cache, sp, sptep);
2529 
2530 	/*
2531 	 * The non-direct sub-pagetable must be updated before linking.  For
2532 	 * L1 sp, the pagetable is updated via kvm_sync_page() in
2533 	 * kvm_mmu_find_shadow_page() without write-protecting the gfn,
2534 	 * so sp->unsync can be true or false.  For higher level non-direct
2535 	 * sp, the pagetable is updated/synced via mmu_sync_children() in
2536 	 * FNAME(fetch)(), so sp->unsync_children can only be false.
2537 	 * WARN_ON_ONCE() if anything happens unexpectedly.
2538 	 */
2539 	if (WARN_ON_ONCE(sp->unsync_children) || sp->unsync)
2540 		mark_unsync(sptep);
2541 }
2542 
link_shadow_page(struct kvm_vcpu * vcpu,u64 * sptep,struct kvm_mmu_page * sp)2543 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2544 			     struct kvm_mmu_page *sp)
2545 {
2546 	__link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true);
2547 }
2548 
validate_direct_spte(struct kvm_vcpu * vcpu,u64 * sptep,unsigned direct_access)2549 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2550 				   unsigned direct_access)
2551 {
2552 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2553 		struct kvm_mmu_page *child;
2554 
2555 		/*
2556 		 * For the direct sp, if the guest pte's dirty bit
2557 		 * changed form clean to dirty, it will corrupt the
2558 		 * sp's access: allow writable in the read-only sp,
2559 		 * so we should update the spte at this point to get
2560 		 * a new sp with the correct access.
2561 		 */
2562 		child = spte_to_child_sp(*sptep);
2563 		if (child->role.access == direct_access)
2564 			return;
2565 
2566 		drop_parent_pte(vcpu->kvm, child, sptep);
2567 		kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
2568 	}
2569 }
2570 
2571 /* Returns the number of zapped non-leaf child shadow pages. */
mmu_page_zap_pte(struct kvm * kvm,struct kvm_mmu_page * sp,u64 * spte,struct list_head * invalid_list)2572 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2573 			    u64 *spte, struct list_head *invalid_list)
2574 {
2575 	u64 pte;
2576 	struct kvm_mmu_page *child;
2577 
2578 	pte = *spte;
2579 	if (is_shadow_present_pte(pte)) {
2580 		if (is_last_spte(pte, sp->role.level)) {
2581 			drop_spte(kvm, spte);
2582 		} else {
2583 			child = spte_to_child_sp(pte);
2584 			drop_parent_pte(kvm, child, spte);
2585 
2586 			/*
2587 			 * Recursively zap nested TDP SPs, parentless SPs are
2588 			 * unlikely to be used again in the near future.  This
2589 			 * avoids retaining a large number of stale nested SPs.
2590 			 */
2591 			if (tdp_enabled && invalid_list &&
2592 			    child->role.guest_mode &&
2593 			    !atomic_long_read(&child->parent_ptes.val))
2594 				return kvm_mmu_prepare_zap_page(kvm, child,
2595 								invalid_list);
2596 		}
2597 	} else if (is_mmio_spte(kvm, pte)) {
2598 		mmu_spte_clear_no_track(spte);
2599 	}
2600 	return 0;
2601 }
2602 
kvm_mmu_page_unlink_children(struct kvm * kvm,struct kvm_mmu_page * sp,struct list_head * invalid_list)2603 static int kvm_mmu_page_unlink_children(struct kvm *kvm,
2604 					struct kvm_mmu_page *sp,
2605 					struct list_head *invalid_list)
2606 {
2607 	int zapped = 0;
2608 	unsigned i;
2609 
2610 	for (i = 0; i < SPTE_ENT_PER_PAGE; ++i)
2611 		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
2612 
2613 	return zapped;
2614 }
2615 
kvm_mmu_unlink_parents(struct kvm * kvm,struct kvm_mmu_page * sp)2616 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2617 {
2618 	u64 *sptep;
2619 	struct rmap_iterator iter;
2620 
2621 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2622 		drop_parent_pte(kvm, sp, sptep);
2623 }
2624 
mmu_zap_unsync_children(struct kvm * kvm,struct kvm_mmu_page * parent,struct list_head * invalid_list)2625 static int mmu_zap_unsync_children(struct kvm *kvm,
2626 				   struct kvm_mmu_page *parent,
2627 				   struct list_head *invalid_list)
2628 {
2629 	int i, zapped = 0;
2630 	struct mmu_page_path parents;
2631 	struct kvm_mmu_pages pages;
2632 
2633 	if (parent->role.level == PG_LEVEL_4K)
2634 		return 0;
2635 
2636 	while (mmu_unsync_walk(parent, &pages)) {
2637 		struct kvm_mmu_page *sp;
2638 
2639 		for_each_sp(pages, sp, parents, i) {
2640 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2641 			mmu_pages_clear_parents(&parents);
2642 			zapped++;
2643 		}
2644 	}
2645 
2646 	return zapped;
2647 }
2648 
__kvm_mmu_prepare_zap_page(struct kvm * kvm,struct kvm_mmu_page * sp,struct list_head * invalid_list,int * nr_zapped)2649 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2650 				       struct kvm_mmu_page *sp,
2651 				       struct list_head *invalid_list,
2652 				       int *nr_zapped)
2653 {
2654 	bool list_unstable, zapped_root = false;
2655 
2656 	lockdep_assert_held_write(&kvm->mmu_lock);
2657 	trace_kvm_mmu_prepare_zap_page(sp);
2658 	++kvm->stat.mmu_shadow_zapped;
2659 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2660 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2661 	kvm_mmu_unlink_parents(kvm, sp);
2662 
2663 	/* Zapping children means active_mmu_pages has become unstable. */
2664 	list_unstable = *nr_zapped;
2665 
2666 	if (!sp->role.invalid && sp_has_gptes(sp))
2667 		unaccount_shadowed(kvm, sp);
2668 
2669 	if (sp->unsync)
2670 		kvm_unlink_unsync_page(kvm, sp);
2671 	if (!sp->root_count) {
2672 		/* Count self */
2673 		(*nr_zapped)++;
2674 
2675 		/*
2676 		 * Already invalid pages (previously active roots) are not on
2677 		 * the active page list.  See list_del() in the "else" case of
2678 		 * !sp->root_count.
2679 		 */
2680 		if (sp->role.invalid)
2681 			list_add(&sp->link, invalid_list);
2682 		else
2683 			list_move(&sp->link, invalid_list);
2684 		kvm_unaccount_mmu_page(kvm, sp);
2685 	} else {
2686 		/*
2687 		 * Remove the active root from the active page list, the root
2688 		 * will be explicitly freed when the root_count hits zero.
2689 		 */
2690 		list_del(&sp->link);
2691 
2692 		/*
2693 		 * Obsolete pages cannot be used on any vCPUs, see the comment
2694 		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2695 		 * treats invalid shadow pages as being obsolete.
2696 		 */
2697 		zapped_root = !is_obsolete_sp(kvm, sp);
2698 	}
2699 
2700 	if (sp->nx_huge_page_disallowed)
2701 		unaccount_nx_huge_page(kvm, sp);
2702 
2703 	sp->role.invalid = 1;
2704 
2705 	/*
2706 	 * Make the request to free obsolete roots after marking the root
2707 	 * invalid, otherwise other vCPUs may not see it as invalid.
2708 	 */
2709 	if (zapped_root)
2710 		kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
2711 	return list_unstable;
2712 }
2713 
kvm_mmu_prepare_zap_page(struct kvm * kvm,struct kvm_mmu_page * sp,struct list_head * invalid_list)2714 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2715 				     struct list_head *invalid_list)
2716 {
2717 	int nr_zapped;
2718 
2719 	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2720 	return nr_zapped;
2721 }
2722 
kvm_mmu_commit_zap_page(struct kvm * kvm,struct list_head * invalid_list)2723 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2724 				    struct list_head *invalid_list)
2725 {
2726 	struct kvm_mmu_page *sp, *nsp;
2727 
2728 	if (list_empty(invalid_list))
2729 		return;
2730 
2731 	/*
2732 	 * We need to make sure everyone sees our modifications to
2733 	 * the page tables and see changes to vcpu->mode here. The barrier
2734 	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
2735 	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2736 	 *
2737 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2738 	 * guest mode and/or lockless shadow page table walks.
2739 	 */
2740 	kvm_flush_remote_tlbs(kvm);
2741 
2742 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2743 		WARN_ON_ONCE(!sp->role.invalid || sp->root_count);
2744 		kvm_mmu_free_shadow_page(sp);
2745 	}
2746 }
2747 
kvm_mmu_zap_oldest_mmu_pages(struct kvm * kvm,unsigned long nr_to_zap)2748 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
2749 						  unsigned long nr_to_zap)
2750 {
2751 	unsigned long total_zapped = 0;
2752 	struct kvm_mmu_page *sp, *tmp;
2753 	LIST_HEAD(invalid_list);
2754 	bool unstable;
2755 	int nr_zapped;
2756 
2757 	if (list_empty(&kvm->arch.active_mmu_pages))
2758 		return 0;
2759 
2760 restart:
2761 	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2762 		/*
2763 		 * Don't zap active root pages, the page itself can't be freed
2764 		 * and zapping it will just force vCPUs to realloc and reload.
2765 		 */
2766 		if (sp->root_count)
2767 			continue;
2768 
2769 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
2770 						      &nr_zapped);
2771 		total_zapped += nr_zapped;
2772 		if (total_zapped >= nr_to_zap)
2773 			break;
2774 
2775 		if (unstable)
2776 			goto restart;
2777 	}
2778 
2779 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2780 
2781 	kvm->stat.mmu_recycled += total_zapped;
2782 	return total_zapped;
2783 }
2784 
kvm_mmu_available_pages(struct kvm * kvm)2785 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2786 {
2787 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2788 		return kvm->arch.n_max_mmu_pages -
2789 			kvm->arch.n_used_mmu_pages;
2790 
2791 	return 0;
2792 }
2793 
make_mmu_pages_available(struct kvm_vcpu * vcpu)2794 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2795 {
2796 	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2797 
2798 	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2799 		return 0;
2800 
2801 	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2802 
2803 	/*
2804 	 * Note, this check is intentionally soft, it only guarantees that one
2805 	 * page is available, while the caller may end up allocating as many as
2806 	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
2807 	 * exceeding the (arbitrary by default) limit will not harm the host,
2808 	 * being too aggressive may unnecessarily kill the guest, and getting an
2809 	 * exact count is far more trouble than it's worth, especially in the
2810 	 * page fault paths.
2811 	 */
2812 	if (!kvm_mmu_available_pages(vcpu->kvm))
2813 		return -ENOSPC;
2814 	return 0;
2815 }
2816 
2817 /*
2818  * Changing the number of mmu pages allocated to the vm
2819  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
2820  */
kvm_mmu_change_mmu_pages(struct kvm * kvm,unsigned long goal_nr_mmu_pages)2821 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2822 {
2823 	write_lock(&kvm->mmu_lock);
2824 
2825 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2826 		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2827 						  goal_nr_mmu_pages);
2828 
2829 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2830 	}
2831 
2832 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2833 
2834 	write_unlock(&kvm->mmu_lock);
2835 }
2836 
__kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu * vcpu,gpa_t cr2_or_gpa,bool always_retry)2837 bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
2838 				       bool always_retry)
2839 {
2840 	struct kvm *kvm = vcpu->kvm;
2841 	LIST_HEAD(invalid_list);
2842 	struct kvm_mmu_page *sp;
2843 	gpa_t gpa = cr2_or_gpa;
2844 	bool r = false;
2845 
2846 	/*
2847 	 * Bail early if there aren't any write-protected shadow pages to avoid
2848 	 * unnecessarily taking mmu_lock lock, e.g. if the gfn is write-tracked
2849 	 * by a third party.  Reading indirect_shadow_pages without holding
2850 	 * mmu_lock is safe, as this is purely an optimization, i.e. a false
2851 	 * positive is benign, and a false negative will simply result in KVM
2852 	 * skipping the unprotect+retry path, which is also an optimization.
2853 	 */
2854 	if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
2855 		goto out;
2856 
2857 	if (!vcpu->arch.mmu->root_role.direct) {
2858 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
2859 		if (gpa == INVALID_GPA)
2860 			goto out;
2861 	}
2862 
2863 	write_lock(&kvm->mmu_lock);
2864 	for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa))
2865 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2866 
2867 	/*
2868 	 * Snapshot the result before zapping, as zapping will remove all list
2869 	 * entries, i.e. checking the list later would yield a false negative.
2870 	 */
2871 	r = !list_empty(&invalid_list);
2872 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2873 	write_unlock(&kvm->mmu_lock);
2874 
2875 out:
2876 	if (r || always_retry) {
2877 		vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
2878 		vcpu->arch.last_retry_addr = cr2_or_gpa;
2879 	}
2880 	return r;
2881 }
2882 
kvm_unsync_page(struct kvm * kvm,struct kvm_mmu_page * sp)2883 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
2884 {
2885 	trace_kvm_mmu_unsync_page(sp);
2886 	++kvm->stat.mmu_unsync;
2887 	sp->unsync = 1;
2888 
2889 	kvm_mmu_mark_parents_unsync(sp);
2890 }
2891 
2892 /*
2893  * Attempt to unsync any shadow pages that can be reached by the specified gfn,
2894  * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
2895  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
2896  * be write-protected.
2897  */
mmu_try_to_unsync_pages(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t gfn,bool synchronizing,bool prefetch)2898 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
2899 			    gfn_t gfn, bool synchronizing, bool prefetch)
2900 {
2901 	struct kvm_mmu_page *sp;
2902 	bool locked = false;
2903 
2904 	/*
2905 	 * Force write-protection if the page is being tracked.  Note, the page
2906 	 * track machinery is used to write-protect upper-level shadow pages,
2907 	 * i.e. this guards the role.level == 4K assertion below!
2908 	 */
2909 	if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
2910 		return -EPERM;
2911 
2912 	/*
2913 	 * The page is not write-tracked, mark existing shadow pages unsync
2914 	 * unless KVM is synchronizing an unsync SP.  In that case, KVM must
2915 	 * complete emulation of the guest TLB flush before allowing shadow
2916 	 * pages to become unsync (writable by the guest).
2917 	 */
2918 	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2919 		if (synchronizing)
2920 			return -EPERM;
2921 
2922 		if (sp->unsync)
2923 			continue;
2924 
2925 		if (prefetch)
2926 			return -EEXIST;
2927 
2928 		/*
2929 		 * TDP MMU page faults require an additional spinlock as they
2930 		 * run with mmu_lock held for read, not write, and the unsync
2931 		 * logic is not thread safe.  Take the spinklock regardless of
2932 		 * the MMU type to avoid extra conditionals/parameters, there's
2933 		 * no meaningful penalty if mmu_lock is held for write.
2934 		 */
2935 		if (!locked) {
2936 			locked = true;
2937 			spin_lock(&kvm->arch.mmu_unsync_pages_lock);
2938 
2939 			/*
2940 			 * Recheck after taking the spinlock, a different vCPU
2941 			 * may have since marked the page unsync.  A false
2942 			 * negative on the unprotected check above is not
2943 			 * possible as clearing sp->unsync _must_ hold mmu_lock
2944 			 * for write, i.e. unsync cannot transition from 1->0
2945 			 * while this CPU holds mmu_lock for read (or write).
2946 			 */
2947 			if (READ_ONCE(sp->unsync))
2948 				continue;
2949 		}
2950 
2951 		WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
2952 		kvm_unsync_page(kvm, sp);
2953 	}
2954 	if (locked)
2955 		spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
2956 
2957 	/*
2958 	 * We need to ensure that the marking of unsync pages is visible
2959 	 * before the SPTE is updated to allow writes because
2960 	 * kvm_mmu_sync_roots() checks the unsync flags without holding
2961 	 * the MMU lock and so can race with this. If the SPTE was updated
2962 	 * before the page had been marked as unsync-ed, something like the
2963 	 * following could happen:
2964 	 *
2965 	 * CPU 1                    CPU 2
2966 	 * ---------------------------------------------------------------------
2967 	 * 1.2 Host updates SPTE
2968 	 *     to be writable
2969 	 *                      2.1 Guest writes a GPTE for GVA X.
2970 	 *                          (GPTE being in the guest page table shadowed
2971 	 *                           by the SP from CPU 1.)
2972 	 *                          This reads SPTE during the page table walk.
2973 	 *                          Since SPTE.W is read as 1, there is no
2974 	 *                          fault.
2975 	 *
2976 	 *                      2.2 Guest issues TLB flush.
2977 	 *                          That causes a VM Exit.
2978 	 *
2979 	 *                      2.3 Walking of unsync pages sees sp->unsync is
2980 	 *                          false and skips the page.
2981 	 *
2982 	 *                      2.4 Guest accesses GVA X.
2983 	 *                          Since the mapping in the SP was not updated,
2984 	 *                          so the old mapping for GVA X incorrectly
2985 	 *                          gets used.
2986 	 * 1.1 Host marks SP
2987 	 *     as unsync
2988 	 *     (sp->unsync = true)
2989 	 *
2990 	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
2991 	 * the situation in 2.4 does not arise.  It pairs with the read barrier
2992 	 * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3.
2993 	 */
2994 	smp_wmb();
2995 
2996 	return 0;
2997 }
2998 
mmu_set_spte(struct kvm_vcpu * vcpu,struct kvm_memory_slot * slot,u64 * sptep,unsigned int pte_access,gfn_t gfn,kvm_pfn_t pfn,struct kvm_page_fault * fault)2999 static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
3000 			u64 *sptep, unsigned int pte_access, gfn_t gfn,
3001 			kvm_pfn_t pfn, struct kvm_page_fault *fault)
3002 {
3003 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
3004 	int level = sp->role.level;
3005 	int was_rmapped = 0;
3006 	int ret = RET_PF_FIXED;
3007 	bool flush = false;
3008 	bool wrprot;
3009 	u64 spte;
3010 
3011 	/* Prefetching always gets a writable pfn.  */
3012 	bool host_writable = !fault || fault->map_writable;
3013 	bool prefetch = !fault || fault->prefetch;
3014 	bool write_fault = fault && fault->write;
3015 
3016 	if (unlikely(is_noslot_pfn(pfn))) {
3017 		vcpu->stat.pf_mmio_spte_created++;
3018 		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
3019 		return RET_PF_EMULATE;
3020 	}
3021 
3022 	if (is_shadow_present_pte(*sptep)) {
3023 		if (prefetch && is_last_spte(*sptep, level) &&
3024 		    pfn == spte_to_pfn(*sptep))
3025 			return RET_PF_SPURIOUS;
3026 
3027 		/*
3028 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
3029 		 * the parent of the now unreachable PTE.
3030 		 */
3031 		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
3032 			struct kvm_mmu_page *child;
3033 			u64 pte = *sptep;
3034 
3035 			child = spte_to_child_sp(pte);
3036 			drop_parent_pte(vcpu->kvm, child, sptep);
3037 			flush = true;
3038 		} else if (WARN_ON_ONCE(pfn != spte_to_pfn(*sptep))) {
3039 			drop_spte(vcpu->kvm, sptep);
3040 			flush = true;
3041 		} else
3042 			was_rmapped = 1;
3043 	}
3044 
3045 	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
3046 			   false, host_writable, &spte);
3047 
3048 	if (*sptep == spte) {
3049 		ret = RET_PF_SPURIOUS;
3050 	} else {
3051 		flush |= mmu_spte_update(sptep, spte);
3052 		trace_kvm_mmu_set_spte(level, gfn, sptep);
3053 	}
3054 
3055 	if (wrprot && write_fault)
3056 		ret = RET_PF_WRITE_PROTECTED;
3057 
3058 	if (flush)
3059 		kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
3060 
3061 	if (!was_rmapped) {
3062 		WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
3063 		rmap_add(vcpu, slot, sptep, gfn, pte_access);
3064 	} else {
3065 		/* Already rmapped but the pte_access bits may have changed. */
3066 		kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access);
3067 	}
3068 
3069 	return ret;
3070 }
3071 
kvm_mmu_prefetch_sptes(struct kvm_vcpu * vcpu,gfn_t gfn,u64 * sptep,int nr_pages,unsigned int access)3072 static bool kvm_mmu_prefetch_sptes(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *sptep,
3073 				   int nr_pages, unsigned int access)
3074 {
3075 	struct page *pages[PTE_PREFETCH_NUM];
3076 	struct kvm_memory_slot *slot;
3077 	int i;
3078 
3079 	if (WARN_ON_ONCE(nr_pages > PTE_PREFETCH_NUM))
3080 		return false;
3081 
3082 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
3083 	if (!slot)
3084 		return false;
3085 
3086 	nr_pages = kvm_prefetch_pages(slot, gfn, pages, nr_pages);
3087 	if (nr_pages <= 0)
3088 		return false;
3089 
3090 	for (i = 0; i < nr_pages; i++, gfn++, sptep++) {
3091 		mmu_set_spte(vcpu, slot, sptep, access, gfn,
3092 			     page_to_pfn(pages[i]), NULL);
3093 
3094 		/*
3095 		 * KVM always prefetches writable pages from the primary MMU,
3096 		 * and KVM can make its SPTE writable in the fast page handler,
3097 		 * without notifying the primary MMU.  Mark pages/folios dirty
3098 		 * now to ensure file data is written back if it ends up being
3099 		 * written by the guest.  Because KVM's prefetching GUPs
3100 		 * writable PTEs, the probability of unnecessary writeback is
3101 		 * extremely low.
3102 		 */
3103 		kvm_release_page_dirty(pages[i]);
3104 	}
3105 
3106 	return true;
3107 }
3108 
direct_pte_prefetch_many(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp,u64 * start,u64 * end)3109 static bool direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
3110 				     struct kvm_mmu_page *sp,
3111 				     u64 *start, u64 *end)
3112 {
3113 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
3114 	unsigned int access = sp->role.access;
3115 
3116 	return kvm_mmu_prefetch_sptes(vcpu, gfn, start, end - start, access);
3117 }
3118 
__direct_pte_prefetch(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp,u64 * sptep)3119 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
3120 				  struct kvm_mmu_page *sp, u64 *sptep)
3121 {
3122 	u64 *spte, *start = NULL;
3123 	int i;
3124 
3125 	WARN_ON_ONCE(!sp->role.direct);
3126 
3127 	i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
3128 	spte = sp->spt + i;
3129 
3130 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
3131 		if (is_shadow_present_pte(*spte) || spte == sptep) {
3132 			if (!start)
3133 				continue;
3134 			if (!direct_pte_prefetch_many(vcpu, sp, start, spte))
3135 				return;
3136 
3137 			start = NULL;
3138 		} else if (!start)
3139 			start = spte;
3140 	}
3141 	if (start)
3142 		direct_pte_prefetch_many(vcpu, sp, start, spte);
3143 }
3144 
direct_pte_prefetch(struct kvm_vcpu * vcpu,u64 * sptep)3145 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
3146 {
3147 	struct kvm_mmu_page *sp;
3148 
3149 	sp = sptep_to_sp(sptep);
3150 
3151 	/*
3152 	 * Without accessed bits, there's no way to distinguish between
3153 	 * actually accessed translations and prefetched, so disable pte
3154 	 * prefetch if accessed bits aren't available.
3155 	 */
3156 	if (sp_ad_disabled(sp))
3157 		return;
3158 
3159 	if (sp->role.level > PG_LEVEL_4K)
3160 		return;
3161 
3162 	/*
3163 	 * If addresses are being invalidated, skip prefetching to avoid
3164 	 * accidentally prefetching those addresses.
3165 	 */
3166 	if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
3167 		return;
3168 
3169 	__direct_pte_prefetch(vcpu, sp, sptep);
3170 }
3171 
3172 /*
3173  * Lookup the mapping level for @gfn in the current mm.
3174  *
3175  * WARNING!  Use of host_pfn_mapping_level() requires the caller and the end
3176  * consumer to be tied into KVM's handlers for MMU notifier events!
3177  *
3178  * There are several ways to safely use this helper:
3179  *
3180  * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
3181  *   consuming it.  In this case, mmu_lock doesn't need to be held during the
3182  *   lookup, but it does need to be held while checking the MMU notifier.
3183  *
3184  * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
3185  *   event for the hva.  This can be done by explicit checking the MMU notifier
3186  *   or by ensuring that KVM already has a valid mapping that covers the hva.
3187  *
3188  * - Do not use the result to install new mappings, e.g. use the host mapping
3189  *   level only to decide whether or not to zap an entry.  In this case, it's
3190  *   not required to hold mmu_lock (though it's highly likely the caller will
3191  *   want to hold mmu_lock anyways, e.g. to modify SPTEs).
3192  *
3193  * Note!  The lookup can still race with modifications to host page tables, but
3194  * the above "rules" ensure KVM will not _consume_ the result of the walk if a
3195  * race with the primary MMU occurs.
3196  */
host_pfn_mapping_level(struct kvm * kvm,gfn_t gfn,const struct kvm_memory_slot * slot)3197 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
3198 				  const struct kvm_memory_slot *slot)
3199 {
3200 	int level = PG_LEVEL_4K;
3201 	unsigned long hva;
3202 	unsigned long flags;
3203 	pgd_t pgd;
3204 	p4d_t p4d;
3205 	pud_t pud;
3206 	pmd_t pmd;
3207 
3208 	/*
3209 	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
3210 	 * is not solely for performance, it's also necessary to avoid the
3211 	 * "writable" check in __gfn_to_hva_many(), which will always fail on
3212 	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
3213 	 * page fault steps have already verified the guest isn't writing a
3214 	 * read-only memslot.
3215 	 */
3216 	hva = __gfn_to_hva_memslot(slot, gfn);
3217 
3218 	/*
3219 	 * Disable IRQs to prevent concurrent tear down of host page tables,
3220 	 * e.g. if the primary MMU promotes a P*D to a huge page and then frees
3221 	 * the original page table.
3222 	 */
3223 	local_irq_save(flags);
3224 
3225 	/*
3226 	 * Read each entry once.  As above, a non-leaf entry can be promoted to
3227 	 * a huge page _during_ this walk.  Re-reading the entry could send the
3228 	 * walk into the weeks, e.g. p*d_leaf() returns false (sees the old
3229 	 * value) and then p*d_offset() walks into the target huge page instead
3230 	 * of the old page table (sees the new value).
3231 	 */
3232 	pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
3233 	if (pgd_none(pgd))
3234 		goto out;
3235 
3236 	p4d = READ_ONCE(*p4d_offset(&pgd, hva));
3237 	if (p4d_none(p4d) || !p4d_present(p4d))
3238 		goto out;
3239 
3240 	pud = READ_ONCE(*pud_offset(&p4d, hva));
3241 	if (pud_none(pud) || !pud_present(pud))
3242 		goto out;
3243 
3244 	if (pud_leaf(pud)) {
3245 		level = PG_LEVEL_1G;
3246 		goto out;
3247 	}
3248 
3249 	pmd = READ_ONCE(*pmd_offset(&pud, hva));
3250 	if (pmd_none(pmd) || !pmd_present(pmd))
3251 		goto out;
3252 
3253 	if (pmd_leaf(pmd))
3254 		level = PG_LEVEL_2M;
3255 
3256 out:
3257 	local_irq_restore(flags);
3258 	return level;
3259 }
3260 
__kvm_mmu_max_mapping_level(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t gfn,int max_level,bool is_private)3261 static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
3262 				       const struct kvm_memory_slot *slot,
3263 				       gfn_t gfn, int max_level, bool is_private)
3264 {
3265 	struct kvm_lpage_info *linfo;
3266 	int host_level;
3267 
3268 	max_level = min(max_level, max_huge_page_level);
3269 	for ( ; max_level > PG_LEVEL_4K; max_level--) {
3270 		linfo = lpage_info_slot(gfn, slot, max_level);
3271 		if (!linfo->disallow_lpage)
3272 			break;
3273 	}
3274 
3275 	if (is_private)
3276 		return max_level;
3277 
3278 	if (max_level == PG_LEVEL_4K)
3279 		return PG_LEVEL_4K;
3280 
3281 	host_level = host_pfn_mapping_level(kvm, gfn, slot);
3282 	return min(host_level, max_level);
3283 }
3284 
kvm_mmu_max_mapping_level(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t gfn)3285 int kvm_mmu_max_mapping_level(struct kvm *kvm,
3286 			      const struct kvm_memory_slot *slot, gfn_t gfn)
3287 {
3288 	bool is_private = kvm_slot_can_be_private(slot) &&
3289 			  kvm_mem_is_private(kvm, gfn);
3290 
3291 	return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_private);
3292 }
3293 
kvm_mmu_hugepage_adjust(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)3294 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3295 {
3296 	struct kvm_memory_slot *slot = fault->slot;
3297 	kvm_pfn_t mask;
3298 
3299 	fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;
3300 
3301 	if (unlikely(fault->max_level == PG_LEVEL_4K))
3302 		return;
3303 
3304 	if (is_error_noslot_pfn(fault->pfn))
3305 		return;
3306 
3307 	if (kvm_slot_dirty_track_enabled(slot))
3308 		return;
3309 
3310 	/*
3311 	 * Enforce the iTLB multihit workaround after capturing the requested
3312 	 * level, which will be used to do precise, accurate accounting.
3313 	 */
3314 	fault->req_level = __kvm_mmu_max_mapping_level(vcpu->kvm, slot,
3315 						       fault->gfn, fault->max_level,
3316 						       fault->is_private);
3317 	if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
3318 		return;
3319 
3320 	/*
3321 	 * mmu_invalidate_retry() was successful and mmu_lock is held, so
3322 	 * the pmd can't be split from under us.
3323 	 */
3324 	fault->goal_level = fault->req_level;
3325 	mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
3326 	VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
3327 	fault->pfn &= ~mask;
3328 }
3329 
disallowed_hugepage_adjust(struct kvm_page_fault * fault,u64 spte,int cur_level)3330 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
3331 {
3332 	if (cur_level > PG_LEVEL_4K &&
3333 	    cur_level == fault->goal_level &&
3334 	    is_shadow_present_pte(spte) &&
3335 	    !is_large_pte(spte) &&
3336 	    spte_to_child_sp(spte)->nx_huge_page_disallowed) {
3337 		/*
3338 		 * A small SPTE exists for this pfn, but FNAME(fetch),
3339 		 * direct_map(), or kvm_tdp_mmu_map() would like to create a
3340 		 * large PTE instead: just force them to go down another level,
3341 		 * patching back for them into pfn the next 9 bits of the
3342 		 * address.
3343 		 */
3344 		u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
3345 				KVM_PAGES_PER_HPAGE(cur_level - 1);
3346 		fault->pfn |= fault->gfn & page_mask;
3347 		fault->goal_level--;
3348 	}
3349 }
3350 
direct_map(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)3351 static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3352 {
3353 	struct kvm_shadow_walk_iterator it;
3354 	struct kvm_mmu_page *sp;
3355 	int ret;
3356 	gfn_t base_gfn = fault->gfn;
3357 
3358 	kvm_mmu_hugepage_adjust(vcpu, fault);
3359 
3360 	trace_kvm_mmu_spte_requested(fault);
3361 	for_each_shadow_entry(vcpu, fault->addr, it) {
3362 		/*
3363 		 * We cannot overwrite existing page tables with an NX
3364 		 * large page, as the leaf could be executable.
3365 		 */
3366 		if (fault->nx_huge_page_workaround_enabled)
3367 			disallowed_hugepage_adjust(fault, *it.sptep, it.level);
3368 
3369 		base_gfn = gfn_round_for_level(fault->gfn, it.level);
3370 		if (it.level == fault->goal_level)
3371 			break;
3372 
3373 		sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL);
3374 		if (sp == ERR_PTR(-EEXIST))
3375 			continue;
3376 
3377 		link_shadow_page(vcpu, it.sptep, sp);
3378 		if (fault->huge_page_disallowed)
3379 			account_nx_huge_page(vcpu->kvm, sp,
3380 					     fault->req_level >= it.level);
3381 	}
3382 
3383 	if (WARN_ON_ONCE(it.level != fault->goal_level))
3384 		return -EFAULT;
3385 
3386 	ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
3387 			   base_gfn, fault->pfn, fault);
3388 	if (ret == RET_PF_SPURIOUS)
3389 		return ret;
3390 
3391 	direct_pte_prefetch(vcpu, it.sptep);
3392 	return ret;
3393 }
3394 
kvm_send_hwpoison_signal(struct kvm_memory_slot * slot,gfn_t gfn)3395 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
3396 {
3397 	unsigned long hva = gfn_to_hva_memslot(slot, gfn);
3398 
3399 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
3400 }
3401 
kvm_handle_error_pfn(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)3402 static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3403 {
3404 	if (is_sigpending_pfn(fault->pfn)) {
3405 		kvm_handle_signal_exit(vcpu);
3406 		return -EINTR;
3407 	}
3408 
3409 	/*
3410 	 * Do not cache the mmio info caused by writing the readonly gfn
3411 	 * into the spte otherwise read access on readonly gfn also can
3412 	 * caused mmio page fault and treat it as mmio access.
3413 	 */
3414 	if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
3415 		return RET_PF_EMULATE;
3416 
3417 	if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
3418 		kvm_send_hwpoison_signal(fault->slot, fault->gfn);
3419 		return RET_PF_RETRY;
3420 	}
3421 
3422 	return -EFAULT;
3423 }
3424 
kvm_handle_noslot_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault,unsigned int access)3425 static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
3426 				   struct kvm_page_fault *fault,
3427 				   unsigned int access)
3428 {
3429 	gva_t gva = fault->is_tdp ? 0 : fault->addr;
3430 
3431 	if (fault->is_private) {
3432 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
3433 		return -EFAULT;
3434 	}
3435 
3436 	vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3437 			     access & shadow_mmio_access_mask);
3438 
3439 	fault->slot = NULL;
3440 	fault->pfn = KVM_PFN_NOSLOT;
3441 	fault->map_writable = false;
3442 
3443 	/*
3444 	 * If MMIO caching is disabled, emulate immediately without
3445 	 * touching the shadow page tables as attempting to install an
3446 	 * MMIO SPTE will just be an expensive nop.
3447 	 */
3448 	if (unlikely(!enable_mmio_caching))
3449 		return RET_PF_EMULATE;
3450 
3451 	/*
3452 	 * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR,
3453 	 * any guest that generates such gfns is running nested and is being
3454 	 * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and
3455 	 * only if L1's MAXPHYADDR is inaccurate with respect to the
3456 	 * hardware's).
3457 	 */
3458 	if (unlikely(fault->gfn > kvm_mmu_max_gfn()))
3459 		return RET_PF_EMULATE;
3460 
3461 	return RET_PF_CONTINUE;
3462 }
3463 
page_fault_can_be_fast(struct kvm * kvm,struct kvm_page_fault * fault)3464 static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault)
3465 {
3466 	/*
3467 	 * Page faults with reserved bits set, i.e. faults on MMIO SPTEs, only
3468 	 * reach the common page fault handler if the SPTE has an invalid MMIO
3469 	 * generation number.  Refreshing the MMIO generation needs to go down
3470 	 * the slow path.  Note, EPT Misconfigs do NOT set the PRESENT flag!
3471 	 */
3472 	if (fault->rsvd)
3473 		return false;
3474 
3475 	/*
3476 	 * For hardware-protected VMs, certain conditions like attempting to
3477 	 * perform a write to a page which is not in the state that the guest
3478 	 * expects it to be in can result in a nested/extended #PF. In this
3479 	 * case, the below code might misconstrue this situation as being the
3480 	 * result of a write-protected access, and treat it as a spurious case
3481 	 * rather than taking any action to satisfy the real source of the #PF
3482 	 * such as generating a KVM_EXIT_MEMORY_FAULT. This can lead to the
3483 	 * guest spinning on a #PF indefinitely, so don't attempt the fast path
3484 	 * in this case.
3485 	 *
3486 	 * Note that the kvm_mem_is_private() check might race with an
3487 	 * attribute update, but this will either result in the guest spinning
3488 	 * on RET_PF_SPURIOUS until the update completes, or an actual spurious
3489 	 * case might go down the slow path. Either case will resolve itself.
3490 	 */
3491 	if (kvm->arch.has_private_mem &&
3492 	    fault->is_private != kvm_mem_is_private(kvm, fault->gfn))
3493 		return false;
3494 
3495 	/*
3496 	 * #PF can be fast if:
3497 	 *
3498 	 * 1. The shadow page table entry is not present and A/D bits are
3499 	 *    disabled _by KVM_, which could mean that the fault is potentially
3500 	 *    caused by access tracking (if enabled).  If A/D bits are enabled
3501 	 *    by KVM, but disabled by L1 for L2, KVM is forced to disable A/D
3502 	 *    bits for L2 and employ access tracking, but the fast page fault
3503 	 *    mechanism only supports direct MMUs.
3504 	 * 2. The shadow page table entry is present, the access is a write,
3505 	 *    and no reserved bits are set (MMIO SPTEs cannot be "fixed"), i.e.
3506 	 *    the fault was caused by a write-protection violation.  If the
3507 	 *    SPTE is MMU-writable (determined later), the fault can be fixed
3508 	 *    by setting the Writable bit, which can be done out of mmu_lock.
3509 	 */
3510 	if (!fault->present)
3511 		return !kvm_ad_enabled;
3512 
3513 	/*
3514 	 * Note, instruction fetches and writes are mutually exclusive, ignore
3515 	 * the "exec" flag.
3516 	 */
3517 	return fault->write;
3518 }
3519 
3520 /*
3521  * Returns true if the SPTE was fixed successfully. Otherwise,
3522  * someone else modified the SPTE from its original value.
3523  */
fast_pf_fix_direct_spte(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault,u64 * sptep,u64 old_spte,u64 new_spte)3524 static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
3525 				    struct kvm_page_fault *fault,
3526 				    u64 *sptep, u64 old_spte, u64 new_spte)
3527 {
3528 	/*
3529 	 * Theoretically we could also set dirty bit (and flush TLB) here in
3530 	 * order to eliminate unnecessary PML logging. See comments in
3531 	 * set_spte. But fast_page_fault is very unlikely to happen with PML
3532 	 * enabled, so we do not do this. This might result in the same GPA
3533 	 * to be logged in PML buffer again when the write really happens, and
3534 	 * eventually to be called by mark_page_dirty twice. But it's also no
3535 	 * harm. This also avoids the TLB flush needed after setting dirty bit
3536 	 * so non-PML cases won't be impacted.
3537 	 *
3538 	 * Compare with make_spte() where instead shadow_dirty_mask is set.
3539 	 */
3540 	if (!try_cmpxchg64(sptep, &old_spte, new_spte))
3541 		return false;
3542 
3543 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
3544 		mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3545 
3546 	return true;
3547 }
3548 
3549 /*
3550  * Returns the last level spte pointer of the shadow page walk for the given
3551  * gpa, and sets *spte to the spte value. This spte may be non-preset. If no
3552  * walk could be performed, returns NULL and *spte does not contain valid data.
3553  *
3554  * Contract:
3555  *  - Must be called between walk_shadow_page_lockless_{begin,end}.
3556  *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
3557  */
fast_pf_get_last_sptep(struct kvm_vcpu * vcpu,gpa_t gpa,u64 * spte)3558 static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
3559 {
3560 	struct kvm_shadow_walk_iterator iterator;
3561 	u64 old_spte;
3562 	u64 *sptep = NULL;
3563 
3564 	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
3565 		sptep = iterator.sptep;
3566 		*spte = old_spte;
3567 	}
3568 
3569 	return sptep;
3570 }
3571 
3572 /*
3573  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3574  */
fast_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)3575 static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3576 {
3577 	struct kvm_mmu_page *sp;
3578 	int ret = RET_PF_INVALID;
3579 	u64 spte;
3580 	u64 *sptep;
3581 	uint retry_count = 0;
3582 
3583 	if (!page_fault_can_be_fast(vcpu->kvm, fault))
3584 		return ret;
3585 
3586 	walk_shadow_page_lockless_begin(vcpu);
3587 
3588 	do {
3589 		u64 new_spte;
3590 
3591 		if (tdp_mmu_enabled)
3592 			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte);
3593 		else
3594 			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3595 
3596 		/*
3597 		 * It's entirely possible for the mapping to have been zapped
3598 		 * by a different task, but the root page should always be
3599 		 * available as the vCPU holds a reference to its root(s).
3600 		 */
3601 		if (WARN_ON_ONCE(!sptep))
3602 			spte = FROZEN_SPTE;
3603 
3604 		if (!is_shadow_present_pte(spte))
3605 			break;
3606 
3607 		sp = sptep_to_sp(sptep);
3608 		if (!is_last_spte(spte, sp->role.level))
3609 			break;
3610 
3611 		/*
3612 		 * Check whether the memory access that caused the fault would
3613 		 * still cause it if it were to be performed right now. If not,
3614 		 * then this is a spurious fault caused by TLB lazily flushed,
3615 		 * or some other CPU has already fixed the PTE after the
3616 		 * current CPU took the fault.
3617 		 *
3618 		 * Need not check the access of upper level table entries since
3619 		 * they are always ACC_ALL.
3620 		 */
3621 		if (is_access_allowed(fault, spte)) {
3622 			ret = RET_PF_SPURIOUS;
3623 			break;
3624 		}
3625 
3626 		new_spte = spte;
3627 
3628 		/*
3629 		 * KVM only supports fixing page faults outside of MMU lock for
3630 		 * direct MMUs, nested MMUs are always indirect, and KVM always
3631 		 * uses A/D bits for non-nested MMUs.  Thus, if A/D bits are
3632 		 * enabled, the SPTE can't be an access-tracked SPTE.
3633 		 */
3634 		if (unlikely(!kvm_ad_enabled) && is_access_track_spte(spte))
3635 			new_spte = restore_acc_track_spte(new_spte) |
3636 				   shadow_accessed_mask;
3637 
3638 		/*
3639 		 * To keep things simple, only SPTEs that are MMU-writable can
3640 		 * be made fully writable outside of mmu_lock, e.g. only SPTEs
3641 		 * that were write-protected for dirty-logging or access
3642 		 * tracking are handled here.  Don't bother checking if the
3643 		 * SPTE is writable to prioritize running with A/D bits enabled.
3644 		 * The is_access_allowed() check above handles the common case
3645 		 * of the fault being spurious, and the SPTE is known to be
3646 		 * shadow-present, i.e. except for access tracking restoration
3647 		 * making the new SPTE writable, the check is wasteful.
3648 		 */
3649 		if (fault->write && is_mmu_writable_spte(spte)) {
3650 			new_spte |= PT_WRITABLE_MASK;
3651 
3652 			/*
3653 			 * Do not fix write-permission on the large spte when
3654 			 * dirty logging is enabled. Since we only dirty the
3655 			 * first page into the dirty-bitmap in
3656 			 * fast_pf_fix_direct_spte(), other pages are missed
3657 			 * if its slot has dirty logging enabled.
3658 			 *
3659 			 * Instead, we let the slow page fault path create a
3660 			 * normal spte to fix the access.
3661 			 */
3662 			if (sp->role.level > PG_LEVEL_4K &&
3663 			    kvm_slot_dirty_track_enabled(fault->slot))
3664 				break;
3665 		}
3666 
3667 		/* Verify that the fault can be handled in the fast path */
3668 		if (new_spte == spte ||
3669 		    !is_access_allowed(fault, new_spte))
3670 			break;
3671 
3672 		/*
3673 		 * Currently, fast page fault only works for direct mapping
3674 		 * since the gfn is not stable for indirect shadow page. See
3675 		 * Documentation/virt/kvm/locking.rst to get more detail.
3676 		 */
3677 		if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3678 			ret = RET_PF_FIXED;
3679 			break;
3680 		}
3681 
3682 		if (++retry_count > 4) {
3683 			pr_warn_once("Fast #PF retrying more than 4 times.\n");
3684 			break;
3685 		}
3686 
3687 	} while (true);
3688 
3689 	trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3690 	walk_shadow_page_lockless_end(vcpu);
3691 
3692 	if (ret != RET_PF_INVALID)
3693 		vcpu->stat.pf_fast++;
3694 
3695 	return ret;
3696 }
3697 
mmu_free_root_page(struct kvm * kvm,hpa_t * root_hpa,struct list_head * invalid_list)3698 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3699 			       struct list_head *invalid_list)
3700 {
3701 	struct kvm_mmu_page *sp;
3702 
3703 	if (!VALID_PAGE(*root_hpa))
3704 		return;
3705 
3706 	sp = root_to_sp(*root_hpa);
3707 	if (WARN_ON_ONCE(!sp))
3708 		return;
3709 
3710 	if (is_tdp_mmu_page(sp)) {
3711 		lockdep_assert_held_read(&kvm->mmu_lock);
3712 		kvm_tdp_mmu_put_root(kvm, sp);
3713 	} else {
3714 		lockdep_assert_held_write(&kvm->mmu_lock);
3715 		if (!--sp->root_count && sp->role.invalid)
3716 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3717 	}
3718 
3719 	*root_hpa = INVALID_PAGE;
3720 }
3721 
3722 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
kvm_mmu_free_roots(struct kvm * kvm,struct kvm_mmu * mmu,ulong roots_to_free)3723 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
3724 			ulong roots_to_free)
3725 {
3726 	bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct;
3727 	int i;
3728 	LIST_HEAD(invalid_list);
3729 	bool free_active_root;
3730 
3731 	WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL);
3732 
3733 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3734 
3735 	/* Before acquiring the MMU lock, see if we need to do any real work. */
3736 	free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT)
3737 		&& VALID_PAGE(mmu->root.hpa);
3738 
3739 	if (!free_active_root) {
3740 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3741 			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3742 			    VALID_PAGE(mmu->prev_roots[i].hpa))
3743 				break;
3744 
3745 		if (i == KVM_MMU_NUM_PREV_ROOTS)
3746 			return;
3747 	}
3748 
3749 	if (is_tdp_mmu)
3750 		read_lock(&kvm->mmu_lock);
3751 	else
3752 		write_lock(&kvm->mmu_lock);
3753 
3754 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3755 		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3756 			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3757 					   &invalid_list);
3758 
3759 	if (free_active_root) {
3760 		if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
3761 			/* Nothing to cleanup for dummy roots. */
3762 		} else if (root_to_sp(mmu->root.hpa)) {
3763 			mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
3764 		} else if (mmu->pae_root) {
3765 			for (i = 0; i < 4; ++i) {
3766 				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3767 					continue;
3768 
3769 				mmu_free_root_page(kvm, &mmu->pae_root[i],
3770 						   &invalid_list);
3771 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3772 			}
3773 		}
3774 		mmu->root.hpa = INVALID_PAGE;
3775 		mmu->root.pgd = 0;
3776 	}
3777 
3778 	if (is_tdp_mmu) {
3779 		read_unlock(&kvm->mmu_lock);
3780 		WARN_ON_ONCE(!list_empty(&invalid_list));
3781 	} else {
3782 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
3783 		write_unlock(&kvm->mmu_lock);
3784 	}
3785 }
3786 EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3787 
kvm_mmu_free_guest_mode_roots(struct kvm * kvm,struct kvm_mmu * mmu)3788 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
3789 {
3790 	unsigned long roots_to_free = 0;
3791 	struct kvm_mmu_page *sp;
3792 	hpa_t root_hpa;
3793 	int i;
3794 
3795 	/*
3796 	 * This should not be called while L2 is active, L2 can't invalidate
3797 	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
3798 	 */
3799 	WARN_ON_ONCE(mmu->root_role.guest_mode);
3800 
3801 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3802 		root_hpa = mmu->prev_roots[i].hpa;
3803 		if (!VALID_PAGE(root_hpa))
3804 			continue;
3805 
3806 		sp = root_to_sp(root_hpa);
3807 		if (!sp || sp->role.guest_mode)
3808 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3809 	}
3810 
3811 	kvm_mmu_free_roots(kvm, mmu, roots_to_free);
3812 }
3813 EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
3814 
mmu_alloc_root(struct kvm_vcpu * vcpu,gfn_t gfn,int quadrant,u8 level)3815 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
3816 			    u8 level)
3817 {
3818 	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3819 	struct kvm_mmu_page *sp;
3820 
3821 	role.level = level;
3822 	role.quadrant = quadrant;
3823 
3824 	WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
3825 	WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3826 
3827 	sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
3828 	++sp->root_count;
3829 
3830 	return __pa(sp->spt);
3831 }
3832 
mmu_alloc_direct_roots(struct kvm_vcpu * vcpu)3833 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3834 {
3835 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3836 	u8 shadow_root_level = mmu->root_role.level;
3837 	hpa_t root;
3838 	unsigned i;
3839 	int r;
3840 
3841 	if (tdp_mmu_enabled) {
3842 		if (kvm_has_mirrored_tdp(vcpu->kvm) &&
3843 		    !VALID_PAGE(mmu->mirror_root_hpa))
3844 			kvm_tdp_mmu_alloc_root(vcpu, true);
3845 		kvm_tdp_mmu_alloc_root(vcpu, false);
3846 		return 0;
3847 	}
3848 
3849 	write_lock(&vcpu->kvm->mmu_lock);
3850 	r = make_mmu_pages_available(vcpu);
3851 	if (r < 0)
3852 		goto out_unlock;
3853 
3854 	if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3855 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
3856 		mmu->root.hpa = root;
3857 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3858 		if (WARN_ON_ONCE(!mmu->pae_root)) {
3859 			r = -EIO;
3860 			goto out_unlock;
3861 		}
3862 
3863 		for (i = 0; i < 4; ++i) {
3864 			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3865 
3866 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0,
3867 					      PT32_ROOT_LEVEL);
3868 			mmu->pae_root[i] = root | PT_PRESENT_MASK |
3869 					   shadow_me_value;
3870 		}
3871 		mmu->root.hpa = __pa(mmu->pae_root);
3872 	} else {
3873 		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3874 		r = -EIO;
3875 		goto out_unlock;
3876 	}
3877 
3878 	/* root.pgd is ignored for direct MMUs. */
3879 	mmu->root.pgd = 0;
3880 out_unlock:
3881 	write_unlock(&vcpu->kvm->mmu_lock);
3882 	return r;
3883 }
3884 
mmu_first_shadow_root_alloc(struct kvm * kvm)3885 static int mmu_first_shadow_root_alloc(struct kvm *kvm)
3886 {
3887 	struct kvm_memslots *slots;
3888 	struct kvm_memory_slot *slot;
3889 	int r = 0, i, bkt;
3890 
3891 	/*
3892 	 * Check if this is the first shadow root being allocated before
3893 	 * taking the lock.
3894 	 */
3895 	if (kvm_shadow_root_allocated(kvm))
3896 		return 0;
3897 
3898 	mutex_lock(&kvm->slots_arch_lock);
3899 
3900 	/* Recheck, under the lock, whether this is the first shadow root. */
3901 	if (kvm_shadow_root_allocated(kvm))
3902 		goto out_unlock;
3903 
3904 	/*
3905 	 * Check if anything actually needs to be allocated, e.g. all metadata
3906 	 * will be allocated upfront if TDP is disabled.
3907 	 */
3908 	if (kvm_memslots_have_rmaps(kvm) &&
3909 	    kvm_page_track_write_tracking_enabled(kvm))
3910 		goto out_success;
3911 
3912 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
3913 		slots = __kvm_memslots(kvm, i);
3914 		kvm_for_each_memslot(slot, bkt, slots) {
3915 			/*
3916 			 * Both of these functions are no-ops if the target is
3917 			 * already allocated, so unconditionally calling both
3918 			 * is safe.  Intentionally do NOT free allocations on
3919 			 * failure to avoid having to track which allocations
3920 			 * were made now versus when the memslot was created.
3921 			 * The metadata is guaranteed to be freed when the slot
3922 			 * is freed, and will be kept/used if userspace retries
3923 			 * KVM_RUN instead of killing the VM.
3924 			 */
3925 			r = memslot_rmap_alloc(slot, slot->npages);
3926 			if (r)
3927 				goto out_unlock;
3928 			r = kvm_page_track_write_tracking_alloc(slot);
3929 			if (r)
3930 				goto out_unlock;
3931 		}
3932 	}
3933 
3934 	/*
3935 	 * Ensure that shadow_root_allocated becomes true strictly after
3936 	 * all the related pointers are set.
3937 	 */
3938 out_success:
3939 	smp_store_release(&kvm->arch.shadow_root_allocated, true);
3940 
3941 out_unlock:
3942 	mutex_unlock(&kvm->slots_arch_lock);
3943 	return r;
3944 }
3945 
mmu_alloc_shadow_roots(struct kvm_vcpu * vcpu)3946 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3947 {
3948 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3949 	u64 pdptrs[4], pm_mask;
3950 	gfn_t root_gfn, root_pgd;
3951 	int quadrant, i, r;
3952 	hpa_t root;
3953 
3954 	root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
3955 	root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
3956 
3957 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3958 		mmu->root.hpa = kvm_mmu_get_dummy_root();
3959 		return 0;
3960 	}
3961 
3962 	/*
3963 	 * On SVM, reading PDPTRs might access guest memory, which might fault
3964 	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
3965 	 */
3966 	if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
3967 		for (i = 0; i < 4; ++i) {
3968 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
3969 			if (!(pdptrs[i] & PT_PRESENT_MASK))
3970 				continue;
3971 
3972 			if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT))
3973 				pdptrs[i] = 0;
3974 		}
3975 	}
3976 
3977 	r = mmu_first_shadow_root_alloc(vcpu->kvm);
3978 	if (r)
3979 		return r;
3980 
3981 	write_lock(&vcpu->kvm->mmu_lock);
3982 	r = make_mmu_pages_available(vcpu);
3983 	if (r < 0)
3984 		goto out_unlock;
3985 
3986 	/*
3987 	 * Do we shadow a long mode page table? If so we need to
3988 	 * write-protect the guests page table root.
3989 	 */
3990 	if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
3991 		root = mmu_alloc_root(vcpu, root_gfn, 0,
3992 				      mmu->root_role.level);
3993 		mmu->root.hpa = root;
3994 		goto set_root_pgd;
3995 	}
3996 
3997 	if (WARN_ON_ONCE(!mmu->pae_root)) {
3998 		r = -EIO;
3999 		goto out_unlock;
4000 	}
4001 
4002 	/*
4003 	 * We shadow a 32 bit page table. This may be a legacy 2-level
4004 	 * or a PAE 3-level page table. In either case we need to be aware that
4005 	 * the shadow page table may be a PAE or a long mode page table.
4006 	 */
4007 	pm_mask = PT_PRESENT_MASK | shadow_me_value;
4008 	if (mmu->root_role.level >= PT64_ROOT_4LEVEL) {
4009 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
4010 
4011 		if (WARN_ON_ONCE(!mmu->pml4_root)) {
4012 			r = -EIO;
4013 			goto out_unlock;
4014 		}
4015 		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
4016 
4017 		if (mmu->root_role.level == PT64_ROOT_5LEVEL) {
4018 			if (WARN_ON_ONCE(!mmu->pml5_root)) {
4019 				r = -EIO;
4020 				goto out_unlock;
4021 			}
4022 			mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
4023 		}
4024 	}
4025 
4026 	for (i = 0; i < 4; ++i) {
4027 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
4028 
4029 		if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
4030 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
4031 				mmu->pae_root[i] = INVALID_PAE_ROOT;
4032 				continue;
4033 			}
4034 			root_gfn = pdptrs[i] >> PAGE_SHIFT;
4035 		}
4036 
4037 		/*
4038 		 * If shadowing 32-bit non-PAE page tables, each PAE page
4039 		 * directory maps one quarter of the guest's non-PAE page
4040 		 * directory. Othwerise each PAE page direct shadows one guest
4041 		 * PAE page directory so that quadrant should be 0.
4042 		 */
4043 		quadrant = (mmu->cpu_role.base.level == PT32_ROOT_LEVEL) ? i : 0;
4044 
4045 		root = mmu_alloc_root(vcpu, root_gfn, quadrant, PT32_ROOT_LEVEL);
4046 		mmu->pae_root[i] = root | pm_mask;
4047 	}
4048 
4049 	if (mmu->root_role.level == PT64_ROOT_5LEVEL)
4050 		mmu->root.hpa = __pa(mmu->pml5_root);
4051 	else if (mmu->root_role.level == PT64_ROOT_4LEVEL)
4052 		mmu->root.hpa = __pa(mmu->pml4_root);
4053 	else
4054 		mmu->root.hpa = __pa(mmu->pae_root);
4055 
4056 set_root_pgd:
4057 	mmu->root.pgd = root_pgd;
4058 out_unlock:
4059 	write_unlock(&vcpu->kvm->mmu_lock);
4060 
4061 	return r;
4062 }
4063 
mmu_alloc_special_roots(struct kvm_vcpu * vcpu)4064 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
4065 {
4066 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4067 	bool need_pml5 = mmu->root_role.level > PT64_ROOT_4LEVEL;
4068 	u64 *pml5_root = NULL;
4069 	u64 *pml4_root = NULL;
4070 	u64 *pae_root;
4071 
4072 	/*
4073 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
4074 	 * tables are allocated and initialized at root creation as there is no
4075 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
4076 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
4077 	 */
4078 	if (mmu->root_role.direct ||
4079 	    mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
4080 	    mmu->root_role.level < PT64_ROOT_4LEVEL)
4081 		return 0;
4082 
4083 	/*
4084 	 * NPT, the only paging mode that uses this horror, uses a fixed number
4085 	 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
4086 	 * all MMus are 5-level.  Thus, this can safely require that pml5_root
4087 	 * is allocated if the other roots are valid and pml5 is needed, as any
4088 	 * prior MMU would also have required pml5.
4089 	 */
4090 	if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
4091 		return 0;
4092 
4093 	/*
4094 	 * The special roots should always be allocated in concert.  Yell and
4095 	 * bail if KVM ends up in a state where only one of the roots is valid.
4096 	 */
4097 	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
4098 			 (need_pml5 && mmu->pml5_root)))
4099 		return -EIO;
4100 
4101 	/*
4102 	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
4103 	 * doesn't need to be decrypted.
4104 	 */
4105 	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
4106 	if (!pae_root)
4107 		return -ENOMEM;
4108 
4109 #ifdef CONFIG_X86_64
4110 	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
4111 	if (!pml4_root)
4112 		goto err_pml4;
4113 
4114 	if (need_pml5) {
4115 		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
4116 		if (!pml5_root)
4117 			goto err_pml5;
4118 	}
4119 #endif
4120 
4121 	mmu->pae_root = pae_root;
4122 	mmu->pml4_root = pml4_root;
4123 	mmu->pml5_root = pml5_root;
4124 
4125 	return 0;
4126 
4127 #ifdef CONFIG_X86_64
4128 err_pml5:
4129 	free_page((unsigned long)pml4_root);
4130 err_pml4:
4131 	free_page((unsigned long)pae_root);
4132 	return -ENOMEM;
4133 #endif
4134 }
4135 
is_unsync_root(hpa_t root)4136 static bool is_unsync_root(hpa_t root)
4137 {
4138 	struct kvm_mmu_page *sp;
4139 
4140 	if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
4141 		return false;
4142 
4143 	/*
4144 	 * The read barrier orders the CPU's read of SPTE.W during the page table
4145 	 * walk before the reads of sp->unsync/sp->unsync_children here.
4146 	 *
4147 	 * Even if another CPU was marking the SP as unsync-ed simultaneously,
4148 	 * any guest page table changes are not guaranteed to be visible anyway
4149 	 * until this VCPU issues a TLB flush strictly after those changes are
4150 	 * made.  We only need to ensure that the other CPU sets these flags
4151 	 * before any actual changes to the page tables are made.  The comments
4152 	 * in mmu_try_to_unsync_pages() describe what could go wrong if this
4153 	 * requirement isn't satisfied.
4154 	 */
4155 	smp_rmb();
4156 	sp = root_to_sp(root);
4157 
4158 	/*
4159 	 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
4160 	 * PDPTEs for a given PAE root need to be synchronized individually.
4161 	 */
4162 	if (WARN_ON_ONCE(!sp))
4163 		return false;
4164 
4165 	if (sp->unsync || sp->unsync_children)
4166 		return true;
4167 
4168 	return false;
4169 }
4170 
kvm_mmu_sync_roots(struct kvm_vcpu * vcpu)4171 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
4172 {
4173 	int i;
4174 	struct kvm_mmu_page *sp;
4175 
4176 	if (vcpu->arch.mmu->root_role.direct)
4177 		return;
4178 
4179 	if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4180 		return;
4181 
4182 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4183 
4184 	if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4185 		hpa_t root = vcpu->arch.mmu->root.hpa;
4186 
4187 		if (!is_unsync_root(root))
4188 			return;
4189 
4190 		sp = root_to_sp(root);
4191 
4192 		write_lock(&vcpu->kvm->mmu_lock);
4193 		mmu_sync_children(vcpu, sp, true);
4194 		write_unlock(&vcpu->kvm->mmu_lock);
4195 		return;
4196 	}
4197 
4198 	write_lock(&vcpu->kvm->mmu_lock);
4199 
4200 	for (i = 0; i < 4; ++i) {
4201 		hpa_t root = vcpu->arch.mmu->pae_root[i];
4202 
4203 		if (IS_VALID_PAE_ROOT(root)) {
4204 			sp = spte_to_child_sp(root);
4205 			mmu_sync_children(vcpu, sp, true);
4206 		}
4207 	}
4208 
4209 	write_unlock(&vcpu->kvm->mmu_lock);
4210 }
4211 
kvm_mmu_sync_prev_roots(struct kvm_vcpu * vcpu)4212 void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
4213 {
4214 	unsigned long roots_to_free = 0;
4215 	int i;
4216 
4217 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4218 		if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
4219 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
4220 
4221 	/* sync prev_roots by simply freeing them */
4222 	kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
4223 }
4224 
nonpaging_gva_to_gpa(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu,gpa_t vaddr,u64 access,struct x86_exception * exception)4225 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4226 				  gpa_t vaddr, u64 access,
4227 				  struct x86_exception *exception)
4228 {
4229 	if (exception)
4230 		exception->error_code = 0;
4231 	return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
4232 }
4233 
mmio_info_in_cache(struct kvm_vcpu * vcpu,u64 addr,bool direct)4234 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4235 {
4236 	/*
4237 	 * A nested guest cannot use the MMIO cache if it is using nested
4238 	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
4239 	 */
4240 	if (mmu_is_nested(vcpu))
4241 		return false;
4242 
4243 	if (direct)
4244 		return vcpu_match_mmio_gpa(vcpu, addr);
4245 
4246 	return vcpu_match_mmio_gva(vcpu, addr);
4247 }
4248 
4249 /*
4250  * Return the level of the lowest level SPTE added to sptes.
4251  * That SPTE may be non-present.
4252  *
4253  * Must be called between walk_shadow_page_lockless_{begin,end}.
4254  */
get_walk(struct kvm_vcpu * vcpu,u64 addr,u64 * sptes,int * root_level)4255 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
4256 {
4257 	struct kvm_shadow_walk_iterator iterator;
4258 	int leaf = -1;
4259 	u64 spte;
4260 
4261 	for (shadow_walk_init(&iterator, vcpu, addr),
4262 	     *root_level = iterator.level;
4263 	     shadow_walk_okay(&iterator);
4264 	     __shadow_walk_next(&iterator, spte)) {
4265 		leaf = iterator.level;
4266 		spte = mmu_spte_get_lockless(iterator.sptep);
4267 
4268 		sptes[leaf] = spte;
4269 	}
4270 
4271 	return leaf;
4272 }
4273 
get_sptes_lockless(struct kvm_vcpu * vcpu,u64 addr,u64 * sptes,int * root_level)4274 static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
4275 			      int *root_level)
4276 {
4277 	int leaf;
4278 
4279 	walk_shadow_page_lockless_begin(vcpu);
4280 
4281 	if (is_tdp_mmu_active(vcpu))
4282 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level);
4283 	else
4284 		leaf = get_walk(vcpu, addr, sptes, root_level);
4285 
4286 	walk_shadow_page_lockless_end(vcpu);
4287 	return leaf;
4288 }
4289 
4290 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
get_mmio_spte(struct kvm_vcpu * vcpu,u64 addr,u64 * sptep)4291 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
4292 {
4293 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
4294 	struct rsvd_bits_validate *rsvd_check;
4295 	int root, leaf, level;
4296 	bool reserved = false;
4297 
4298 	leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
4299 	if (unlikely(leaf < 0)) {
4300 		*sptep = 0ull;
4301 		return reserved;
4302 	}
4303 
4304 	*sptep = sptes[leaf];
4305 
4306 	/*
4307 	 * Skip reserved bits checks on the terminal leaf if it's not a valid
4308 	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
4309 	 * design, always have reserved bits set.  The purpose of the checks is
4310 	 * to detect reserved bits on non-MMIO SPTEs. i.e. buggy SPTEs.
4311 	 */
4312 	if (!is_shadow_present_pte(sptes[leaf]))
4313 		leaf++;
4314 
4315 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
4316 
4317 	for (level = root; level >= leaf; level--)
4318 		reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
4319 
4320 	if (reserved) {
4321 		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
4322 		       __func__, addr);
4323 		for (level = root; level >= leaf; level--)
4324 			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
4325 			       sptes[level], level,
4326 			       get_rsvd_bits(rsvd_check, sptes[level], level));
4327 	}
4328 
4329 	return reserved;
4330 }
4331 
handle_mmio_page_fault(struct kvm_vcpu * vcpu,u64 addr,bool direct)4332 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4333 {
4334 	u64 spte;
4335 	bool reserved;
4336 
4337 	if (mmio_info_in_cache(vcpu, addr, direct))
4338 		return RET_PF_EMULATE;
4339 
4340 	reserved = get_mmio_spte(vcpu, addr, &spte);
4341 	if (WARN_ON_ONCE(reserved))
4342 		return -EINVAL;
4343 
4344 	if (is_mmio_spte(vcpu->kvm, spte)) {
4345 		gfn_t gfn = get_mmio_spte_gfn(spte);
4346 		unsigned int access = get_mmio_spte_access(spte);
4347 
4348 		if (!check_mmio_spte(vcpu, spte))
4349 			return RET_PF_INVALID;
4350 
4351 		if (direct)
4352 			addr = 0;
4353 
4354 		trace_handle_mmio_page_fault(addr, gfn, access);
4355 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4356 		return RET_PF_EMULATE;
4357 	}
4358 
4359 	/*
4360 	 * If the page table is zapped by other cpus, let CPU fault again on
4361 	 * the address.
4362 	 */
4363 	return RET_PF_RETRY;
4364 }
4365 
page_fault_handle_page_track(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4366 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
4367 					 struct kvm_page_fault *fault)
4368 {
4369 	if (unlikely(fault->rsvd))
4370 		return false;
4371 
4372 	if (!fault->present || !fault->write)
4373 		return false;
4374 
4375 	/*
4376 	 * guest is writing the page which is write tracked which can
4377 	 * not be fixed by page fault handler.
4378 	 */
4379 	if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
4380 		return true;
4381 
4382 	return false;
4383 }
4384 
shadow_page_table_clear_flood(struct kvm_vcpu * vcpu,gva_t addr)4385 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
4386 {
4387 	struct kvm_shadow_walk_iterator iterator;
4388 	u64 spte;
4389 
4390 	walk_shadow_page_lockless_begin(vcpu);
4391 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
4392 		clear_sp_write_flooding_count(iterator.sptep);
4393 	walk_shadow_page_lockless_end(vcpu);
4394 }
4395 
alloc_apf_token(struct kvm_vcpu * vcpu)4396 static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
4397 {
4398 	/* make sure the token value is not 0 */
4399 	u32 id = vcpu->arch.apf.id;
4400 
4401 	if (id << 12 == 0)
4402 		vcpu->arch.apf.id = 1;
4403 
4404 	return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
4405 }
4406 
kvm_arch_setup_async_pf(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4407 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
4408 				    struct kvm_page_fault *fault)
4409 {
4410 	struct kvm_arch_async_pf arch;
4411 
4412 	arch.token = alloc_apf_token(vcpu);
4413 	arch.gfn = fault->gfn;
4414 	arch.error_code = fault->error_code;
4415 	arch.direct_map = vcpu->arch.mmu->root_role.direct;
4416 	arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
4417 
4418 	return kvm_setup_async_pf(vcpu, fault->addr,
4419 				  kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch);
4420 }
4421 
kvm_arch_async_page_ready(struct kvm_vcpu * vcpu,struct kvm_async_pf * work)4422 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
4423 {
4424 	int r;
4425 
4426 	if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS))
4427 		return;
4428 
4429 	if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
4430 	      work->wakeup_all)
4431 		return;
4432 
4433 	r = kvm_mmu_reload(vcpu);
4434 	if (unlikely(r))
4435 		return;
4436 
4437 	if (!vcpu->arch.mmu->root_role.direct &&
4438 	      work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
4439 		return;
4440 
4441 	r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code,
4442 				  true, NULL, NULL);
4443 
4444 	/*
4445 	 * Account fixed page faults, otherwise they'll never be counted, but
4446 	 * ignore stats for all other return times.  Page-ready "faults" aren't
4447 	 * truly spurious and never trigger emulation
4448 	 */
4449 	if (r == RET_PF_FIXED)
4450 		vcpu->stat.pf_fixed++;
4451 }
4452 
kvm_max_level_for_order(int order)4453 static inline u8 kvm_max_level_for_order(int order)
4454 {
4455 	BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
4456 
4457 	KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
4458 			order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
4459 			order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
4460 
4461 	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
4462 		return PG_LEVEL_1G;
4463 
4464 	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
4465 		return PG_LEVEL_2M;
4466 
4467 	return PG_LEVEL_4K;
4468 }
4469 
kvm_max_private_mapping_level(struct kvm * kvm,kvm_pfn_t pfn,u8 max_level,int gmem_order)4470 static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
4471 					u8 max_level, int gmem_order)
4472 {
4473 	u8 req_max_level;
4474 
4475 	if (max_level == PG_LEVEL_4K)
4476 		return PG_LEVEL_4K;
4477 
4478 	max_level = min(kvm_max_level_for_order(gmem_order), max_level);
4479 	if (max_level == PG_LEVEL_4K)
4480 		return PG_LEVEL_4K;
4481 
4482 	req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn);
4483 	if (req_max_level)
4484 		max_level = min(max_level, req_max_level);
4485 
4486 	return max_level;
4487 }
4488 
kvm_mmu_finish_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault,int r)4489 static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
4490 				      struct kvm_page_fault *fault, int r)
4491 {
4492 	kvm_release_faultin_page(vcpu->kvm, fault->refcounted_page,
4493 				 r == RET_PF_RETRY, fault->map_writable);
4494 }
4495 
kvm_mmu_faultin_pfn_private(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4496 static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
4497 				       struct kvm_page_fault *fault)
4498 {
4499 	int max_order, r;
4500 
4501 	if (!kvm_slot_can_be_private(fault->slot)) {
4502 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4503 		return -EFAULT;
4504 	}
4505 
4506 	r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
4507 			     &fault->refcounted_page, &max_order);
4508 	if (r) {
4509 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4510 		return r;
4511 	}
4512 
4513 	fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
4514 	fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn,
4515 							 fault->max_level, max_order);
4516 
4517 	return RET_PF_CONTINUE;
4518 }
4519 
__kvm_mmu_faultin_pfn(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4520 static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
4521 				 struct kvm_page_fault *fault)
4522 {
4523 	unsigned int foll = fault->write ? FOLL_WRITE : 0;
4524 
4525 	if (fault->is_private)
4526 		return kvm_mmu_faultin_pfn_private(vcpu, fault);
4527 
4528 	foll |= FOLL_NOWAIT;
4529 	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
4530 				       &fault->map_writable, &fault->refcounted_page);
4531 
4532 	/*
4533 	 * If resolving the page failed because I/O is needed to fault-in the
4534 	 * page, then either set up an asynchronous #PF to do the I/O, or if
4535 	 * doing an async #PF isn't possible, retry with I/O allowed.  All
4536 	 * other failures are terminal, i.e. retrying won't help.
4537 	 */
4538 	if (fault->pfn != KVM_PFN_ERR_NEEDS_IO)
4539 		return RET_PF_CONTINUE;
4540 
4541 	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
4542 		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
4543 		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
4544 			trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
4545 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
4546 			return RET_PF_RETRY;
4547 		} else if (kvm_arch_setup_async_pf(vcpu, fault)) {
4548 			return RET_PF_RETRY;
4549 		}
4550 	}
4551 
4552 	/*
4553 	 * Allow gup to bail on pending non-fatal signals when it's also allowed
4554 	 * to wait for IO.  Note, gup always bails if it is unable to quickly
4555 	 * get a page and a fatal signal, i.e. SIGKILL, is pending.
4556 	 */
4557 	foll |= FOLL_INTERRUPTIBLE;
4558 	foll &= ~FOLL_NOWAIT;
4559 	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
4560 				       &fault->map_writable, &fault->refcounted_page);
4561 
4562 	return RET_PF_CONTINUE;
4563 }
4564 
kvm_mmu_faultin_pfn(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault,unsigned int access)4565 static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
4566 			       struct kvm_page_fault *fault, unsigned int access)
4567 {
4568 	struct kvm_memory_slot *slot = fault->slot;
4569 	struct kvm *kvm = vcpu->kvm;
4570 	int ret;
4571 
4572 	if (KVM_BUG_ON(kvm_is_gfn_alias(kvm, fault->gfn), kvm))
4573 		return -EFAULT;
4574 
4575 	/*
4576 	 * Note that the mmu_invalidate_seq also serves to detect a concurrent
4577 	 * change in attributes.  is_page_fault_stale() will detect an
4578 	 * invalidation relate to fault->fn and resume the guest without
4579 	 * installing a mapping in the page tables.
4580 	 */
4581 	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
4582 	smp_rmb();
4583 
4584 	/*
4585 	 * Now that we have a snapshot of mmu_invalidate_seq we can check for a
4586 	 * private vs. shared mismatch.
4587 	 */
4588 	if (fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) {
4589 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4590 		return -EFAULT;
4591 	}
4592 
4593 	if (unlikely(!slot))
4594 		return kvm_handle_noslot_fault(vcpu, fault, access);
4595 
4596 	/*
4597 	 * Retry the page fault if the gfn hit a memslot that is being deleted
4598 	 * or moved.  This ensures any existing SPTEs for the old memslot will
4599 	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
4600 	 */
4601 	if (slot->flags & KVM_MEMSLOT_INVALID)
4602 		return RET_PF_RETRY;
4603 
4604 	if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) {
4605 		/*
4606 		 * Don't map L1's APIC access page into L2, KVM doesn't support
4607 		 * using APICv/AVIC to accelerate L2 accesses to L1's APIC,
4608 		 * i.e. the access needs to be emulated.  Emulating access to
4609 		 * L1's APIC is also correct if L1 is accelerating L2's own
4610 		 * virtual APIC, but for some reason L1 also maps _L1's_ APIC
4611 		 * into L2.  Note, vcpu_is_mmio_gpa() always treats access to
4612 		 * the APIC as MMIO.  Allow an MMIO SPTE to be created, as KVM
4613 		 * uses different roots for L1 vs. L2, i.e. there is no danger
4614 		 * of breaking APICv/AVIC for L1.
4615 		 */
4616 		if (is_guest_mode(vcpu))
4617 			return kvm_handle_noslot_fault(vcpu, fault, access);
4618 
4619 		/*
4620 		 * If the APIC access page exists but is disabled, go directly
4621 		 * to emulation without caching the MMIO access or creating a
4622 		 * MMIO SPTE.  That way the cache doesn't need to be purged
4623 		 * when the AVIC is re-enabled.
4624 		 */
4625 		if (!kvm_apicv_activated(vcpu->kvm))
4626 			return RET_PF_EMULATE;
4627 	}
4628 
4629 	/*
4630 	 * Check for a relevant mmu_notifier invalidation event before getting
4631 	 * the pfn from the primary MMU, and before acquiring mmu_lock.
4632 	 *
4633 	 * For mmu_lock, if there is an in-progress invalidation and the kernel
4634 	 * allows preemption, the invalidation task may drop mmu_lock and yield
4635 	 * in response to mmu_lock being contended, which is *very* counter-
4636 	 * productive as this vCPU can't actually make forward progress until
4637 	 * the invalidation completes.
4638 	 *
4639 	 * Retrying now can also avoid unnessary lock contention in the primary
4640 	 * MMU, as the primary MMU doesn't necessarily hold a single lock for
4641 	 * the duration of the invalidation, i.e. faulting in a conflicting pfn
4642 	 * can cause the invalidation to take longer by holding locks that are
4643 	 * needed to complete the invalidation.
4644 	 *
4645 	 * Do the pre-check even for non-preemtible kernels, i.e. even if KVM
4646 	 * will never yield mmu_lock in response to contention, as this vCPU is
4647 	 * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
4648 	 * to detect retry guarantees the worst case latency for the vCPU.
4649 	 */
4650 	if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn))
4651 		return RET_PF_RETRY;
4652 
4653 	ret = __kvm_mmu_faultin_pfn(vcpu, fault);
4654 	if (ret != RET_PF_CONTINUE)
4655 		return ret;
4656 
4657 	if (unlikely(is_error_pfn(fault->pfn)))
4658 		return kvm_handle_error_pfn(vcpu, fault);
4659 
4660 	if (WARN_ON_ONCE(!fault->slot || is_noslot_pfn(fault->pfn)))
4661 		return kvm_handle_noslot_fault(vcpu, fault, access);
4662 
4663 	/*
4664 	 * Check again for a relevant mmu_notifier invalidation event purely to
4665 	 * avoid contending mmu_lock.  Most invalidations will be detected by
4666 	 * the previous check, but checking is extremely cheap relative to the
4667 	 * overall cost of failing to detect the invalidation until after
4668 	 * mmu_lock is acquired.
4669 	 */
4670 	if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) {
4671 		kvm_mmu_finish_page_fault(vcpu, fault, RET_PF_RETRY);
4672 		return RET_PF_RETRY;
4673 	}
4674 
4675 	return RET_PF_CONTINUE;
4676 }
4677 
4678 /*
4679  * Returns true if the page fault is stale and needs to be retried, i.e. if the
4680  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
4681  */
is_page_fault_stale(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4682 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
4683 				struct kvm_page_fault *fault)
4684 {
4685 	struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4686 
4687 	/* Special roots, e.g. pae_root, are not backed by shadow pages. */
4688 	if (sp && is_obsolete_sp(vcpu->kvm, sp))
4689 		return true;
4690 
4691 	/*
4692 	 * Roots without an associated shadow page are considered invalid if
4693 	 * there is a pending request to free obsolete roots.  The request is
4694 	 * only a hint that the current root _may_ be obsolete and needs to be
4695 	 * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
4696 	 * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
4697 	 * to reload even if no vCPU is actively using the root.
4698 	 */
4699 	if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
4700 		return true;
4701 
4702 	/*
4703 	 * Check for a relevant mmu_notifier invalidation event one last time
4704 	 * now that mmu_lock is held, as the "unsafe" checks performed without
4705 	 * holding mmu_lock can get false negatives.
4706 	 */
4707 	return fault->slot &&
4708 	       mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
4709 }
4710 
direct_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4711 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4712 {
4713 	int r;
4714 
4715 	/* Dummy roots are used only for shadowing bad guest roots. */
4716 	if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4717 		return RET_PF_RETRY;
4718 
4719 	if (page_fault_handle_page_track(vcpu, fault))
4720 		return RET_PF_WRITE_PROTECTED;
4721 
4722 	r = fast_page_fault(vcpu, fault);
4723 	if (r != RET_PF_INVALID)
4724 		return r;
4725 
4726 	r = mmu_topup_memory_caches(vcpu, false);
4727 	if (r)
4728 		return r;
4729 
4730 	r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
4731 	if (r != RET_PF_CONTINUE)
4732 		return r;
4733 
4734 	r = RET_PF_RETRY;
4735 	write_lock(&vcpu->kvm->mmu_lock);
4736 
4737 	if (is_page_fault_stale(vcpu, fault))
4738 		goto out_unlock;
4739 
4740 	r = make_mmu_pages_available(vcpu);
4741 	if (r)
4742 		goto out_unlock;
4743 
4744 	r = direct_map(vcpu, fault);
4745 
4746 out_unlock:
4747 	kvm_mmu_finish_page_fault(vcpu, fault, r);
4748 	write_unlock(&vcpu->kvm->mmu_lock);
4749 	return r;
4750 }
4751 
nonpaging_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4752 static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
4753 				struct kvm_page_fault *fault)
4754 {
4755 	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
4756 	fault->max_level = PG_LEVEL_2M;
4757 	return direct_page_fault(vcpu, fault);
4758 }
4759 
kvm_handle_page_fault(struct kvm_vcpu * vcpu,u64 error_code,u64 fault_address,char * insn,int insn_len)4760 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4761 				u64 fault_address, char *insn, int insn_len)
4762 {
4763 	int r = 1;
4764 	u32 flags = vcpu->arch.apf.host_apf_flags;
4765 
4766 #ifndef CONFIG_X86_64
4767 	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
4768 	if (WARN_ON_ONCE(fault_address >> 32))
4769 		return -EFAULT;
4770 #endif
4771 	/*
4772 	 * Legacy #PF exception only have a 32-bit error code.  Simply drop the
4773 	 * upper bits as KVM doesn't use them for #PF (because they are never
4774 	 * set), and to ensure there are no collisions with KVM-defined bits.
4775 	 */
4776 	if (WARN_ON_ONCE(error_code >> 32))
4777 		error_code = lower_32_bits(error_code);
4778 
4779 	/*
4780 	 * Restrict KVM-defined flags to bits 63:32 so that it's impossible for
4781 	 * them to conflict with #PF error codes, which are limited to 32 bits.
4782 	 */
4783 	BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK));
4784 
4785 	vcpu->arch.l1tf_flush_l1d = true;
4786 	if (!flags) {
4787 		trace_kvm_page_fault(vcpu, fault_address, error_code);
4788 
4789 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4790 				insn_len);
4791 	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4792 		vcpu->arch.apf.host_apf_flags = 0;
4793 		local_irq_disable();
4794 		kvm_async_pf_task_wait_schedule(fault_address);
4795 		local_irq_enable();
4796 	} else {
4797 		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4798 	}
4799 
4800 	return r;
4801 }
4802 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
4803 
4804 #ifdef CONFIG_X86_64
kvm_tdp_mmu_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4805 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
4806 				  struct kvm_page_fault *fault)
4807 {
4808 	int r;
4809 
4810 	if (page_fault_handle_page_track(vcpu, fault))
4811 		return RET_PF_WRITE_PROTECTED;
4812 
4813 	r = fast_page_fault(vcpu, fault);
4814 	if (r != RET_PF_INVALID)
4815 		return r;
4816 
4817 	r = mmu_topup_memory_caches(vcpu, false);
4818 	if (r)
4819 		return r;
4820 
4821 	r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
4822 	if (r != RET_PF_CONTINUE)
4823 		return r;
4824 
4825 	r = RET_PF_RETRY;
4826 	read_lock(&vcpu->kvm->mmu_lock);
4827 
4828 	if (is_page_fault_stale(vcpu, fault))
4829 		goto out_unlock;
4830 
4831 	r = kvm_tdp_mmu_map(vcpu, fault);
4832 
4833 out_unlock:
4834 	kvm_mmu_finish_page_fault(vcpu, fault, r);
4835 	read_unlock(&vcpu->kvm->mmu_lock);
4836 	return r;
4837 }
4838 #endif
4839 
kvm_tdp_page_fault(struct kvm_vcpu * vcpu,struct kvm_page_fault * fault)4840 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4841 {
4842 #ifdef CONFIG_X86_64
4843 	if (tdp_mmu_enabled)
4844 		return kvm_tdp_mmu_page_fault(vcpu, fault);
4845 #endif
4846 
4847 	return direct_page_fault(vcpu, fault);
4848 }
4849 
kvm_tdp_map_page(struct kvm_vcpu * vcpu,gpa_t gpa,u64 error_code,u8 * level)4850 int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level)
4851 {
4852 	int r;
4853 
4854 	/*
4855 	 * Restrict to TDP page fault, since that's the only case where the MMU
4856 	 * is indexed by GPA.
4857 	 */
4858 	if (vcpu->arch.mmu->page_fault != kvm_tdp_page_fault)
4859 		return -EOPNOTSUPP;
4860 
4861 	do {
4862 		if (signal_pending(current))
4863 			return -EINTR;
4864 
4865 		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
4866 			return -EIO;
4867 
4868 		cond_resched();
4869 		r = kvm_mmu_do_page_fault(vcpu, gpa, error_code, true, NULL, level);
4870 	} while (r == RET_PF_RETRY);
4871 
4872 	if (r < 0)
4873 		return r;
4874 
4875 	switch (r) {
4876 	case RET_PF_FIXED:
4877 	case RET_PF_SPURIOUS:
4878 	case RET_PF_WRITE_PROTECTED:
4879 		return 0;
4880 
4881 	case RET_PF_EMULATE:
4882 		return -ENOENT;
4883 
4884 	case RET_PF_RETRY:
4885 	case RET_PF_CONTINUE:
4886 	case RET_PF_INVALID:
4887 	default:
4888 		WARN_ONCE(1, "could not fix page fault during prefault");
4889 		return -EIO;
4890 	}
4891 }
4892 EXPORT_SYMBOL_GPL(kvm_tdp_map_page);
4893 
kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu * vcpu,struct kvm_pre_fault_memory * range)4894 long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4895 				    struct kvm_pre_fault_memory *range)
4896 {
4897 	u64 error_code = PFERR_GUEST_FINAL_MASK;
4898 	u8 level = PG_LEVEL_4K;
4899 	u64 direct_bits;
4900 	u64 end;
4901 	int r;
4902 
4903 	if (!vcpu->kvm->arch.pre_fault_allowed)
4904 		return -EOPNOTSUPP;
4905 
4906 	if (kvm_is_gfn_alias(vcpu->kvm, gpa_to_gfn(range->gpa)))
4907 		return -EINVAL;
4908 
4909 	/*
4910 	 * reload is efficient when called repeatedly, so we can do it on
4911 	 * every iteration.
4912 	 */
4913 	r = kvm_mmu_reload(vcpu);
4914 	if (r)
4915 		return r;
4916 
4917 	direct_bits = 0;
4918 	if (kvm_arch_has_private_mem(vcpu->kvm) &&
4919 	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))
4920 		error_code |= PFERR_PRIVATE_ACCESS;
4921 	else
4922 		direct_bits = gfn_to_gpa(kvm_gfn_direct_bits(vcpu->kvm));
4923 
4924 	/*
4925 	 * Shadow paging uses GVA for kvm page fault, so restrict to
4926 	 * two-dimensional paging.
4927 	 */
4928 	r = kvm_tdp_map_page(vcpu, range->gpa | direct_bits, error_code, &level);
4929 	if (r < 0)
4930 		return r;
4931 
4932 	/*
4933 	 * If the mapping that covers range->gpa can use a huge page, it
4934 	 * may start below it or end after range->gpa + range->size.
4935 	 */
4936 	end = (range->gpa & KVM_HPAGE_MASK(level)) + KVM_HPAGE_SIZE(level);
4937 	return min(range->size, end - range->gpa);
4938 }
4939 
nonpaging_init_context(struct kvm_mmu * context)4940 static void nonpaging_init_context(struct kvm_mmu *context)
4941 {
4942 	context->page_fault = nonpaging_page_fault;
4943 	context->gva_to_gpa = nonpaging_gva_to_gpa;
4944 	context->sync_spte = NULL;
4945 }
4946 
is_root_usable(struct kvm_mmu_root_info * root,gpa_t pgd,union kvm_mmu_page_role role)4947 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4948 				  union kvm_mmu_page_role role)
4949 {
4950 	struct kvm_mmu_page *sp;
4951 
4952 	if (!VALID_PAGE(root->hpa))
4953 		return false;
4954 
4955 	if (!role.direct && pgd != root->pgd)
4956 		return false;
4957 
4958 	sp = root_to_sp(root->hpa);
4959 	if (WARN_ON_ONCE(!sp))
4960 		return false;
4961 
4962 	return role.word == sp->role.word;
4963 }
4964 
4965 /*
4966  * Find out if a previously cached root matching the new pgd/role is available,
4967  * and insert the current root as the MRU in the cache.
4968  * If a matching root is found, it is assigned to kvm_mmu->root and
4969  * true is returned.
4970  * If no match is found, kvm_mmu->root is left invalid, the LRU root is
4971  * evicted to make room for the current root, and false is returned.
4972  */
cached_root_find_and_keep_current(struct kvm * kvm,struct kvm_mmu * mmu,gpa_t new_pgd,union kvm_mmu_page_role new_role)4973 static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
4974 					      gpa_t new_pgd,
4975 					      union kvm_mmu_page_role new_role)
4976 {
4977 	uint i;
4978 
4979 	if (is_root_usable(&mmu->root, new_pgd, new_role))
4980 		return true;
4981 
4982 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4983 		/*
4984 		 * The swaps end up rotating the cache like this:
4985 		 *   C   0 1 2 3   (on entry to the function)
4986 		 *   0   C 1 2 3
4987 		 *   1   C 0 2 3
4988 		 *   2   C 0 1 3
4989 		 *   3   C 0 1 2   (on exit from the loop)
4990 		 */
4991 		swap(mmu->root, mmu->prev_roots[i]);
4992 		if (is_root_usable(&mmu->root, new_pgd, new_role))
4993 			return true;
4994 	}
4995 
4996 	kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
4997 	return false;
4998 }
4999 
5000 /*
5001  * Find out if a previously cached root matching the new pgd/role is available.
5002  * On entry, mmu->root is invalid.
5003  * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
5004  * of the cache becomes invalid, and true is returned.
5005  * If no match is found, kvm_mmu->root is left invalid and false is returned.
5006  */
cached_root_find_without_current(struct kvm * kvm,struct kvm_mmu * mmu,gpa_t new_pgd,union kvm_mmu_page_role new_role)5007 static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
5008 					     gpa_t new_pgd,
5009 					     union kvm_mmu_page_role new_role)
5010 {
5011 	uint i;
5012 
5013 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5014 		if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role))
5015 			goto hit;
5016 
5017 	return false;
5018 
5019 hit:
5020 	swap(mmu->root, mmu->prev_roots[i]);
5021 	/* Bubble up the remaining roots.  */
5022 	for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++)
5023 		mmu->prev_roots[i] = mmu->prev_roots[i + 1];
5024 	mmu->prev_roots[i].hpa = INVALID_PAGE;
5025 	return true;
5026 }
5027 
fast_pgd_switch(struct kvm * kvm,struct kvm_mmu * mmu,gpa_t new_pgd,union kvm_mmu_page_role new_role)5028 static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
5029 			    gpa_t new_pgd, union kvm_mmu_page_role new_role)
5030 {
5031 	/*
5032 	 * Limit reuse to 64-bit hosts+VMs without "special" roots in order to
5033 	 * avoid having to deal with PDPTEs and other complexities.
5034 	 */
5035 	if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
5036 		kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
5037 
5038 	if (VALID_PAGE(mmu->root.hpa))
5039 		return cached_root_find_and_keep_current(kvm, mmu, new_pgd, new_role);
5040 	else
5041 		return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
5042 }
5043 
kvm_mmu_new_pgd(struct kvm_vcpu * vcpu,gpa_t new_pgd)5044 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
5045 {
5046 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5047 	union kvm_mmu_page_role new_role = mmu->root_role;
5048 
5049 	/*
5050 	 * Return immediately if no usable root was found, kvm_mmu_reload()
5051 	 * will establish a valid root prior to the next VM-Enter.
5052 	 */
5053 	if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role))
5054 		return;
5055 
5056 	/*
5057 	 * It's possible that the cached previous root page is obsolete because
5058 	 * of a change in the MMU generation number. However, changing the
5059 	 * generation number is accompanied by KVM_REQ_MMU_FREE_OBSOLETE_ROOTS,
5060 	 * which will free the root set here and allocate a new one.
5061 	 */
5062 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
5063 
5064 	if (force_flush_and_sync_on_reuse) {
5065 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
5066 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
5067 	}
5068 
5069 	/*
5070 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
5071 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
5072 	 * valid. So clear any cached MMIO info even when we don't need to sync
5073 	 * the shadow page tables.
5074 	 */
5075 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
5076 
5077 	/*
5078 	 * If this is a direct root page, it doesn't have a write flooding
5079 	 * count. Otherwise, clear the write flooding count.
5080 	 */
5081 	if (!new_role.direct) {
5082 		struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
5083 
5084 		if (!WARN_ON_ONCE(!sp))
5085 			__clear_sp_write_flooding_count(sp);
5086 	}
5087 }
5088 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
5089 
sync_mmio_spte(struct kvm_vcpu * vcpu,u64 * sptep,gfn_t gfn,unsigned int access)5090 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
5091 			   unsigned int access)
5092 {
5093 	if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) {
5094 		if (gfn != get_mmio_spte_gfn(*sptep)) {
5095 			mmu_spte_clear_no_track(sptep);
5096 			return true;
5097 		}
5098 
5099 		mark_mmio_spte(vcpu, sptep, gfn, access);
5100 		return true;
5101 	}
5102 
5103 	return false;
5104 }
5105 
5106 #define PTTYPE_EPT 18 /* arbitrary */
5107 #define PTTYPE PTTYPE_EPT
5108 #include "paging_tmpl.h"
5109 #undef PTTYPE
5110 
5111 #define PTTYPE 64
5112 #include "paging_tmpl.h"
5113 #undef PTTYPE
5114 
5115 #define PTTYPE 32
5116 #include "paging_tmpl.h"
5117 #undef PTTYPE
5118 
__reset_rsvds_bits_mask(struct rsvd_bits_validate * rsvd_check,u64 pa_bits_rsvd,int level,bool nx,bool gbpages,bool pse,bool amd)5119 static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
5120 				    u64 pa_bits_rsvd, int level, bool nx,
5121 				    bool gbpages, bool pse, bool amd)
5122 {
5123 	u64 gbpages_bit_rsvd = 0;
5124 	u64 nonleaf_bit8_rsvd = 0;
5125 	u64 high_bits_rsvd;
5126 
5127 	rsvd_check->bad_mt_xwr = 0;
5128 
5129 	if (!gbpages)
5130 		gbpages_bit_rsvd = rsvd_bits(7, 7);
5131 
5132 	if (level == PT32E_ROOT_LEVEL)
5133 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
5134 	else
5135 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
5136 
5137 	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
5138 	if (!nx)
5139 		high_bits_rsvd |= rsvd_bits(63, 63);
5140 
5141 	/*
5142 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
5143 	 * leaf entries) on AMD CPUs only.
5144 	 */
5145 	if (amd)
5146 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
5147 
5148 	switch (level) {
5149 	case PT32_ROOT_LEVEL:
5150 		/* no rsvd bits for 2 level 4K page table entries */
5151 		rsvd_check->rsvd_bits_mask[0][1] = 0;
5152 		rsvd_check->rsvd_bits_mask[0][0] = 0;
5153 		rsvd_check->rsvd_bits_mask[1][0] =
5154 			rsvd_check->rsvd_bits_mask[0][0];
5155 
5156 		if (!pse) {
5157 			rsvd_check->rsvd_bits_mask[1][1] = 0;
5158 			break;
5159 		}
5160 
5161 		if (is_cpuid_PSE36())
5162 			/* 36bits PSE 4MB page */
5163 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
5164 		else
5165 			/* 32 bits PSE 4MB page */
5166 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
5167 		break;
5168 	case PT32E_ROOT_LEVEL:
5169 		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
5170 						   high_bits_rsvd |
5171 						   rsvd_bits(5, 8) |
5172 						   rsvd_bits(1, 2);	/* PDPTE */
5173 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
5174 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
5175 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
5176 						   rsvd_bits(13, 20);	/* large page */
5177 		rsvd_check->rsvd_bits_mask[1][0] =
5178 			rsvd_check->rsvd_bits_mask[0][0];
5179 		break;
5180 	case PT64_ROOT_5LEVEL:
5181 		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
5182 						   nonleaf_bit8_rsvd |
5183 						   rsvd_bits(7, 7);
5184 		rsvd_check->rsvd_bits_mask[1][4] =
5185 			rsvd_check->rsvd_bits_mask[0][4];
5186 		fallthrough;
5187 	case PT64_ROOT_4LEVEL:
5188 		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
5189 						   nonleaf_bit8_rsvd |
5190 						   rsvd_bits(7, 7);
5191 		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
5192 						   gbpages_bit_rsvd;
5193 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
5194 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
5195 		rsvd_check->rsvd_bits_mask[1][3] =
5196 			rsvd_check->rsvd_bits_mask[0][3];
5197 		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
5198 						   gbpages_bit_rsvd |
5199 						   rsvd_bits(13, 29);
5200 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
5201 						   rsvd_bits(13, 20); /* large page */
5202 		rsvd_check->rsvd_bits_mask[1][0] =
5203 			rsvd_check->rsvd_bits_mask[0][0];
5204 		break;
5205 	}
5206 }
5207 
reset_guest_rsvds_bits_mask(struct kvm_vcpu * vcpu,struct kvm_mmu * context)5208 static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
5209 					struct kvm_mmu *context)
5210 {
5211 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
5212 				vcpu->arch.reserved_gpa_bits,
5213 				context->cpu_role.base.level, is_efer_nx(context),
5214 				guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
5215 				is_cr4_pse(context),
5216 				guest_cpuid_is_amd_compatible(vcpu));
5217 }
5218 
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate * rsvd_check,u64 pa_bits_rsvd,bool execonly,int huge_page_level)5219 static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
5220 					u64 pa_bits_rsvd, bool execonly,
5221 					int huge_page_level)
5222 {
5223 	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
5224 	u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
5225 	u64 bad_mt_xwr;
5226 
5227 	if (huge_page_level < PG_LEVEL_1G)
5228 		large_1g_rsvd = rsvd_bits(7, 7);
5229 	if (huge_page_level < PG_LEVEL_2M)
5230 		large_2m_rsvd = rsvd_bits(7, 7);
5231 
5232 	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
5233 	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
5234 	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
5235 	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
5236 	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
5237 
5238 	/* large page */
5239 	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
5240 	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
5241 	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
5242 	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
5243 	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
5244 
5245 	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
5246 	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
5247 	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
5248 	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
5249 	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
5250 	if (!execonly) {
5251 		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
5252 		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
5253 	}
5254 	rsvd_check->bad_mt_xwr = bad_mt_xwr;
5255 }
5256 
reset_rsvds_bits_mask_ept(struct kvm_vcpu * vcpu,struct kvm_mmu * context,bool execonly,int huge_page_level)5257 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
5258 		struct kvm_mmu *context, bool execonly, int huge_page_level)
5259 {
5260 	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
5261 				    vcpu->arch.reserved_gpa_bits, execonly,
5262 				    huge_page_level);
5263 }
5264 
reserved_hpa_bits(void)5265 static inline u64 reserved_hpa_bits(void)
5266 {
5267 	return rsvd_bits(kvm_host.maxphyaddr, 63);
5268 }
5269 
5270 /*
5271  * the page table on host is the shadow page table for the page
5272  * table in guest or amd nested guest, its mmu features completely
5273  * follow the features in guest.
5274  */
reset_shadow_zero_bits_mask(struct kvm_vcpu * vcpu,struct kvm_mmu * context)5275 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
5276 					struct kvm_mmu *context)
5277 {
5278 	/* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */
5279 	bool is_amd = true;
5280 	/* KVM doesn't use 2-level page tables for the shadow MMU. */
5281 	bool is_pse = false;
5282 	struct rsvd_bits_validate *shadow_zero_check;
5283 	int i;
5284 
5285 	WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL);
5286 
5287 	shadow_zero_check = &context->shadow_zero_check;
5288 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5289 				context->root_role.level,
5290 				context->root_role.efer_nx,
5291 				guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
5292 				is_pse, is_amd);
5293 
5294 	if (!shadow_me_mask)
5295 		return;
5296 
5297 	for (i = context->root_role.level; --i >= 0;) {
5298 		/*
5299 		 * So far shadow_me_value is a constant during KVM's life
5300 		 * time.  Bits in shadow_me_value are allowed to be set.
5301 		 * Bits in shadow_me_mask but not in shadow_me_value are
5302 		 * not allowed to be set.
5303 		 */
5304 		shadow_zero_check->rsvd_bits_mask[0][i] |= shadow_me_mask;
5305 		shadow_zero_check->rsvd_bits_mask[1][i] |= shadow_me_mask;
5306 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_value;
5307 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_value;
5308 	}
5309 
5310 }
5311 
boot_cpu_is_amd(void)5312 static inline bool boot_cpu_is_amd(void)
5313 {
5314 	WARN_ON_ONCE(!tdp_enabled);
5315 	return shadow_x_mask == 0;
5316 }
5317 
5318 /*
5319  * the direct page table on host, use as much mmu features as
5320  * possible, however, kvm currently does not do execution-protection.
5321  */
reset_tdp_shadow_zero_bits_mask(struct kvm_mmu * context)5322 static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
5323 {
5324 	struct rsvd_bits_validate *shadow_zero_check;
5325 	int i;
5326 
5327 	shadow_zero_check = &context->shadow_zero_check;
5328 
5329 	if (boot_cpu_is_amd())
5330 		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5331 					context->root_role.level, true,
5332 					boot_cpu_has(X86_FEATURE_GBPAGES),
5333 					false, true);
5334 	else
5335 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
5336 					    reserved_hpa_bits(), false,
5337 					    max_huge_page_level);
5338 
5339 	if (!shadow_me_mask)
5340 		return;
5341 
5342 	for (i = context->root_role.level; --i >= 0;) {
5343 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
5344 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
5345 	}
5346 }
5347 
5348 /*
5349  * as the comments in reset_shadow_zero_bits_mask() except it
5350  * is the shadow page table for intel nested guest.
5351  */
5352 static void
reset_ept_shadow_zero_bits_mask(struct kvm_mmu * context,bool execonly)5353 reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
5354 {
5355 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
5356 				    reserved_hpa_bits(), execonly,
5357 				    max_huge_page_level);
5358 }
5359 
5360 #define BYTE_MASK(access) \
5361 	((1 & (access) ? 2 : 0) | \
5362 	 (2 & (access) ? 4 : 0) | \
5363 	 (3 & (access) ? 8 : 0) | \
5364 	 (4 & (access) ? 16 : 0) | \
5365 	 (5 & (access) ? 32 : 0) | \
5366 	 (6 & (access) ? 64 : 0) | \
5367 	 (7 & (access) ? 128 : 0))
5368 
5369 
update_permission_bitmask(struct kvm_mmu * mmu,bool ept)5370 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
5371 {
5372 	unsigned byte;
5373 
5374 	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
5375 	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
5376 	const u8 u = BYTE_MASK(ACC_USER_MASK);
5377 
5378 	bool cr4_smep = is_cr4_smep(mmu);
5379 	bool cr4_smap = is_cr4_smap(mmu);
5380 	bool cr0_wp = is_cr0_wp(mmu);
5381 	bool efer_nx = is_efer_nx(mmu);
5382 
5383 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
5384 		unsigned pfec = byte << 1;
5385 
5386 		/*
5387 		 * Each "*f" variable has a 1 bit for each UWX value
5388 		 * that causes a fault with the given PFEC.
5389 		 */
5390 
5391 		/* Faults from writes to non-writable pages */
5392 		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
5393 		/* Faults from user mode accesses to supervisor pages */
5394 		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
5395 		/* Faults from fetches of non-executable pages*/
5396 		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
5397 		/* Faults from kernel mode fetches of user pages */
5398 		u8 smepf = 0;
5399 		/* Faults from kernel mode accesses of user pages */
5400 		u8 smapf = 0;
5401 
5402 		if (!ept) {
5403 			/* Faults from kernel mode accesses to user pages */
5404 			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
5405 
5406 			/* Not really needed: !nx will cause pte.nx to fault */
5407 			if (!efer_nx)
5408 				ff = 0;
5409 
5410 			/* Allow supervisor writes if !cr0.wp */
5411 			if (!cr0_wp)
5412 				wf = (pfec & PFERR_USER_MASK) ? wf : 0;
5413 
5414 			/* Disallow supervisor fetches of user code if cr4.smep */
5415 			if (cr4_smep)
5416 				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
5417 
5418 			/*
5419 			 * SMAP:kernel-mode data accesses from user-mode
5420 			 * mappings should fault. A fault is considered
5421 			 * as a SMAP violation if all of the following
5422 			 * conditions are true:
5423 			 *   - X86_CR4_SMAP is set in CR4
5424 			 *   - A user page is accessed
5425 			 *   - The access is not a fetch
5426 			 *   - The access is supervisor mode
5427 			 *   - If implicit supervisor access or X86_EFLAGS_AC is clear
5428 			 *
5429 			 * Here, we cover the first four conditions.
5430 			 * The fifth is computed dynamically in permission_fault();
5431 			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
5432 			 * *not* subject to SMAP restrictions.
5433 			 */
5434 			if (cr4_smap)
5435 				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
5436 		}
5437 
5438 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
5439 	}
5440 }
5441 
5442 /*
5443 * PKU is an additional mechanism by which the paging controls access to
5444 * user-mode addresses based on the value in the PKRU register.  Protection
5445 * key violations are reported through a bit in the page fault error code.
5446 * Unlike other bits of the error code, the PK bit is not known at the
5447 * call site of e.g. gva_to_gpa; it must be computed directly in
5448 * permission_fault based on two bits of PKRU, on some machine state (CR4,
5449 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
5450 *
5451 * In particular the following conditions come from the error code, the
5452 * page tables and the machine state:
5453 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
5454 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
5455 * - PK is always zero if U=0 in the page tables
5456 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
5457 *
5458 * The PKRU bitmask caches the result of these four conditions.  The error
5459 * code (minus the P bit) and the page table's U bit form an index into the
5460 * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
5461 * with the two bits of the PKRU register corresponding to the protection key.
5462 * For the first three conditions above the bits will be 00, thus masking
5463 * away both AD and WD.  For all reads or if the last condition holds, WD
5464 * only will be masked away.
5465 */
update_pkru_bitmask(struct kvm_mmu * mmu)5466 static void update_pkru_bitmask(struct kvm_mmu *mmu)
5467 {
5468 	unsigned bit;
5469 	bool wp;
5470 
5471 	mmu->pkru_mask = 0;
5472 
5473 	if (!is_cr4_pke(mmu))
5474 		return;
5475 
5476 	wp = is_cr0_wp(mmu);
5477 
5478 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
5479 		unsigned pfec, pkey_bits;
5480 		bool check_pkey, check_write, ff, uf, wf, pte_user;
5481 
5482 		pfec = bit << 1;
5483 		ff = pfec & PFERR_FETCH_MASK;
5484 		uf = pfec & PFERR_USER_MASK;
5485 		wf = pfec & PFERR_WRITE_MASK;
5486 
5487 		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
5488 		pte_user = pfec & PFERR_RSVD_MASK;
5489 
5490 		/*
5491 		 * Only need to check the access which is not an
5492 		 * instruction fetch and is to a user page.
5493 		 */
5494 		check_pkey = (!ff && pte_user);
5495 		/*
5496 		 * write access is controlled by PKRU if it is a
5497 		 * user access or CR0.WP = 1.
5498 		 */
5499 		check_write = check_pkey && wf && (uf || wp);
5500 
5501 		/* PKRU.AD stops both read and write access. */
5502 		pkey_bits = !!check_pkey;
5503 		/* PKRU.WD stops write access. */
5504 		pkey_bits |= (!!check_write) << 1;
5505 
5506 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
5507 	}
5508 }
5509 
reset_guest_paging_metadata(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu)5510 static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
5511 					struct kvm_mmu *mmu)
5512 {
5513 	if (!is_cr0_pg(mmu))
5514 		return;
5515 
5516 	reset_guest_rsvds_bits_mask(vcpu, mmu);
5517 	update_permission_bitmask(mmu, false);
5518 	update_pkru_bitmask(mmu);
5519 }
5520 
paging64_init_context(struct kvm_mmu * context)5521 static void paging64_init_context(struct kvm_mmu *context)
5522 {
5523 	context->page_fault = paging64_page_fault;
5524 	context->gva_to_gpa = paging64_gva_to_gpa;
5525 	context->sync_spte = paging64_sync_spte;
5526 }
5527 
paging32_init_context(struct kvm_mmu * context)5528 static void paging32_init_context(struct kvm_mmu *context)
5529 {
5530 	context->page_fault = paging32_page_fault;
5531 	context->gva_to_gpa = paging32_gva_to_gpa;
5532 	context->sync_spte = paging32_sync_spte;
5533 }
5534 
kvm_calc_cpu_role(struct kvm_vcpu * vcpu,const struct kvm_mmu_role_regs * regs)5535 static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
5536 					    const struct kvm_mmu_role_regs *regs)
5537 {
5538 	union kvm_cpu_role role = {0};
5539 
5540 	role.base.access = ACC_ALL;
5541 	role.base.smm = is_smm(vcpu);
5542 	role.base.guest_mode = is_guest_mode(vcpu);
5543 	role.ext.valid = 1;
5544 
5545 	if (!____is_cr0_pg(regs)) {
5546 		role.base.direct = 1;
5547 		return role;
5548 	}
5549 
5550 	role.base.efer_nx = ____is_efer_nx(regs);
5551 	role.base.cr0_wp = ____is_cr0_wp(regs);
5552 	role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
5553 	role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
5554 	role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
5555 
5556 	if (____is_efer_lma(regs))
5557 		role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL
5558 							: PT64_ROOT_4LEVEL;
5559 	else if (____is_cr4_pae(regs))
5560 		role.base.level = PT32E_ROOT_LEVEL;
5561 	else
5562 		role.base.level = PT32_ROOT_LEVEL;
5563 
5564 	role.ext.cr4_smep = ____is_cr4_smep(regs);
5565 	role.ext.cr4_smap = ____is_cr4_smap(regs);
5566 	role.ext.cr4_pse = ____is_cr4_pse(regs);
5567 
5568 	/* PKEY and LA57 are active iff long mode is active. */
5569 	role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
5570 	role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
5571 	role.ext.efer_lma = ____is_efer_lma(regs);
5572 	return role;
5573 }
5574 
__kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu)5575 void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
5576 					struct kvm_mmu *mmu)
5577 {
5578 	const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP);
5579 
5580 	BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
5581 	BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
5582 
5583 	if (is_cr0_wp(mmu) == cr0_wp)
5584 		return;
5585 
5586 	mmu->cpu_role.base.cr0_wp = cr0_wp;
5587 	reset_guest_paging_metadata(vcpu, mmu);
5588 }
5589 
kvm_mmu_get_tdp_level(struct kvm_vcpu * vcpu)5590 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
5591 {
5592 	int maxpa;
5593 
5594 	if (vcpu->kvm->arch.vm_type == KVM_X86_TDX_VM)
5595 		maxpa = cpuid_query_maxguestphyaddr(vcpu);
5596 	else
5597 		maxpa = cpuid_maxphyaddr(vcpu);
5598 
5599 	/* tdp_root_level is architecture forced level, use it if nonzero */
5600 	if (tdp_root_level)
5601 		return tdp_root_level;
5602 
5603 	/* Use 5-level TDP if and only if it's useful/necessary. */
5604 	if (max_tdp_level == 5 && maxpa <= 48)
5605 		return 4;
5606 
5607 	return max_tdp_level;
5608 }
5609 
kvm_mmu_get_max_tdp_level(void)5610 u8 kvm_mmu_get_max_tdp_level(void)
5611 {
5612 	return tdp_root_level ? tdp_root_level : max_tdp_level;
5613 }
5614 
5615 static union kvm_mmu_page_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu * vcpu,union kvm_cpu_role cpu_role)5616 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
5617 				union kvm_cpu_role cpu_role)
5618 {
5619 	union kvm_mmu_page_role role = {0};
5620 
5621 	role.access = ACC_ALL;
5622 	role.cr0_wp = true;
5623 	role.efer_nx = true;
5624 	role.smm = cpu_role.base.smm;
5625 	role.guest_mode = cpu_role.base.guest_mode;
5626 	role.ad_disabled = !kvm_ad_enabled;
5627 	role.level = kvm_mmu_get_tdp_level(vcpu);
5628 	role.direct = true;
5629 	role.has_4_byte_gpte = false;
5630 
5631 	return role;
5632 }
5633 
init_kvm_tdp_mmu(struct kvm_vcpu * vcpu,union kvm_cpu_role cpu_role)5634 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
5635 			     union kvm_cpu_role cpu_role)
5636 {
5637 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5638 	union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);
5639 
5640 	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5641 	    root_role.word == context->root_role.word)
5642 		return;
5643 
5644 	context->cpu_role.as_u64 = cpu_role.as_u64;
5645 	context->root_role.word = root_role.word;
5646 	context->page_fault = kvm_tdp_page_fault;
5647 	context->sync_spte = NULL;
5648 	context->get_guest_pgd = get_guest_cr3;
5649 	context->get_pdptr = kvm_pdptr_read;
5650 	context->inject_page_fault = kvm_inject_page_fault;
5651 
5652 	if (!is_cr0_pg(context))
5653 		context->gva_to_gpa = nonpaging_gva_to_gpa;
5654 	else if (is_cr4_pae(context))
5655 		context->gva_to_gpa = paging64_gva_to_gpa;
5656 	else
5657 		context->gva_to_gpa = paging32_gva_to_gpa;
5658 
5659 	reset_guest_paging_metadata(vcpu, context);
5660 	reset_tdp_shadow_zero_bits_mask(context);
5661 }
5662 
shadow_mmu_init_context(struct kvm_vcpu * vcpu,struct kvm_mmu * context,union kvm_cpu_role cpu_role,union kvm_mmu_page_role root_role)5663 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
5664 				    union kvm_cpu_role cpu_role,
5665 				    union kvm_mmu_page_role root_role)
5666 {
5667 	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5668 	    root_role.word == context->root_role.word)
5669 		return;
5670 
5671 	context->cpu_role.as_u64 = cpu_role.as_u64;
5672 	context->root_role.word = root_role.word;
5673 
5674 	if (!is_cr0_pg(context))
5675 		nonpaging_init_context(context);
5676 	else if (is_cr4_pae(context))
5677 		paging64_init_context(context);
5678 	else
5679 		paging32_init_context(context);
5680 
5681 	reset_guest_paging_metadata(vcpu, context);
5682 	reset_shadow_zero_bits_mask(vcpu, context);
5683 }
5684 
kvm_init_shadow_mmu(struct kvm_vcpu * vcpu,union kvm_cpu_role cpu_role)5685 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
5686 				union kvm_cpu_role cpu_role)
5687 {
5688 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5689 	union kvm_mmu_page_role root_role;
5690 
5691 	root_role = cpu_role.base;
5692 
5693 	/* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */
5694 	root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);
5695 
5696 	/*
5697 	 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
5698 	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
5699 	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
5700 	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
5701 	 * The iTLB multi-hit workaround can be toggled at any time, so assume
5702 	 * NX can be used by any non-nested shadow MMU to avoid having to reset
5703 	 * MMU contexts.
5704 	 */
5705 	root_role.efer_nx = true;
5706 
5707 	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5708 }
5709 
kvm_init_shadow_npt_mmu(struct kvm_vcpu * vcpu,unsigned long cr0,unsigned long cr4,u64 efer,gpa_t nested_cr3)5710 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
5711 			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
5712 {
5713 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5714 	struct kvm_mmu_role_regs regs = {
5715 		.cr0 = cr0,
5716 		.cr4 = cr4 & ~X86_CR4_PKE,
5717 		.efer = efer,
5718 	};
5719 	union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5720 	union kvm_mmu_page_role root_role;
5721 
5722 	/* NPT requires CR0.PG=1. */
5723 	WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode);
5724 
5725 	root_role = cpu_role.base;
5726 	root_role.level = kvm_mmu_get_tdp_level(vcpu);
5727 	if (root_role.level == PT64_ROOT_5LEVEL &&
5728 	    cpu_role.base.level == PT64_ROOT_4LEVEL)
5729 		root_role.passthrough = 1;
5730 
5731 	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5732 	kvm_mmu_new_pgd(vcpu, nested_cr3);
5733 }
5734 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
5735 
5736 static union kvm_cpu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu * vcpu,bool accessed_dirty,bool execonly,u8 level)5737 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
5738 				   bool execonly, u8 level)
5739 {
5740 	union kvm_cpu_role role = {0};
5741 
5742 	/*
5743 	 * KVM does not support SMM transfer monitors, and consequently does not
5744 	 * support the "entry to SMM" control either.  role.base.smm is always 0.
5745 	 */
5746 	WARN_ON_ONCE(is_smm(vcpu));
5747 	role.base.level = level;
5748 	role.base.has_4_byte_gpte = false;
5749 	role.base.direct = false;
5750 	role.base.ad_disabled = !accessed_dirty;
5751 	role.base.guest_mode = true;
5752 	role.base.access = ACC_ALL;
5753 
5754 	role.ext.word = 0;
5755 	role.ext.execonly = execonly;
5756 	role.ext.valid = 1;
5757 
5758 	return role;
5759 }
5760 
kvm_init_shadow_ept_mmu(struct kvm_vcpu * vcpu,bool execonly,int huge_page_level,bool accessed_dirty,gpa_t new_eptp)5761 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
5762 			     int huge_page_level, bool accessed_dirty,
5763 			     gpa_t new_eptp)
5764 {
5765 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5766 	u8 level = vmx_eptp_page_walk_level(new_eptp);
5767 	union kvm_cpu_role new_mode =
5768 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
5769 						   execonly, level);
5770 
5771 	if (new_mode.as_u64 != context->cpu_role.as_u64) {
5772 		/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
5773 		context->cpu_role.as_u64 = new_mode.as_u64;
5774 		context->root_role.word = new_mode.base.word;
5775 
5776 		context->page_fault = ept_page_fault;
5777 		context->gva_to_gpa = ept_gva_to_gpa;
5778 		context->sync_spte = ept_sync_spte;
5779 
5780 		update_permission_bitmask(context, true);
5781 		context->pkru_mask = 0;
5782 		reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
5783 		reset_ept_shadow_zero_bits_mask(context, execonly);
5784 	}
5785 
5786 	kvm_mmu_new_pgd(vcpu, new_eptp);
5787 }
5788 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
5789 
init_kvm_softmmu(struct kvm_vcpu * vcpu,union kvm_cpu_role cpu_role)5790 static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
5791 			     union kvm_cpu_role cpu_role)
5792 {
5793 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5794 
5795 	kvm_init_shadow_mmu(vcpu, cpu_role);
5796 
5797 	context->get_guest_pgd     = get_guest_cr3;
5798 	context->get_pdptr         = kvm_pdptr_read;
5799 	context->inject_page_fault = kvm_inject_page_fault;
5800 }
5801 
init_kvm_nested_mmu(struct kvm_vcpu * vcpu,union kvm_cpu_role new_mode)5802 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
5803 				union kvm_cpu_role new_mode)
5804 {
5805 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5806 
5807 	if (new_mode.as_u64 == g_context->cpu_role.as_u64)
5808 		return;
5809 
5810 	g_context->cpu_role.as_u64   = new_mode.as_u64;
5811 	g_context->get_guest_pgd     = get_guest_cr3;
5812 	g_context->get_pdptr         = kvm_pdptr_read;
5813 	g_context->inject_page_fault = kvm_inject_page_fault;
5814 
5815 	/*
5816 	 * L2 page tables are never shadowed, so there is no need to sync
5817 	 * SPTEs.
5818 	 */
5819 	g_context->sync_spte         = NULL;
5820 
5821 	/*
5822 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5823 	 * L1's nested page tables (e.g. EPT12). The nested translation
5824 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
5825 	 * L2's page tables as the first level of translation and L1's
5826 	 * nested page tables as the second level of translation. Basically
5827 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
5828 	 */
5829 	if (!is_paging(vcpu))
5830 		g_context->gva_to_gpa = nonpaging_gva_to_gpa;
5831 	else if (is_long_mode(vcpu))
5832 		g_context->gva_to_gpa = paging64_gva_to_gpa;
5833 	else if (is_pae(vcpu))
5834 		g_context->gva_to_gpa = paging64_gva_to_gpa;
5835 	else
5836 		g_context->gva_to_gpa = paging32_gva_to_gpa;
5837 
5838 	reset_guest_paging_metadata(vcpu, g_context);
5839 }
5840 
kvm_init_mmu(struct kvm_vcpu * vcpu)5841 void kvm_init_mmu(struct kvm_vcpu *vcpu)
5842 {
5843 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
5844 	union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5845 
5846 	if (mmu_is_nested(vcpu))
5847 		init_kvm_nested_mmu(vcpu, cpu_role);
5848 	else if (tdp_enabled)
5849 		init_kvm_tdp_mmu(vcpu, cpu_role);
5850 	else
5851 		init_kvm_softmmu(vcpu, cpu_role);
5852 }
5853 EXPORT_SYMBOL_GPL(kvm_init_mmu);
5854 
kvm_mmu_after_set_cpuid(struct kvm_vcpu * vcpu)5855 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
5856 {
5857 	/*
5858 	 * Invalidate all MMU roles to force them to reinitialize as CPUID
5859 	 * information is factored into reserved bit calculations.
5860 	 *
5861 	 * Correctly handling multiple vCPU models with respect to paging and
5862 	 * physical address properties) in a single VM would require tracking
5863 	 * all relevant CPUID information in kvm_mmu_page_role. That is very
5864 	 * undesirable as it would increase the memory requirements for
5865 	 * gfn_write_track (see struct kvm_mmu_page_role comments).  For now
5866 	 * that problem is swept under the rug; KVM's CPUID API is horrific and
5867 	 * it's all but impossible to solve it without introducing a new API.
5868 	 */
5869 	vcpu->arch.root_mmu.root_role.invalid = 1;
5870 	vcpu->arch.guest_mmu.root_role.invalid = 1;
5871 	vcpu->arch.nested_mmu.root_role.invalid = 1;
5872 	vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
5873 	vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
5874 	vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
5875 	kvm_mmu_reset_context(vcpu);
5876 
5877 	/*
5878 	 * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
5879 	 * kvm_arch_vcpu_ioctl().
5880 	 */
5881 	KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm);
5882 }
5883 
kvm_mmu_reset_context(struct kvm_vcpu * vcpu)5884 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
5885 {
5886 	kvm_mmu_unload(vcpu);
5887 	kvm_init_mmu(vcpu);
5888 }
5889 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
5890 
kvm_mmu_load(struct kvm_vcpu * vcpu)5891 int kvm_mmu_load(struct kvm_vcpu *vcpu)
5892 {
5893 	int r;
5894 
5895 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
5896 	if (r)
5897 		goto out;
5898 	r = mmu_alloc_special_roots(vcpu);
5899 	if (r)
5900 		goto out;
5901 	if (vcpu->arch.mmu->root_role.direct)
5902 		r = mmu_alloc_direct_roots(vcpu);
5903 	else
5904 		r = mmu_alloc_shadow_roots(vcpu);
5905 	if (r)
5906 		goto out;
5907 
5908 	kvm_mmu_sync_roots(vcpu);
5909 
5910 	kvm_mmu_load_pgd(vcpu);
5911 
5912 	/*
5913 	 * Flush any TLB entries for the new root, the provenance of the root
5914 	 * is unknown.  Even if KVM ensures there are no stale TLB entries
5915 	 * for a freed root, in theory another hypervisor could have left
5916 	 * stale entries.  Flushing on alloc also allows KVM to skip the TLB
5917 	 * flush when freeing a root (see kvm_tdp_mmu_put_root()).
5918 	 */
5919 	kvm_x86_call(flush_tlb_current)(vcpu);
5920 out:
5921 	return r;
5922 }
5923 EXPORT_SYMBOL_GPL(kvm_mmu_load);
5924 
kvm_mmu_unload(struct kvm_vcpu * vcpu)5925 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5926 {
5927 	struct kvm *kvm = vcpu->kvm;
5928 
5929 	kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5930 	WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5931 	kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5932 	WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5933 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
5934 }
5935 
is_obsolete_root(struct kvm * kvm,hpa_t root_hpa)5936 static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
5937 {
5938 	struct kvm_mmu_page *sp;
5939 
5940 	if (!VALID_PAGE(root_hpa))
5941 		return false;
5942 
5943 	/*
5944 	 * When freeing obsolete roots, treat roots as obsolete if they don't
5945 	 * have an associated shadow page, as it's impossible to determine if
5946 	 * such roots are fresh or stale.  This does mean KVM will get false
5947 	 * positives and free roots that don't strictly need to be freed, but
5948 	 * such false positives are relatively rare:
5949 	 *
5950 	 *  (a) only PAE paging and nested NPT have roots without shadow pages
5951 	 *      (or any shadow paging flavor with a dummy root, see note below)
5952 	 *  (b) remote reloads due to a memslot update obsoletes _all_ roots
5953 	 *  (c) KVM doesn't track previous roots for PAE paging, and the guest
5954 	 *      is unlikely to zap an in-use PGD.
5955 	 *
5956 	 * Note!  Dummy roots are unique in that they are obsoleted by memslot
5957 	 * _creation_!  See also FNAME(fetch).
5958 	 */
5959 	sp = root_to_sp(root_hpa);
5960 	return !sp || is_obsolete_sp(kvm, sp);
5961 }
5962 
__kvm_mmu_free_obsolete_roots(struct kvm * kvm,struct kvm_mmu * mmu)5963 static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
5964 {
5965 	unsigned long roots_to_free = 0;
5966 	int i;
5967 
5968 	if (is_obsolete_root(kvm, mmu->root.hpa))
5969 		roots_to_free |= KVM_MMU_ROOT_CURRENT;
5970 
5971 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5972 		if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
5973 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5974 	}
5975 
5976 	if (roots_to_free)
5977 		kvm_mmu_free_roots(kvm, mmu, roots_to_free);
5978 }
5979 
kvm_mmu_free_obsolete_roots(struct kvm_vcpu * vcpu)5980 void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
5981 {
5982 	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5983 	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
5984 }
5985 EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots);
5986 
mmu_pte_write_fetch_gpte(struct kvm_vcpu * vcpu,gpa_t * gpa,int * bytes)5987 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5988 				    int *bytes)
5989 {
5990 	u64 gentry = 0;
5991 	int r;
5992 
5993 	/*
5994 	 * Assume that the pte write on a page table of the same type
5995 	 * as the current vcpu paging mode since we update the sptes only
5996 	 * when they have the same mode.
5997 	 */
5998 	if (is_pae(vcpu) && *bytes == 4) {
5999 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
6000 		*gpa &= ~(gpa_t)7;
6001 		*bytes = 8;
6002 	}
6003 
6004 	if (*bytes == 4 || *bytes == 8) {
6005 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
6006 		if (r)
6007 			gentry = 0;
6008 	}
6009 
6010 	return gentry;
6011 }
6012 
6013 /*
6014  * If we're seeing too many writes to a page, it may no longer be a page table,
6015  * or we may be forking, in which case it is better to unmap the page.
6016  */
detect_write_flooding(struct kvm_mmu_page * sp)6017 static bool detect_write_flooding(struct kvm_mmu_page *sp)
6018 {
6019 	/*
6020 	 * Skip write-flooding detected for the sp whose level is 1, because
6021 	 * it can become unsync, then the guest page is not write-protected.
6022 	 */
6023 	if (sp->role.level == PG_LEVEL_4K)
6024 		return false;
6025 
6026 	atomic_inc(&sp->write_flooding_count);
6027 	return atomic_read(&sp->write_flooding_count) >= 3;
6028 }
6029 
6030 /*
6031  * Misaligned accesses are too much trouble to fix up; also, they usually
6032  * indicate a page is not used as a page table.
6033  */
detect_write_misaligned(struct kvm_mmu_page * sp,gpa_t gpa,int bytes)6034 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
6035 				    int bytes)
6036 {
6037 	unsigned offset, pte_size, misaligned;
6038 
6039 	offset = offset_in_page(gpa);
6040 	pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
6041 
6042 	/*
6043 	 * Sometimes, the OS only writes the last one bytes to update status
6044 	 * bits, for example, in linux, andb instruction is used in clear_bit().
6045 	 */
6046 	if (!(offset & (pte_size - 1)) && bytes == 1)
6047 		return false;
6048 
6049 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
6050 	misaligned |= bytes < 4;
6051 
6052 	return misaligned;
6053 }
6054 
get_written_sptes(struct kvm_mmu_page * sp,gpa_t gpa,int * nspte)6055 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
6056 {
6057 	unsigned page_offset, quadrant;
6058 	u64 *spte;
6059 	int level;
6060 
6061 	page_offset = offset_in_page(gpa);
6062 	level = sp->role.level;
6063 	*nspte = 1;
6064 	if (sp->role.has_4_byte_gpte) {
6065 		page_offset <<= 1;	/* 32->64 */
6066 		/*
6067 		 * A 32-bit pde maps 4MB while the shadow pdes map
6068 		 * only 2MB.  So we need to double the offset again
6069 		 * and zap two pdes instead of one.
6070 		 */
6071 		if (level == PT32_ROOT_LEVEL) {
6072 			page_offset &= ~7; /* kill rounding error */
6073 			page_offset <<= 1;
6074 			*nspte = 2;
6075 		}
6076 		quadrant = page_offset >> PAGE_SHIFT;
6077 		page_offset &= ~PAGE_MASK;
6078 		if (quadrant != sp->role.quadrant)
6079 			return NULL;
6080 	}
6081 
6082 	spte = &sp->spt[page_offset / sizeof(*spte)];
6083 	return spte;
6084 }
6085 
kvm_mmu_track_write(struct kvm_vcpu * vcpu,gpa_t gpa,const u8 * new,int bytes)6086 void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
6087 			 int bytes)
6088 {
6089 	gfn_t gfn = gpa >> PAGE_SHIFT;
6090 	struct kvm_mmu_page *sp;
6091 	LIST_HEAD(invalid_list);
6092 	u64 entry, gentry, *spte;
6093 	int npte;
6094 	bool flush = false;
6095 
6096 	/*
6097 	 * When emulating guest writes, ensure the written value is visible to
6098 	 * any task that is handling page faults before checking whether or not
6099 	 * KVM is shadowing a guest PTE.  This ensures either KVM will create
6100 	 * the correct SPTE in the page fault handler, or this task will see
6101 	 * a non-zero indirect_shadow_pages.  Pairs with the smp_mb() in
6102 	 * account_shadowed().
6103 	 */
6104 	smp_mb();
6105 	if (!vcpu->kvm->arch.indirect_shadow_pages)
6106 		return;
6107 
6108 	write_lock(&vcpu->kvm->mmu_lock);
6109 
6110 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
6111 
6112 	++vcpu->kvm->stat.mmu_pte_write;
6113 
6114 	for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
6115 		if (detect_write_misaligned(sp, gpa, bytes) ||
6116 		      detect_write_flooding(sp)) {
6117 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
6118 			++vcpu->kvm->stat.mmu_flooded;
6119 			continue;
6120 		}
6121 
6122 		spte = get_written_sptes(sp, gpa, &npte);
6123 		if (!spte)
6124 			continue;
6125 
6126 		while (npte--) {
6127 			entry = *spte;
6128 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
6129 			if (gentry && sp->role.level != PG_LEVEL_4K)
6130 				++vcpu->kvm->stat.mmu_pde_zapped;
6131 			if (is_shadow_present_pte(entry))
6132 				flush = true;
6133 			++spte;
6134 		}
6135 	}
6136 	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
6137 	write_unlock(&vcpu->kvm->mmu_lock);
6138 }
6139 
is_write_to_guest_page_table(u64 error_code)6140 static bool is_write_to_guest_page_table(u64 error_code)
6141 {
6142 	const u64 mask = PFERR_GUEST_PAGE_MASK | PFERR_WRITE_MASK | PFERR_PRESENT_MASK;
6143 
6144 	return (error_code & mask) == mask;
6145 }
6146 
kvm_mmu_write_protect_fault(struct kvm_vcpu * vcpu,gpa_t cr2_or_gpa,u64 error_code,int * emulation_type)6147 static int kvm_mmu_write_protect_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
6148 				       u64 error_code, int *emulation_type)
6149 {
6150 	bool direct = vcpu->arch.mmu->root_role.direct;
6151 
6152 	/*
6153 	 * Do not try to unprotect and retry if the vCPU re-faulted on the same
6154 	 * RIP with the same address that was previously unprotected, as doing
6155 	 * so will likely put the vCPU into an infinite.  E.g. if the vCPU uses
6156 	 * a non-page-table modifying instruction on the PDE that points to the
6157 	 * instruction, then unprotecting the gfn will unmap the instruction's
6158 	 * code, i.e. make it impossible for the instruction to ever complete.
6159 	 */
6160 	if (vcpu->arch.last_retry_eip == kvm_rip_read(vcpu) &&
6161 	    vcpu->arch.last_retry_addr == cr2_or_gpa)
6162 		return RET_PF_EMULATE;
6163 
6164 	/*
6165 	 * Reset the unprotect+retry values that guard against infinite loops.
6166 	 * The values will be refreshed if KVM explicitly unprotects a gfn and
6167 	 * retries, in all other cases it's safe to retry in the future even if
6168 	 * the next page fault happens on the same RIP+address.
6169 	 */
6170 	vcpu->arch.last_retry_eip = 0;
6171 	vcpu->arch.last_retry_addr = 0;
6172 
6173 	/*
6174 	 * It should be impossible to reach this point with an MMIO cache hit,
6175 	 * as RET_PF_WRITE_PROTECTED is returned if and only if there's a valid,
6176 	 * writable memslot, and creating a memslot should invalidate the MMIO
6177 	 * cache by way of changing the memslot generation.  WARN and disallow
6178 	 * retry if MMIO is detected, as retrying MMIO emulation is pointless
6179 	 * and could put the vCPU into an infinite loop because the processor
6180 	 * will keep faulting on the non-existent MMIO address.
6181 	 */
6182 	if (WARN_ON_ONCE(mmio_info_in_cache(vcpu, cr2_or_gpa, direct)))
6183 		return RET_PF_EMULATE;
6184 
6185 	/*
6186 	 * Before emulating the instruction, check to see if the access was due
6187 	 * to a read-only violation while the CPU was walking non-nested NPT
6188 	 * page tables, i.e. for a direct MMU, for _guest_ page tables in L1.
6189 	 * If L1 is sharing (a subset of) its page tables with L2, e.g. by
6190 	 * having nCR3 share lower level page tables with hCR3, then when KVM
6191 	 * (L0) write-protects the nested NPTs, i.e. npt12 entries, KVM is also
6192 	 * unknowingly write-protecting L1's guest page tables, which KVM isn't
6193 	 * shadowing.
6194 	 *
6195 	 * Because the CPU (by default) walks NPT page tables using a write
6196 	 * access (to ensure the CPU can do A/D updates), page walks in L1 can
6197 	 * trigger write faults for the above case even when L1 isn't modifying
6198 	 * PTEs.  As a result, KVM will unnecessarily emulate (or at least, try
6199 	 * to emulate) an excessive number of L1 instructions; because L1's MMU
6200 	 * isn't shadowed by KVM, there is no need to write-protect L1's gPTEs
6201 	 * and thus no need to emulate in order to guarantee forward progress.
6202 	 *
6203 	 * Try to unprotect the gfn, i.e. zap any shadow pages, so that L1 can
6204 	 * proceed without triggering emulation.  If one or more shadow pages
6205 	 * was zapped, skip emulation and resume L1 to let it natively execute
6206 	 * the instruction.  If no shadow pages were zapped, then the write-
6207 	 * fault is due to something else entirely, i.e. KVM needs to emulate,
6208 	 * as resuming the guest will put it into an infinite loop.
6209 	 *
6210 	 * Note, this code also applies to Intel CPUs, even though it is *very*
6211 	 * unlikely that an L1 will share its page tables (IA32/PAE/paging64
6212 	 * format) with L2's page tables (EPT format).
6213 	 *
6214 	 * For indirect MMUs, i.e. if KVM is shadowing the current MMU, try to
6215 	 * unprotect the gfn and retry if an event is awaiting reinjection.  If
6216 	 * KVM emulates multiple instructions before completing event injection,
6217 	 * the event could be delayed beyond what is architecturally allowed,
6218 	 * e.g. KVM could inject an IRQ after the TPR has been raised.
6219 	 */
6220 	if (((direct && is_write_to_guest_page_table(error_code)) ||
6221 	     (!direct && kvm_event_needs_reinjection(vcpu))) &&
6222 	    kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
6223 		return RET_PF_RETRY;
6224 
6225 	/*
6226 	 * The gfn is write-protected, but if KVM detects its emulating an
6227 	 * instruction that is unlikely to be used to modify page tables, or if
6228 	 * emulation fails, KVM can try to unprotect the gfn and let the CPU
6229 	 * re-execute the instruction that caused the page fault.  Do not allow
6230 	 * retrying an instruction from a nested guest as KVM is only explicitly
6231 	 * shadowing L1's page tables, i.e. unprotecting something for L1 isn't
6232 	 * going to magically fix whatever issue caused L2 to fail.
6233 	 */
6234 	if (!is_guest_mode(vcpu))
6235 		*emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
6236 
6237 	return RET_PF_EMULATE;
6238 }
6239 
kvm_mmu_page_fault(struct kvm_vcpu * vcpu,gpa_t cr2_or_gpa,u64 error_code,void * insn,int insn_len)6240 int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
6241 		       void *insn, int insn_len)
6242 {
6243 	int r, emulation_type = EMULTYPE_PF;
6244 	bool direct = vcpu->arch.mmu->root_role.direct;
6245 
6246 	if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
6247 		return RET_PF_RETRY;
6248 
6249 	/*
6250 	 * Except for reserved faults (emulated MMIO is shared-only), set the
6251 	 * PFERR_PRIVATE_ACCESS flag for software-protected VMs based on the gfn's
6252 	 * current attributes, which are the source of truth for such VMs.  Note,
6253 	 * this wrong for nested MMUs as the GPA is an L2 GPA, but KVM doesn't
6254 	 * currently supported nested virtualization (among many other things)
6255 	 * for software-protected VMs.
6256 	 */
6257 	if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) &&
6258 	    !(error_code & PFERR_RSVD_MASK) &&
6259 	    vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM &&
6260 	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)))
6261 		error_code |= PFERR_PRIVATE_ACCESS;
6262 
6263 	r = RET_PF_INVALID;
6264 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
6265 		if (WARN_ON_ONCE(error_code & PFERR_PRIVATE_ACCESS))
6266 			return -EFAULT;
6267 
6268 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
6269 		if (r == RET_PF_EMULATE)
6270 			goto emulate;
6271 	}
6272 
6273 	if (r == RET_PF_INVALID) {
6274 		vcpu->stat.pf_taken++;
6275 
6276 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false,
6277 					  &emulation_type, NULL);
6278 		if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
6279 			return -EIO;
6280 	}
6281 
6282 	if (r < 0)
6283 		return r;
6284 
6285 	if (r == RET_PF_WRITE_PROTECTED)
6286 		r = kvm_mmu_write_protect_fault(vcpu, cr2_or_gpa, error_code,
6287 						&emulation_type);
6288 
6289 	if (r == RET_PF_FIXED)
6290 		vcpu->stat.pf_fixed++;
6291 	else if (r == RET_PF_EMULATE)
6292 		vcpu->stat.pf_emulate++;
6293 	else if (r == RET_PF_SPURIOUS)
6294 		vcpu->stat.pf_spurious++;
6295 
6296 	/*
6297 	 * None of handle_mmio_page_fault(), kvm_mmu_do_page_fault(), or
6298 	 * kvm_mmu_write_protect_fault() return RET_PF_CONTINUE.
6299 	 * kvm_mmu_do_page_fault() only uses RET_PF_CONTINUE internally to
6300 	 * indicate continuing the page fault handling until to the final
6301 	 * page table mapping phase.
6302 	 */
6303 	WARN_ON_ONCE(r == RET_PF_CONTINUE);
6304 	if (r != RET_PF_EMULATE)
6305 		return r;
6306 
6307 emulate:
6308 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
6309 				       insn_len);
6310 }
6311 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
6312 
kvm_mmu_print_sptes(struct kvm_vcpu * vcpu,gpa_t gpa,const char * msg)6313 void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
6314 {
6315 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
6316 	int root_level, leaf, level;
6317 
6318 	leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level);
6319 	if (unlikely(leaf < 0))
6320 		return;
6321 
6322 	pr_err("%s %llx", msg, gpa);
6323 	for (level = root_level; level >= leaf; level--)
6324 		pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
6325 	pr_cont("\n");
6326 }
6327 EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);
6328 
__kvm_mmu_invalidate_addr(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu,u64 addr,hpa_t root_hpa)6329 static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
6330 				      u64 addr, hpa_t root_hpa)
6331 {
6332 	struct kvm_shadow_walk_iterator iterator;
6333 
6334 	vcpu_clear_mmio_info(vcpu, addr);
6335 
6336 	/*
6337 	 * Walking and synchronizing SPTEs both assume they are operating in
6338 	 * the context of the current MMU, and would need to be reworked if
6339 	 * this is ever used to sync the guest_mmu, e.g. to emulate INVEPT.
6340 	 */
6341 	if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
6342 		return;
6343 
6344 	if (!VALID_PAGE(root_hpa))
6345 		return;
6346 
6347 	write_lock(&vcpu->kvm->mmu_lock);
6348 	for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) {
6349 		struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
6350 
6351 		if (sp->unsync) {
6352 			int ret = kvm_sync_spte(vcpu, sp, iterator.index);
6353 
6354 			if (ret < 0)
6355 				mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL);
6356 			if (ret)
6357 				kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep);
6358 		}
6359 
6360 		if (!sp->unsync_children)
6361 			break;
6362 	}
6363 	write_unlock(&vcpu->kvm->mmu_lock);
6364 }
6365 
kvm_mmu_invalidate_addr(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu,u64 addr,unsigned long roots)6366 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
6367 			     u64 addr, unsigned long roots)
6368 {
6369 	int i;
6370 
6371 	WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
6372 
6373 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
6374 	if (mmu != &vcpu->arch.guest_mmu) {
6375 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
6376 		if (is_noncanonical_invlpg_address(addr, vcpu))
6377 			return;
6378 
6379 		kvm_x86_call(flush_tlb_gva)(vcpu, addr);
6380 	}
6381 
6382 	if (!mmu->sync_spte)
6383 		return;
6384 
6385 	if (roots & KVM_MMU_ROOT_CURRENT)
6386 		__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa);
6387 
6388 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6389 		if (roots & KVM_MMU_ROOT_PREVIOUS(i))
6390 			__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
6391 	}
6392 }
6393 EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);
6394 
kvm_mmu_invlpg(struct kvm_vcpu * vcpu,gva_t gva)6395 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
6396 {
6397 	/*
6398 	 * INVLPG is required to invalidate any global mappings for the VA,
6399 	 * irrespective of PCID.  Blindly sync all roots as it would take
6400 	 * roughly the same amount of work/time to determine whether any of the
6401 	 * previous roots have a global mapping.
6402 	 *
6403 	 * Mappings not reachable via the current or previous cached roots will
6404 	 * be synced when switching to that new cr3, so nothing needs to be
6405 	 * done here for them.
6406 	 */
6407 	kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
6408 	++vcpu->stat.invlpg;
6409 }
6410 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
6411 
6412 
kvm_mmu_invpcid_gva(struct kvm_vcpu * vcpu,gva_t gva,unsigned long pcid)6413 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
6414 {
6415 	struct kvm_mmu *mmu = vcpu->arch.mmu;
6416 	unsigned long roots = 0;
6417 	uint i;
6418 
6419 	if (pcid == kvm_get_active_pcid(vcpu))
6420 		roots |= KVM_MMU_ROOT_CURRENT;
6421 
6422 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6423 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
6424 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd))
6425 			roots |= KVM_MMU_ROOT_PREVIOUS(i);
6426 	}
6427 
6428 	if (roots)
6429 		kvm_mmu_invalidate_addr(vcpu, mmu, gva, roots);
6430 	++vcpu->stat.invlpg;
6431 
6432 	/*
6433 	 * Mappings not reachable via the current cr3 or the prev_roots will be
6434 	 * synced when switching to that cr3, so nothing needs to be done here
6435 	 * for them.
6436 	 */
6437 }
6438 
kvm_configure_mmu(bool enable_tdp,int tdp_forced_root_level,int tdp_max_root_level,int tdp_huge_page_level)6439 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
6440 		       int tdp_max_root_level, int tdp_huge_page_level)
6441 {
6442 	tdp_enabled = enable_tdp;
6443 	tdp_root_level = tdp_forced_root_level;
6444 	max_tdp_level = tdp_max_root_level;
6445 
6446 #ifdef CONFIG_X86_64
6447 	tdp_mmu_enabled = tdp_mmu_allowed && tdp_enabled;
6448 #endif
6449 	/*
6450 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
6451 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
6452 	 * the kernel is not.  But, KVM never creates a page size greater than
6453 	 * what is used by the kernel for any given HVA, i.e. the kernel's
6454 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
6455 	 */
6456 	if (tdp_enabled)
6457 		max_huge_page_level = tdp_huge_page_level;
6458 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
6459 		max_huge_page_level = PG_LEVEL_1G;
6460 	else
6461 		max_huge_page_level = PG_LEVEL_2M;
6462 }
6463 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
6464 
free_mmu_pages(struct kvm_mmu * mmu)6465 static void free_mmu_pages(struct kvm_mmu *mmu)
6466 {
6467 	if (!tdp_enabled && mmu->pae_root)
6468 		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
6469 	free_page((unsigned long)mmu->pae_root);
6470 	free_page((unsigned long)mmu->pml4_root);
6471 	free_page((unsigned long)mmu->pml5_root);
6472 }
6473 
__kvm_mmu_create(struct kvm_vcpu * vcpu,struct kvm_mmu * mmu)6474 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
6475 {
6476 	struct page *page;
6477 	int i;
6478 
6479 	mmu->root.hpa = INVALID_PAGE;
6480 	mmu->root.pgd = 0;
6481 	mmu->mirror_root_hpa = INVALID_PAGE;
6482 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
6483 		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
6484 
6485 	/* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
6486 	if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
6487 		return 0;
6488 
6489 	/*
6490 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
6491 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
6492 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
6493 	 * x86_64.  Therefore we need to allocate the PDP table in the first
6494 	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
6495 	 * generally doesn't use PAE paging and can skip allocating the PDP
6496 	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
6497 	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
6498 	 * KVM; that horror is handled on-demand by mmu_alloc_special_roots().
6499 	 */
6500 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
6501 		return 0;
6502 
6503 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
6504 	if (!page)
6505 		return -ENOMEM;
6506 
6507 	mmu->pae_root = page_address(page);
6508 
6509 	/*
6510 	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
6511 	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
6512 	 * that KVM's writes and the CPU's reads get along.  Note, this is
6513 	 * only necessary when using shadow paging, as 64-bit NPT can get at
6514 	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
6515 	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
6516 	 */
6517 	if (!tdp_enabled)
6518 		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
6519 	else
6520 		WARN_ON_ONCE(shadow_me_value);
6521 
6522 	for (i = 0; i < 4; ++i)
6523 		mmu->pae_root[i] = INVALID_PAE_ROOT;
6524 
6525 	return 0;
6526 }
6527 
kvm_mmu_create(struct kvm_vcpu * vcpu)6528 int kvm_mmu_create(struct kvm_vcpu *vcpu)
6529 {
6530 	int ret;
6531 
6532 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
6533 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
6534 
6535 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
6536 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
6537 
6538 	vcpu->arch.mmu_shadow_page_cache.init_value =
6539 		SHADOW_NONPRESENT_VALUE;
6540 	if (!vcpu->arch.mmu_shadow_page_cache.init_value)
6541 		vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
6542 
6543 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
6544 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
6545 
6546 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
6547 	if (ret)
6548 		return ret;
6549 
6550 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
6551 	if (ret)
6552 		goto fail_allocate_root;
6553 
6554 	return ret;
6555  fail_allocate_root:
6556 	free_mmu_pages(&vcpu->arch.guest_mmu);
6557 	return ret;
6558 }
6559 
6560 #define BATCH_ZAP_PAGES	10
kvm_zap_obsolete_pages(struct kvm * kvm)6561 static void kvm_zap_obsolete_pages(struct kvm *kvm)
6562 {
6563 	struct kvm_mmu_page *sp, *node;
6564 	int nr_zapped, batch = 0;
6565 	LIST_HEAD(invalid_list);
6566 	bool unstable;
6567 
6568 	lockdep_assert_held(&kvm->slots_lock);
6569 
6570 restart:
6571 	list_for_each_entry_safe_reverse(sp, node,
6572 	      &kvm->arch.active_mmu_pages, link) {
6573 		/*
6574 		 * No obsolete valid page exists before a newly created page
6575 		 * since active_mmu_pages is a FIFO list.
6576 		 */
6577 		if (!is_obsolete_sp(kvm, sp))
6578 			break;
6579 
6580 		/*
6581 		 * Invalid pages should never land back on the list of active
6582 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
6583 		 * infinite loop if the page gets put back on the list (again).
6584 		 */
6585 		if (WARN_ON_ONCE(sp->role.invalid))
6586 			continue;
6587 
6588 		/*
6589 		 * No need to flush the TLB since we're only zapping shadow
6590 		 * pages with an obsolete generation number and all vCPUS have
6591 		 * loaded a new root, i.e. the shadow pages being zapped cannot
6592 		 * be in active use by the guest.
6593 		 */
6594 		if (batch >= BATCH_ZAP_PAGES &&
6595 		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
6596 			batch = 0;
6597 			goto restart;
6598 		}
6599 
6600 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
6601 				&invalid_list, &nr_zapped);
6602 		batch += nr_zapped;
6603 
6604 		if (unstable)
6605 			goto restart;
6606 	}
6607 
6608 	/*
6609 	 * Kick all vCPUs (via remote TLB flush) before freeing the page tables
6610 	 * to ensure KVM is not in the middle of a lockless shadow page table
6611 	 * walk, which may reference the pages.  The remote TLB flush itself is
6612 	 * not required and is simply a convenient way to kick vCPUs as needed.
6613 	 * KVM performs a local TLB flush when allocating a new root (see
6614 	 * kvm_mmu_load()), and the reload in the caller ensure no vCPUs are
6615 	 * running with an obsolete MMU.
6616 	 */
6617 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
6618 }
6619 
6620 /*
6621  * Fast invalidate all shadow pages and use lock-break technique
6622  * to zap obsolete pages.
6623  *
6624  * It's required when memslot is being deleted or VM is being
6625  * destroyed, in these cases, we should ensure that KVM MMU does
6626  * not use any resource of the being-deleted slot or all slots
6627  * after calling the function.
6628  */
kvm_mmu_zap_all_fast(struct kvm * kvm)6629 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
6630 {
6631 	lockdep_assert_held(&kvm->slots_lock);
6632 
6633 	write_lock(&kvm->mmu_lock);
6634 	trace_kvm_mmu_zap_all_fast(kvm);
6635 
6636 	/*
6637 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
6638 	 * held for the entire duration of zapping obsolete pages, it's
6639 	 * impossible for there to be multiple invalid generations associated
6640 	 * with *valid* shadow pages at any given time, i.e. there is exactly
6641 	 * one valid generation and (at most) one invalid generation.
6642 	 */
6643 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
6644 
6645 	/*
6646 	 * In order to ensure all vCPUs drop their soon-to-be invalid roots,
6647 	 * invalidating TDP MMU roots must be done while holding mmu_lock for
6648 	 * write and in the same critical section as making the reload request,
6649 	 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
6650 	 */
6651 	if (tdp_mmu_enabled) {
6652 		/*
6653 		 * External page tables don't support fast zapping, therefore
6654 		 * their mirrors must be invalidated separately by the caller.
6655 		 */
6656 		kvm_tdp_mmu_invalidate_roots(kvm, KVM_DIRECT_ROOTS);
6657 	}
6658 
6659 	/*
6660 	 * Notify all vcpus to reload its shadow page table and flush TLB.
6661 	 * Then all vcpus will switch to new shadow page table with the new
6662 	 * mmu_valid_gen.
6663 	 *
6664 	 * Note: we need to do this under the protection of mmu_lock,
6665 	 * otherwise, vcpu would purge shadow page but miss tlb flush.
6666 	 */
6667 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
6668 
6669 	kvm_zap_obsolete_pages(kvm);
6670 
6671 	write_unlock(&kvm->mmu_lock);
6672 
6673 	/*
6674 	 * Zap the invalidated TDP MMU roots, all SPTEs must be dropped before
6675 	 * returning to the caller, e.g. if the zap is in response to a memslot
6676 	 * deletion, mmu_notifier callbacks will be unable to reach the SPTEs
6677 	 * associated with the deleted memslot once the update completes, and
6678 	 * Deferring the zap until the final reference to the root is put would
6679 	 * lead to use-after-free.
6680 	 */
6681 	if (tdp_mmu_enabled)
6682 		kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
6683 }
6684 
kvm_mmu_init_vm(struct kvm * kvm)6685 void kvm_mmu_init_vm(struct kvm *kvm)
6686 {
6687 	kvm->arch.shadow_mmio_value = shadow_mmio_value;
6688 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6689 	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
6690 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
6691 
6692 	if (tdp_mmu_enabled)
6693 		kvm_mmu_init_tdp_mmu(kvm);
6694 
6695 	kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
6696 	kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
6697 
6698 	kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
6699 
6700 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
6701 	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
6702 }
6703 
mmu_free_vm_memory_caches(struct kvm * kvm)6704 static void mmu_free_vm_memory_caches(struct kvm *kvm)
6705 {
6706 	kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache);
6707 	kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
6708 	kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
6709 }
6710 
kvm_mmu_uninit_vm(struct kvm * kvm)6711 void kvm_mmu_uninit_vm(struct kvm *kvm)
6712 {
6713 	if (tdp_mmu_enabled)
6714 		kvm_mmu_uninit_tdp_mmu(kvm);
6715 
6716 	mmu_free_vm_memory_caches(kvm);
6717 }
6718 
kvm_rmap_zap_gfn_range(struct kvm * kvm,gfn_t gfn_start,gfn_t gfn_end)6719 static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6720 {
6721 	const struct kvm_memory_slot *memslot;
6722 	struct kvm_memslots *slots;
6723 	struct kvm_memslot_iter iter;
6724 	bool flush = false;
6725 	gfn_t start, end;
6726 	int i;
6727 
6728 	if (!kvm_memslots_have_rmaps(kvm))
6729 		return flush;
6730 
6731 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
6732 		slots = __kvm_memslots(kvm, i);
6733 
6734 		kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
6735 			memslot = iter.slot;
6736 			start = max(gfn_start, memslot->base_gfn);
6737 			end = min(gfn_end, memslot->base_gfn + memslot->npages);
6738 			if (WARN_ON_ONCE(start >= end))
6739 				continue;
6740 
6741 			flush = __kvm_rmap_zap_gfn_range(kvm, memslot, start,
6742 							 end, true, flush);
6743 		}
6744 	}
6745 
6746 	return flush;
6747 }
6748 
6749 /*
6750  * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end
6751  * (not including it)
6752  */
kvm_zap_gfn_range(struct kvm * kvm,gfn_t gfn_start,gfn_t gfn_end)6753 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6754 {
6755 	bool flush;
6756 
6757 	if (WARN_ON_ONCE(gfn_end <= gfn_start))
6758 		return;
6759 
6760 	write_lock(&kvm->mmu_lock);
6761 
6762 	kvm_mmu_invalidate_begin(kvm);
6763 
6764 	kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
6765 
6766 	flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
6767 
6768 	if (tdp_mmu_enabled)
6769 		flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
6770 
6771 	if (flush)
6772 		kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
6773 
6774 	kvm_mmu_invalidate_end(kvm);
6775 
6776 	write_unlock(&kvm->mmu_lock);
6777 }
6778 
slot_rmap_write_protect(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot)6779 static bool slot_rmap_write_protect(struct kvm *kvm,
6780 				    struct kvm_rmap_head *rmap_head,
6781 				    const struct kvm_memory_slot *slot)
6782 {
6783 	return rmap_write_protect(rmap_head, false);
6784 }
6785 
kvm_mmu_slot_remove_write_access(struct kvm * kvm,const struct kvm_memory_slot * memslot,int start_level)6786 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
6787 				      const struct kvm_memory_slot *memslot,
6788 				      int start_level)
6789 {
6790 	if (kvm_memslots_have_rmaps(kvm)) {
6791 		write_lock(&kvm->mmu_lock);
6792 		walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
6793 				start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
6794 		write_unlock(&kvm->mmu_lock);
6795 	}
6796 
6797 	if (tdp_mmu_enabled) {
6798 		read_lock(&kvm->mmu_lock);
6799 		kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
6800 		read_unlock(&kvm->mmu_lock);
6801 	}
6802 }
6803 
need_topup(struct kvm_mmu_memory_cache * cache,int min)6804 static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
6805 {
6806 	return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
6807 }
6808 
need_topup_split_caches_or_resched(struct kvm * kvm)6809 static bool need_topup_split_caches_or_resched(struct kvm *kvm)
6810 {
6811 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
6812 		return true;
6813 
6814 	/*
6815 	 * In the worst case, SPLIT_DESC_CACHE_MIN_NR_OBJECTS descriptors are needed
6816 	 * to split a single huge page. Calculating how many are actually needed
6817 	 * is possible but not worth the complexity.
6818 	 */
6819 	return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
6820 	       need_topup(&kvm->arch.split_page_header_cache, 1) ||
6821 	       need_topup(&kvm->arch.split_shadow_page_cache, 1);
6822 }
6823 
topup_split_caches(struct kvm * kvm)6824 static int topup_split_caches(struct kvm *kvm)
6825 {
6826 	/*
6827 	 * Allocating rmap list entries when splitting huge pages for nested
6828 	 * MMUs is uncommon as KVM needs to use a list if and only if there is
6829 	 * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be
6830 	 * aliased by multiple L2 gfns and/or from multiple nested roots with
6831 	 * different roles.  Aliasing gfns when using TDP is atypical for VMMs;
6832 	 * a few gfns are often aliased during boot, e.g. when remapping BIOS,
6833 	 * but aliasing rarely occurs post-boot or for many gfns.  If there is
6834 	 * only one rmap entry, rmap->val points directly at that one entry and
6835 	 * doesn't need to allocate a list.  Buffer the cache by the default
6836 	 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
6837 	 * encounters an aliased gfn or two.
6838 	 */
6839 	const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS +
6840 			     KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
6841 	int r;
6842 
6843 	lockdep_assert_held(&kvm->slots_lock);
6844 
6845 	r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
6846 					 SPLIT_DESC_CACHE_MIN_NR_OBJECTS);
6847 	if (r)
6848 		return r;
6849 
6850 	r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
6851 	if (r)
6852 		return r;
6853 
6854 	return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
6855 }
6856 
shadow_mmu_get_sp_for_split(struct kvm * kvm,u64 * huge_sptep)6857 static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
6858 {
6859 	struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
6860 	struct shadow_page_caches caches = {};
6861 	union kvm_mmu_page_role role;
6862 	unsigned int access;
6863 	gfn_t gfn;
6864 
6865 	gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6866 	access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
6867 
6868 	/*
6869 	 * Note, huge page splitting always uses direct shadow pages, regardless
6870 	 * of whether the huge page itself is mapped by a direct or indirect
6871 	 * shadow page, since the huge page region itself is being directly
6872 	 * mapped with smaller pages.
6873 	 */
6874 	role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
6875 
6876 	/* Direct SPs do not require a shadowed_info_cache. */
6877 	caches.page_header_cache = &kvm->arch.split_page_header_cache;
6878 	caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache;
6879 
6880 	/* Safe to pass NULL for vCPU since requesting a direct SP. */
6881 	return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
6882 }
6883 
shadow_mmu_split_huge_page(struct kvm * kvm,const struct kvm_memory_slot * slot,u64 * huge_sptep)6884 static void shadow_mmu_split_huge_page(struct kvm *kvm,
6885 				       const struct kvm_memory_slot *slot,
6886 				       u64 *huge_sptep)
6887 
6888 {
6889 	struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
6890 	u64 huge_spte = READ_ONCE(*huge_sptep);
6891 	struct kvm_mmu_page *sp;
6892 	bool flush = false;
6893 	u64 *sptep, spte;
6894 	gfn_t gfn;
6895 	int index;
6896 
6897 	sp = shadow_mmu_get_sp_for_split(kvm, huge_sptep);
6898 
6899 	for (index = 0; index < SPTE_ENT_PER_PAGE; index++) {
6900 		sptep = &sp->spt[index];
6901 		gfn = kvm_mmu_page_get_gfn(sp, index);
6902 
6903 		/*
6904 		 * The SP may already have populated SPTEs, e.g. if this huge
6905 		 * page is aliased by multiple sptes with the same access
6906 		 * permissions. These entries are guaranteed to map the same
6907 		 * gfn-to-pfn translation since the SP is direct, so no need to
6908 		 * modify them.
6909 		 *
6910 		 * However, if a given SPTE points to a lower level page table,
6911 		 * that lower level page table may only be partially populated.
6912 		 * Installing such SPTEs would effectively unmap a potion of the
6913 		 * huge page. Unmapping guest memory always requires a TLB flush
6914 		 * since a subsequent operation on the unmapped regions would
6915 		 * fail to detect the need to flush.
6916 		 */
6917 		if (is_shadow_present_pte(*sptep)) {
6918 			flush |= !is_last_spte(*sptep, sp->role.level);
6919 			continue;
6920 		}
6921 
6922 		spte = make_small_spte(kvm, huge_spte, sp->role, index);
6923 		mmu_spte_set(sptep, spte);
6924 		__rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6925 	}
6926 
6927 	__link_shadow_page(kvm, cache, huge_sptep, sp, flush);
6928 }
6929 
shadow_mmu_try_split_huge_page(struct kvm * kvm,const struct kvm_memory_slot * slot,u64 * huge_sptep)6930 static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
6931 					  const struct kvm_memory_slot *slot,
6932 					  u64 *huge_sptep)
6933 {
6934 	struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
6935 	int level, r = 0;
6936 	gfn_t gfn;
6937 	u64 spte;
6938 
6939 	/* Grab information for the tracepoint before dropping the MMU lock. */
6940 	gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6941 	level = huge_sp->role.level;
6942 	spte = *huge_sptep;
6943 
6944 	if (kvm_mmu_available_pages(kvm) <= KVM_MIN_FREE_MMU_PAGES) {
6945 		r = -ENOSPC;
6946 		goto out;
6947 	}
6948 
6949 	if (need_topup_split_caches_or_resched(kvm)) {
6950 		write_unlock(&kvm->mmu_lock);
6951 		cond_resched();
6952 		/*
6953 		 * If the topup succeeds, return -EAGAIN to indicate that the
6954 		 * rmap iterator should be restarted because the MMU lock was
6955 		 * dropped.
6956 		 */
6957 		r = topup_split_caches(kvm) ?: -EAGAIN;
6958 		write_lock(&kvm->mmu_lock);
6959 		goto out;
6960 	}
6961 
6962 	shadow_mmu_split_huge_page(kvm, slot, huge_sptep);
6963 
6964 out:
6965 	trace_kvm_mmu_split_huge_page(gfn, spte, level, r);
6966 	return r;
6967 }
6968 
shadow_mmu_try_split_huge_pages(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot)6969 static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm,
6970 					    struct kvm_rmap_head *rmap_head,
6971 					    const struct kvm_memory_slot *slot)
6972 {
6973 	struct rmap_iterator iter;
6974 	struct kvm_mmu_page *sp;
6975 	u64 *huge_sptep;
6976 	int r;
6977 
6978 restart:
6979 	for_each_rmap_spte(rmap_head, &iter, huge_sptep) {
6980 		sp = sptep_to_sp(huge_sptep);
6981 
6982 		/* TDP MMU is enabled, so rmap only contains nested MMU SPs. */
6983 		if (WARN_ON_ONCE(!sp->role.guest_mode))
6984 			continue;
6985 
6986 		/* The rmaps should never contain non-leaf SPTEs. */
6987 		if (WARN_ON_ONCE(!is_large_pte(*huge_sptep)))
6988 			continue;
6989 
6990 		/* SPs with level >PG_LEVEL_4K should never by unsync. */
6991 		if (WARN_ON_ONCE(sp->unsync))
6992 			continue;
6993 
6994 		/* Don't bother splitting huge pages on invalid SPs. */
6995 		if (sp->role.invalid)
6996 			continue;
6997 
6998 		r = shadow_mmu_try_split_huge_page(kvm, slot, huge_sptep);
6999 
7000 		/*
7001 		 * The split succeeded or needs to be retried because the MMU
7002 		 * lock was dropped. Either way, restart the iterator to get it
7003 		 * back into a consistent state.
7004 		 */
7005 		if (!r || r == -EAGAIN)
7006 			goto restart;
7007 
7008 		/* The split failed and shouldn't be retried (e.g. -ENOMEM). */
7009 		break;
7010 	}
7011 
7012 	return false;
7013 }
7014 
kvm_shadow_mmu_try_split_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * slot,gfn_t start,gfn_t end,int target_level)7015 static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
7016 						const struct kvm_memory_slot *slot,
7017 						gfn_t start, gfn_t end,
7018 						int target_level)
7019 {
7020 	int level;
7021 
7022 	/*
7023 	 * Split huge pages starting with KVM_MAX_HUGEPAGE_LEVEL and working
7024 	 * down to the target level. This ensures pages are recursively split
7025 	 * all the way to the target level. There's no need to split pages
7026 	 * already at the target level.
7027 	 */
7028 	for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
7029 		__walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
7030 				  level, level, start, end - 1, true, true, false);
7031 }
7032 
7033 /* Must be called with the mmu_lock held in write-mode. */
kvm_mmu_try_split_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * memslot,u64 start,u64 end,int target_level)7034 void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
7035 				   const struct kvm_memory_slot *memslot,
7036 				   u64 start, u64 end,
7037 				   int target_level)
7038 {
7039 	if (!tdp_mmu_enabled)
7040 		return;
7041 
7042 	if (kvm_memslots_have_rmaps(kvm))
7043 		kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
7044 
7045 	kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, false);
7046 
7047 	/*
7048 	 * A TLB flush is unnecessary at this point for the same reasons as in
7049 	 * kvm_mmu_slot_try_split_huge_pages().
7050 	 */
7051 }
7052 
kvm_mmu_slot_try_split_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * memslot,int target_level)7053 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
7054 					const struct kvm_memory_slot *memslot,
7055 					int target_level)
7056 {
7057 	u64 start = memslot->base_gfn;
7058 	u64 end = start + memslot->npages;
7059 
7060 	if (!tdp_mmu_enabled)
7061 		return;
7062 
7063 	if (kvm_memslots_have_rmaps(kvm)) {
7064 		write_lock(&kvm->mmu_lock);
7065 		kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
7066 		write_unlock(&kvm->mmu_lock);
7067 	}
7068 
7069 	read_lock(&kvm->mmu_lock);
7070 	kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true);
7071 	read_unlock(&kvm->mmu_lock);
7072 
7073 	/*
7074 	 * No TLB flush is necessary here. KVM will flush TLBs after
7075 	 * write-protecting and/or clearing dirty on the newly split SPTEs to
7076 	 * ensure that guest writes are reflected in the dirty log before the
7077 	 * ioctl to enable dirty logging on this memslot completes. Since the
7078 	 * split SPTEs retain the write and dirty bits of the huge SPTE, it is
7079 	 * safe for KVM to decide if a TLB flush is necessary based on the split
7080 	 * SPTEs.
7081 	 */
7082 }
7083 
kvm_mmu_zap_collapsible_spte(struct kvm * kvm,struct kvm_rmap_head * rmap_head,const struct kvm_memory_slot * slot)7084 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
7085 					 struct kvm_rmap_head *rmap_head,
7086 					 const struct kvm_memory_slot *slot)
7087 {
7088 	u64 *sptep;
7089 	struct rmap_iterator iter;
7090 	int need_tlb_flush = 0;
7091 	struct kvm_mmu_page *sp;
7092 
7093 restart:
7094 	for_each_rmap_spte(rmap_head, &iter, sptep) {
7095 		sp = sptep_to_sp(sptep);
7096 
7097 		/*
7098 		 * We cannot do huge page mapping for indirect shadow pages,
7099 		 * which are found on the last rmap (level = 1) when not using
7100 		 * tdp; such shadow pages are synced with the page table in
7101 		 * the guest, and the guest page table is using 4K page size
7102 		 * mapping if the indirect sp has level = 1.
7103 		 */
7104 		if (sp->role.direct &&
7105 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn)) {
7106 			kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
7107 
7108 			if (kvm_available_flush_remote_tlbs_range())
7109 				kvm_flush_remote_tlbs_sptep(kvm, sptep);
7110 			else
7111 				need_tlb_flush = 1;
7112 
7113 			goto restart;
7114 		}
7115 	}
7116 
7117 	return need_tlb_flush;
7118 }
7119 EXPORT_SYMBOL_GPL(kvm_zap_gfn_range);
7120 
kvm_rmap_zap_collapsible_sptes(struct kvm * kvm,const struct kvm_memory_slot * slot)7121 static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
7122 					   const struct kvm_memory_slot *slot)
7123 {
7124 	/*
7125 	 * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap
7126 	 * pages that are already mapped at the maximum hugepage level.
7127 	 */
7128 	if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
7129 			    PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
7130 		kvm_flush_remote_tlbs_memslot(kvm, slot);
7131 }
7132 
kvm_mmu_recover_huge_pages(struct kvm * kvm,const struct kvm_memory_slot * slot)7133 void kvm_mmu_recover_huge_pages(struct kvm *kvm,
7134 				const struct kvm_memory_slot *slot)
7135 {
7136 	if (kvm_memslots_have_rmaps(kvm)) {
7137 		write_lock(&kvm->mmu_lock);
7138 		kvm_rmap_zap_collapsible_sptes(kvm, slot);
7139 		write_unlock(&kvm->mmu_lock);
7140 	}
7141 
7142 	if (tdp_mmu_enabled) {
7143 		read_lock(&kvm->mmu_lock);
7144 		kvm_tdp_mmu_recover_huge_pages(kvm, slot);
7145 		read_unlock(&kvm->mmu_lock);
7146 	}
7147 }
7148 
kvm_mmu_slot_leaf_clear_dirty(struct kvm * kvm,const struct kvm_memory_slot * memslot)7149 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
7150 				   const struct kvm_memory_slot *memslot)
7151 {
7152 	if (kvm_memslots_have_rmaps(kvm)) {
7153 		write_lock(&kvm->mmu_lock);
7154 		/*
7155 		 * Clear dirty bits only on 4k SPTEs since the legacy MMU only
7156 		 * support dirty logging at a 4k granularity.
7157 		 */
7158 		walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false);
7159 		write_unlock(&kvm->mmu_lock);
7160 	}
7161 
7162 	if (tdp_mmu_enabled) {
7163 		read_lock(&kvm->mmu_lock);
7164 		kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
7165 		read_unlock(&kvm->mmu_lock);
7166 	}
7167 
7168 	/*
7169 	 * The caller will flush the TLBs after this function returns.
7170 	 *
7171 	 * It's also safe to flush TLBs out of mmu lock here as currently this
7172 	 * function is only used for dirty logging, in which case flushing TLB
7173 	 * out of mmu lock also guarantees no dirty pages will be lost in
7174 	 * dirty_bitmap.
7175 	 */
7176 }
7177 
kvm_mmu_zap_all(struct kvm * kvm)7178 static void kvm_mmu_zap_all(struct kvm *kvm)
7179 {
7180 	struct kvm_mmu_page *sp, *node;
7181 	LIST_HEAD(invalid_list);
7182 	int ign;
7183 
7184 	write_lock(&kvm->mmu_lock);
7185 restart:
7186 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
7187 		if (WARN_ON_ONCE(sp->role.invalid))
7188 			continue;
7189 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
7190 			goto restart;
7191 		if (cond_resched_rwlock_write(&kvm->mmu_lock))
7192 			goto restart;
7193 	}
7194 
7195 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
7196 
7197 	if (tdp_mmu_enabled)
7198 		kvm_tdp_mmu_zap_all(kvm);
7199 
7200 	write_unlock(&kvm->mmu_lock);
7201 }
7202 
kvm_arch_flush_shadow_all(struct kvm * kvm)7203 void kvm_arch_flush_shadow_all(struct kvm *kvm)
7204 {
7205 	kvm_mmu_zap_all(kvm);
7206 }
7207 
kvm_mmu_zap_memslot_pages_and_flush(struct kvm * kvm,struct kvm_memory_slot * slot,bool flush)7208 static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm,
7209 						struct kvm_memory_slot *slot,
7210 						bool flush)
7211 {
7212 	LIST_HEAD(invalid_list);
7213 	unsigned long i;
7214 
7215 	if (list_empty(&kvm->arch.active_mmu_pages))
7216 		goto out_flush;
7217 
7218 	/*
7219 	 * Since accounting information is stored in struct kvm_arch_memory_slot,
7220 	 * all MMU pages that are shadowing guest PTEs must be zapped before the
7221 	 * memslot is deleted, as freeing such pages after the memslot is freed
7222 	 * will result in use-after-free, e.g. in unaccount_shadowed().
7223 	 */
7224 	for (i = 0; i < slot->npages; i++) {
7225 		struct kvm_mmu_page *sp;
7226 		gfn_t gfn = slot->base_gfn + i;
7227 
7228 		for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn)
7229 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7230 
7231 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7232 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7233 			flush = false;
7234 			cond_resched_rwlock_write(&kvm->mmu_lock);
7235 		}
7236 	}
7237 
7238 out_flush:
7239 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7240 }
7241 
kvm_mmu_zap_memslot(struct kvm * kvm,struct kvm_memory_slot * slot)7242 static void kvm_mmu_zap_memslot(struct kvm *kvm,
7243 				struct kvm_memory_slot *slot)
7244 {
7245 	struct kvm_gfn_range range = {
7246 		.slot = slot,
7247 		.start = slot->base_gfn,
7248 		.end = slot->base_gfn + slot->npages,
7249 		.may_block = true,
7250 		.attr_filter = KVM_FILTER_PRIVATE | KVM_FILTER_SHARED,
7251 	};
7252 	bool flush;
7253 
7254 	write_lock(&kvm->mmu_lock);
7255 	flush = kvm_unmap_gfn_range(kvm, &range);
7256 	kvm_mmu_zap_memslot_pages_and_flush(kvm, slot, flush);
7257 	write_unlock(&kvm->mmu_lock);
7258 }
7259 
kvm_memslot_flush_zap_all(struct kvm * kvm)7260 static inline bool kvm_memslot_flush_zap_all(struct kvm *kvm)
7261 {
7262 	return kvm->arch.vm_type == KVM_X86_DEFAULT_VM &&
7263 	       kvm_check_has_quirk(kvm, KVM_X86_QUIRK_SLOT_ZAP_ALL);
7264 }
7265 
kvm_arch_flush_shadow_memslot(struct kvm * kvm,struct kvm_memory_slot * slot)7266 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
7267 				   struct kvm_memory_slot *slot)
7268 {
7269 	if (kvm_memslot_flush_zap_all(kvm))
7270 		kvm_mmu_zap_all_fast(kvm);
7271 	else
7272 		kvm_mmu_zap_memslot(kvm, slot);
7273 }
7274 
kvm_mmu_invalidate_mmio_sptes(struct kvm * kvm,u64 gen)7275 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
7276 {
7277 	WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
7278 
7279 	gen &= MMIO_SPTE_GEN_MASK;
7280 
7281 	/*
7282 	 * Generation numbers are incremented in multiples of the number of
7283 	 * address spaces in order to provide unique generations across all
7284 	 * address spaces.  Strip what is effectively the address space
7285 	 * modifier prior to checking for a wrap of the MMIO generation so
7286 	 * that a wrap in any address space is detected.
7287 	 */
7288 	gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1);
7289 
7290 	/*
7291 	 * The very rare case: if the MMIO generation number has wrapped,
7292 	 * zap all shadow pages.
7293 	 */
7294 	if (unlikely(gen == 0)) {
7295 		kvm_debug_ratelimited("zapping shadow pages for mmio generation wraparound\n");
7296 		kvm_mmu_zap_all_fast(kvm);
7297 	}
7298 }
7299 
mmu_destroy_caches(void)7300 static void mmu_destroy_caches(void)
7301 {
7302 	kmem_cache_destroy(pte_list_desc_cache);
7303 	kmem_cache_destroy(mmu_page_header_cache);
7304 }
7305 
kvm_wake_nx_recovery_thread(struct kvm * kvm)7306 static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
7307 {
7308 	/*
7309 	 * The NX recovery thread is spawned on-demand at the first KVM_RUN and
7310 	 * may not be valid even though the VM is globally visible.  Do nothing,
7311 	 * as such a VM can't have any possible NX huge pages.
7312 	 */
7313 	struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
7314 
7315 	if (nx_thread)
7316 		vhost_task_wake(nx_thread);
7317 }
7318 
get_nx_huge_pages(char * buffer,const struct kernel_param * kp)7319 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
7320 {
7321 	if (nx_hugepage_mitigation_hard_disabled)
7322 		return sysfs_emit(buffer, "never\n");
7323 
7324 	return param_get_bool(buffer, kp);
7325 }
7326 
get_nx_auto_mode(void)7327 static bool get_nx_auto_mode(void)
7328 {
7329 	/* Return true when CPU has the bug, and mitigations are ON */
7330 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
7331 }
7332 
__set_nx_huge_pages(bool val)7333 static void __set_nx_huge_pages(bool val)
7334 {
7335 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
7336 }
7337 
set_nx_huge_pages(const char * val,const struct kernel_param * kp)7338 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
7339 {
7340 	bool old_val = nx_huge_pages;
7341 	bool new_val;
7342 
7343 	if (nx_hugepage_mitigation_hard_disabled)
7344 		return -EPERM;
7345 
7346 	/* In "auto" mode deploy workaround only if CPU has the bug. */
7347 	if (sysfs_streq(val, "off")) {
7348 		new_val = 0;
7349 	} else if (sysfs_streq(val, "force")) {
7350 		new_val = 1;
7351 	} else if (sysfs_streq(val, "auto")) {
7352 		new_val = get_nx_auto_mode();
7353 	} else if (sysfs_streq(val, "never")) {
7354 		new_val = 0;
7355 
7356 		mutex_lock(&kvm_lock);
7357 		if (!list_empty(&vm_list)) {
7358 			mutex_unlock(&kvm_lock);
7359 			return -EBUSY;
7360 		}
7361 		nx_hugepage_mitigation_hard_disabled = true;
7362 		mutex_unlock(&kvm_lock);
7363 	} else if (kstrtobool(val, &new_val) < 0) {
7364 		return -EINVAL;
7365 	}
7366 
7367 	__set_nx_huge_pages(new_val);
7368 
7369 	if (new_val != old_val) {
7370 		struct kvm *kvm;
7371 
7372 		mutex_lock(&kvm_lock);
7373 
7374 		list_for_each_entry(kvm, &vm_list, vm_list) {
7375 			mutex_lock(&kvm->slots_lock);
7376 			kvm_mmu_zap_all_fast(kvm);
7377 			mutex_unlock(&kvm->slots_lock);
7378 
7379 			kvm_wake_nx_recovery_thread(kvm);
7380 		}
7381 		mutex_unlock(&kvm_lock);
7382 	}
7383 
7384 	return 0;
7385 }
7386 
7387 /*
7388  * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
7389  * its default value of -1 is technically undefined behavior for a boolean.
7390  * Forward the module init call to SPTE code so that it too can handle module
7391  * params that need to be resolved/snapshot.
7392  */
kvm_mmu_x86_module_init(void)7393 void __init kvm_mmu_x86_module_init(void)
7394 {
7395 	if (nx_huge_pages == -1)
7396 		__set_nx_huge_pages(get_nx_auto_mode());
7397 
7398 	/*
7399 	 * Snapshot userspace's desire to enable the TDP MMU. Whether or not the
7400 	 * TDP MMU is actually enabled is determined in kvm_configure_mmu()
7401 	 * when the vendor module is loaded.
7402 	 */
7403 	tdp_mmu_allowed = tdp_mmu_enabled;
7404 
7405 	kvm_mmu_spte_module_init();
7406 }
7407 
7408 /*
7409  * The bulk of the MMU initialization is deferred until the vendor module is
7410  * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
7411  * to be reset when a potentially different vendor module is loaded.
7412  */
kvm_mmu_vendor_module_init(void)7413 int kvm_mmu_vendor_module_init(void)
7414 {
7415 	int ret = -ENOMEM;
7416 
7417 	/*
7418 	 * MMU roles use union aliasing which is, generally speaking, an
7419 	 * undefined behavior. However, we supposedly know how compilers behave
7420 	 * and the current status quo is unlikely to change. Guardians below are
7421 	 * supposed to let us know if the assumption becomes false.
7422 	 */
7423 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
7424 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
7425 	BUILD_BUG_ON(sizeof(union kvm_cpu_role) != sizeof(u64));
7426 
7427 	kvm_mmu_reset_all_pte_masks();
7428 
7429 	pte_list_desc_cache = KMEM_CACHE(pte_list_desc, SLAB_ACCOUNT);
7430 	if (!pte_list_desc_cache)
7431 		goto out;
7432 
7433 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
7434 						  sizeof(struct kvm_mmu_page),
7435 						  0, SLAB_ACCOUNT, NULL);
7436 	if (!mmu_page_header_cache)
7437 		goto out;
7438 
7439 	return 0;
7440 
7441 out:
7442 	mmu_destroy_caches();
7443 	return ret;
7444 }
7445 
kvm_mmu_destroy(struct kvm_vcpu * vcpu)7446 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
7447 {
7448 	kvm_mmu_unload(vcpu);
7449 	if (tdp_mmu_enabled) {
7450 		read_lock(&vcpu->kvm->mmu_lock);
7451 		mmu_free_root_page(vcpu->kvm, &vcpu->arch.mmu->mirror_root_hpa,
7452 				   NULL);
7453 		read_unlock(&vcpu->kvm->mmu_lock);
7454 	}
7455 	free_mmu_pages(&vcpu->arch.root_mmu);
7456 	free_mmu_pages(&vcpu->arch.guest_mmu);
7457 	mmu_free_memory_caches(vcpu);
7458 }
7459 
kvm_mmu_vendor_module_exit(void)7460 void kvm_mmu_vendor_module_exit(void)
7461 {
7462 	mmu_destroy_caches();
7463 }
7464 
7465 /*
7466  * Calculate the effective recovery period, accounting for '0' meaning "let KVM
7467  * select a halving time of 1 hour".  Returns true if recovery is enabled.
7468  */
calc_nx_huge_pages_recovery_period(uint * period)7469 static bool calc_nx_huge_pages_recovery_period(uint *period)
7470 {
7471 	/*
7472 	 * Use READ_ONCE to get the params, this may be called outside of the
7473 	 * param setters, e.g. by the kthread to compute its next timeout.
7474 	 */
7475 	bool enabled = READ_ONCE(nx_huge_pages);
7476 	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7477 
7478 	if (!enabled || !ratio)
7479 		return false;
7480 
7481 	*period = READ_ONCE(nx_huge_pages_recovery_period_ms);
7482 	if (!*period) {
7483 		/* Make sure the period is not less than one second.  */
7484 		ratio = min(ratio, 3600u);
7485 		*period = 60 * 60 * 1000 / ratio;
7486 	}
7487 	return true;
7488 }
7489 
set_nx_huge_pages_recovery_param(const char * val,const struct kernel_param * kp)7490 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
7491 {
7492 	bool was_recovery_enabled, is_recovery_enabled;
7493 	uint old_period, new_period;
7494 	int err;
7495 
7496 	if (nx_hugepage_mitigation_hard_disabled)
7497 		return -EPERM;
7498 
7499 	was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
7500 
7501 	err = param_set_uint(val, kp);
7502 	if (err)
7503 		return err;
7504 
7505 	is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
7506 
7507 	if (is_recovery_enabled &&
7508 	    (!was_recovery_enabled || old_period > new_period)) {
7509 		struct kvm *kvm;
7510 
7511 		mutex_lock(&kvm_lock);
7512 
7513 		list_for_each_entry(kvm, &vm_list, vm_list)
7514 			kvm_wake_nx_recovery_thread(kvm);
7515 
7516 		mutex_unlock(&kvm_lock);
7517 	}
7518 
7519 	return err;
7520 }
7521 
kvm_recover_nx_huge_pages(struct kvm * kvm)7522 static void kvm_recover_nx_huge_pages(struct kvm *kvm)
7523 {
7524 	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
7525 	struct kvm_memory_slot *slot;
7526 	int rcu_idx;
7527 	struct kvm_mmu_page *sp;
7528 	unsigned int ratio;
7529 	LIST_HEAD(invalid_list);
7530 	bool flush = false;
7531 	ulong to_zap;
7532 
7533 	rcu_idx = srcu_read_lock(&kvm->srcu);
7534 	write_lock(&kvm->mmu_lock);
7535 
7536 	/*
7537 	 * Zapping TDP MMU shadow pages, including the remote TLB flush, must
7538 	 * be done under RCU protection, because the pages are freed via RCU
7539 	 * callback.
7540 	 */
7541 	rcu_read_lock();
7542 
7543 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7544 	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
7545 	for ( ; to_zap; --to_zap) {
7546 		if (list_empty(&kvm->arch.possible_nx_huge_pages))
7547 			break;
7548 
7549 		/*
7550 		 * We use a separate list instead of just using active_mmu_pages
7551 		 * because the number of shadow pages that be replaced with an
7552 		 * NX huge page is expected to be relatively small compared to
7553 		 * the total number of shadow pages.  And because the TDP MMU
7554 		 * doesn't use active_mmu_pages.
7555 		 */
7556 		sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
7557 				      struct kvm_mmu_page,
7558 				      possible_nx_huge_page_link);
7559 		WARN_ON_ONCE(!sp->nx_huge_page_disallowed);
7560 		WARN_ON_ONCE(!sp->role.direct);
7561 
7562 		/*
7563 		 * Unaccount and do not attempt to recover any NX Huge Pages
7564 		 * that are being dirty tracked, as they would just be faulted
7565 		 * back in as 4KiB pages. The NX Huge Pages in this slot will be
7566 		 * recovered, along with all the other huge pages in the slot,
7567 		 * when dirty logging is disabled.
7568 		 *
7569 		 * Since gfn_to_memslot() is relatively expensive, it helps to
7570 		 * skip it if it the test cannot possibly return true.  On the
7571 		 * other hand, if any memslot has logging enabled, chances are
7572 		 * good that all of them do, in which case unaccount_nx_huge_page()
7573 		 * is much cheaper than zapping the page.
7574 		 *
7575 		 * If a memslot update is in progress, reading an incorrect value
7576 		 * of kvm->nr_memslots_dirty_logging is not a problem: if it is
7577 		 * becoming zero, gfn_to_memslot() will be done unnecessarily; if
7578 		 * it is becoming nonzero, the page will be zapped unnecessarily.
7579 		 * Either way, this only affects efficiency in racy situations,
7580 		 * and not correctness.
7581 		 */
7582 		slot = NULL;
7583 		if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
7584 			struct kvm_memslots *slots;
7585 
7586 			slots = kvm_memslots_for_spte_role(kvm, sp->role);
7587 			slot = __gfn_to_memslot(slots, sp->gfn);
7588 			WARN_ON_ONCE(!slot);
7589 		}
7590 
7591 		if (slot && kvm_slot_dirty_track_enabled(slot))
7592 			unaccount_nx_huge_page(kvm, sp);
7593 		else if (is_tdp_mmu_page(sp))
7594 			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
7595 		else
7596 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7597 		WARN_ON_ONCE(sp->nx_huge_page_disallowed);
7598 
7599 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7600 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7601 			rcu_read_unlock();
7602 
7603 			cond_resched_rwlock_write(&kvm->mmu_lock);
7604 			flush = false;
7605 
7606 			rcu_read_lock();
7607 		}
7608 	}
7609 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7610 
7611 	rcu_read_unlock();
7612 
7613 	write_unlock(&kvm->mmu_lock);
7614 	srcu_read_unlock(&kvm->srcu, rcu_idx);
7615 }
7616 
kvm_nx_huge_page_recovery_worker_kill(void * data)7617 static void kvm_nx_huge_page_recovery_worker_kill(void *data)
7618 {
7619 }
7620 
kvm_nx_huge_page_recovery_worker(void * data)7621 static bool kvm_nx_huge_page_recovery_worker(void *data)
7622 {
7623 	struct kvm *kvm = data;
7624 	bool enabled;
7625 	uint period;
7626 	long remaining_time;
7627 
7628 	enabled = calc_nx_huge_pages_recovery_period(&period);
7629 	if (!enabled)
7630 		return false;
7631 
7632 	remaining_time = kvm->arch.nx_huge_page_last + msecs_to_jiffies(period)
7633 		- get_jiffies_64();
7634 	if (remaining_time > 0) {
7635 		schedule_timeout(remaining_time);
7636 		/* check for signals and come back */
7637 		return true;
7638 	}
7639 
7640 	__set_current_state(TASK_RUNNING);
7641 	kvm_recover_nx_huge_pages(kvm);
7642 	kvm->arch.nx_huge_page_last = get_jiffies_64();
7643 	return true;
7644 }
7645 
kvm_mmu_start_lpage_recovery(struct once * once)7646 static int kvm_mmu_start_lpage_recovery(struct once *once)
7647 {
7648 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
7649 	struct kvm *kvm = container_of(ka, struct kvm, arch);
7650 	struct vhost_task *nx_thread;
7651 
7652 	kvm->arch.nx_huge_page_last = get_jiffies_64();
7653 	nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
7654 				      kvm_nx_huge_page_recovery_worker_kill,
7655 				      kvm, "kvm-nx-lpage-recovery");
7656 
7657 	if (IS_ERR(nx_thread))
7658 		return PTR_ERR(nx_thread);
7659 
7660 	vhost_task_start(nx_thread);
7661 
7662 	/* Make the task visible only once it is fully started. */
7663 	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
7664 	return 0;
7665 }
7666 
kvm_mmu_post_init_vm(struct kvm * kvm)7667 int kvm_mmu_post_init_vm(struct kvm *kvm)
7668 {
7669 	if (nx_hugepage_mitigation_hard_disabled)
7670 		return 0;
7671 
7672 	return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
7673 }
7674 
kvm_mmu_pre_destroy_vm(struct kvm * kvm)7675 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
7676 {
7677 	if (kvm->arch.nx_huge_page_recovery_thread)
7678 		vhost_task_stop(kvm->arch.nx_huge_page_recovery_thread);
7679 }
7680 
7681 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
hugepage_test_mixed(struct kvm_memory_slot * slot,gfn_t gfn,int level)7682 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7683 				int level)
7684 {
7685 	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
7686 }
7687 
hugepage_clear_mixed(struct kvm_memory_slot * slot,gfn_t gfn,int level)7688 static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7689 				 int level)
7690 {
7691 	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
7692 }
7693 
hugepage_set_mixed(struct kvm_memory_slot * slot,gfn_t gfn,int level)7694 static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7695 			       int level)
7696 {
7697 	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
7698 }
7699 
kvm_arch_pre_set_memory_attributes(struct kvm * kvm,struct kvm_gfn_range * range)7700 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
7701 					struct kvm_gfn_range *range)
7702 {
7703 	struct kvm_memory_slot *slot = range->slot;
7704 	int level;
7705 
7706 	/*
7707 	 * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
7708 	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
7709 	 * can simply ignore such slots.  But if userspace is making memory
7710 	 * PRIVATE, then KVM must prevent the guest from accessing the memory
7711 	 * as shared.  And if userspace is making memory SHARED and this point
7712 	 * is reached, then at least one page within the range was previously
7713 	 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
7714 	 * Zapping SPTEs in this case ensures KVM will reassess whether or not
7715 	 * a hugepage can be used for affected ranges.
7716 	 */
7717 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7718 		return false;
7719 
7720 	if (WARN_ON_ONCE(range->end <= range->start))
7721 		return false;
7722 
7723 	/*
7724 	 * If the head and tail pages of the range currently allow a hugepage,
7725 	 * i.e. reside fully in the slot and don't have mixed attributes, then
7726 	 * add each corresponding hugepage range to the ongoing invalidation,
7727 	 * e.g. to prevent KVM from creating a hugepage in response to a fault
7728 	 * for a gfn whose attributes aren't changing.  Note, only the range
7729 	 * of gfns whose attributes are being modified needs to be explicitly
7730 	 * unmapped, as that will unmap any existing hugepages.
7731 	 */
7732 	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7733 		gfn_t start = gfn_round_for_level(range->start, level);
7734 		gfn_t end = gfn_round_for_level(range->end - 1, level);
7735 		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7736 
7737 		if ((start != range->start || start + nr_pages > range->end) &&
7738 		    start >= slot->base_gfn &&
7739 		    start + nr_pages <= slot->base_gfn + slot->npages &&
7740 		    !hugepage_test_mixed(slot, start, level))
7741 			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
7742 
7743 		if (end == start)
7744 			continue;
7745 
7746 		if ((end + nr_pages) > range->end &&
7747 		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
7748 		    !hugepage_test_mixed(slot, end, level))
7749 			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
7750 	}
7751 
7752 	/* Unmap the old attribute page. */
7753 	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
7754 		range->attr_filter = KVM_FILTER_SHARED;
7755 	else
7756 		range->attr_filter = KVM_FILTER_PRIVATE;
7757 
7758 	return kvm_unmap_gfn_range(kvm, range);
7759 }
7760 
7761 
7762 
hugepage_has_attrs(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn,int level,unsigned long attrs)7763 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
7764 			       gfn_t gfn, int level, unsigned long attrs)
7765 {
7766 	const unsigned long start = gfn;
7767 	const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
7768 
7769 	if (level == PG_LEVEL_2M)
7770 		return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
7771 
7772 	for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
7773 		if (hugepage_test_mixed(slot, gfn, level - 1) ||
7774 		    attrs != kvm_get_memory_attributes(kvm, gfn))
7775 			return false;
7776 	}
7777 	return true;
7778 }
7779 
kvm_arch_post_set_memory_attributes(struct kvm * kvm,struct kvm_gfn_range * range)7780 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
7781 					 struct kvm_gfn_range *range)
7782 {
7783 	unsigned long attrs = range->arg.attributes;
7784 	struct kvm_memory_slot *slot = range->slot;
7785 	int level;
7786 
7787 	lockdep_assert_held_write(&kvm->mmu_lock);
7788 	lockdep_assert_held(&kvm->slots_lock);
7789 
7790 	/*
7791 	 * Calculate which ranges can be mapped with hugepages even if the slot
7792 	 * can't map memory PRIVATE.  KVM mustn't create a SHARED hugepage over
7793 	 * a range that has PRIVATE GFNs, and conversely converting a range to
7794 	 * SHARED may now allow hugepages.
7795 	 */
7796 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7797 		return false;
7798 
7799 	/*
7800 	 * The sequence matters here: upper levels consume the result of lower
7801 	 * level's scanning.
7802 	 */
7803 	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7804 		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7805 		gfn_t gfn = gfn_round_for_level(range->start, level);
7806 
7807 		/* Process the head page if it straddles the range. */
7808 		if (gfn != range->start || gfn + nr_pages > range->end) {
7809 			/*
7810 			 * Skip mixed tracking if the aligned gfn isn't covered
7811 			 * by the memslot, KVM can't use a hugepage due to the
7812 			 * misaligned address regardless of memory attributes.
7813 			 */
7814 			if (gfn >= slot->base_gfn &&
7815 			    gfn + nr_pages <= slot->base_gfn + slot->npages) {
7816 				if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7817 					hugepage_clear_mixed(slot, gfn, level);
7818 				else
7819 					hugepage_set_mixed(slot, gfn, level);
7820 			}
7821 			gfn += nr_pages;
7822 		}
7823 
7824 		/*
7825 		 * Pages entirely covered by the range are guaranteed to have
7826 		 * only the attributes which were just set.
7827 		 */
7828 		for ( ; gfn + nr_pages <= range->end; gfn += nr_pages)
7829 			hugepage_clear_mixed(slot, gfn, level);
7830 
7831 		/*
7832 		 * Process the last tail page if it straddles the range and is
7833 		 * contained by the memslot.  Like the head page, KVM can't
7834 		 * create a hugepage if the slot size is misaligned.
7835 		 */
7836 		if (gfn < range->end &&
7837 		    (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
7838 			if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7839 				hugepage_clear_mixed(slot, gfn, level);
7840 			else
7841 				hugepage_set_mixed(slot, gfn, level);
7842 		}
7843 	}
7844 	return false;
7845 }
7846 
kvm_mmu_init_memslot_memory_attributes(struct kvm * kvm,struct kvm_memory_slot * slot)7847 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
7848 					    struct kvm_memory_slot *slot)
7849 {
7850 	int level;
7851 
7852 	if (!kvm_arch_has_private_mem(kvm))
7853 		return;
7854 
7855 	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7856 		/*
7857 		 * Don't bother tracking mixed attributes for pages that can't
7858 		 * be huge due to alignment, i.e. process only pages that are
7859 		 * entirely contained by the memslot.
7860 		 */
7861 		gfn_t end = gfn_round_for_level(slot->base_gfn + slot->npages, level);
7862 		gfn_t start = gfn_round_for_level(slot->base_gfn, level);
7863 		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7864 		gfn_t gfn;
7865 
7866 		if (start < slot->base_gfn)
7867 			start += nr_pages;
7868 
7869 		/*
7870 		 * Unlike setting attributes, every potential hugepage needs to
7871 		 * be manually checked as the attributes may already be mixed.
7872 		 */
7873 		for (gfn = start; gfn < end; gfn += nr_pages) {
7874 			unsigned long attrs = kvm_get_memory_attributes(kvm, gfn);
7875 
7876 			if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7877 				hugepage_clear_mixed(slot, gfn, level);
7878 			else
7879 				hugepage_set_mixed(slot, gfn, level);
7880 		}
7881 	}
7882 }
7883 #endif
7884