xref: /linux/arch/x86/kvm/mmu/mmu.c (revision 533f9a4b387bf79c722faf0a760a09129d9627f9)
1c50d8ae3SPaolo Bonzini // SPDX-License-Identifier: GPL-2.0-only
2c50d8ae3SPaolo Bonzini /*
3c50d8ae3SPaolo Bonzini  * Kernel-based Virtual Machine driver for Linux
4c50d8ae3SPaolo Bonzini  *
5c50d8ae3SPaolo Bonzini  * This module enables machines with Intel VT-x extensions to run virtual
6c50d8ae3SPaolo Bonzini  * machines without emulation or binary translation.
7c50d8ae3SPaolo Bonzini  *
8c50d8ae3SPaolo Bonzini  * MMU support
9c50d8ae3SPaolo Bonzini  *
10c50d8ae3SPaolo Bonzini  * Copyright (C) 2006 Qumranet, Inc.
11c50d8ae3SPaolo Bonzini  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12c50d8ae3SPaolo Bonzini  *
13c50d8ae3SPaolo Bonzini  * Authors:
14c50d8ae3SPaolo Bonzini  *   Yaniv Kamay  <yaniv@qumranet.com>
15c50d8ae3SPaolo Bonzini  *   Avi Kivity   <avi@qumranet.com>
16c50d8ae3SPaolo Bonzini  */
17c50d8ae3SPaolo Bonzini 
18c50d8ae3SPaolo Bonzini #include "irq.h"
1988197e6aS彭浩(Richard) #include "ioapic.h"
20c50d8ae3SPaolo Bonzini #include "mmu.h"
216ca9a6f3SSean Christopherson #include "mmu_internal.h"
22fe5db27dSBen Gardon #include "tdp_mmu.h"
23c50d8ae3SPaolo Bonzini #include "x86.h"
24c50d8ae3SPaolo Bonzini #include "kvm_cache_regs.h"
252f728d66SSean Christopherson #include "kvm_emulate.h"
26c50d8ae3SPaolo Bonzini #include "cpuid.h"
275a9624afSPaolo Bonzini #include "spte.h"
28c50d8ae3SPaolo Bonzini 
29c50d8ae3SPaolo Bonzini #include <linux/kvm_host.h>
30c50d8ae3SPaolo Bonzini #include <linux/types.h>
31c50d8ae3SPaolo Bonzini #include <linux/string.h>
32c50d8ae3SPaolo Bonzini #include <linux/mm.h>
33c50d8ae3SPaolo Bonzini #include <linux/highmem.h>
34c50d8ae3SPaolo Bonzini #include <linux/moduleparam.h>
35c50d8ae3SPaolo Bonzini #include <linux/export.h>
36c50d8ae3SPaolo Bonzini #include <linux/swap.h>
37c50d8ae3SPaolo Bonzini #include <linux/hugetlb.h>
38c50d8ae3SPaolo Bonzini #include <linux/compiler.h>
39c50d8ae3SPaolo Bonzini #include <linux/srcu.h>
40c50d8ae3SPaolo Bonzini #include <linux/slab.h>
41c50d8ae3SPaolo Bonzini #include <linux/sched/signal.h>
42c50d8ae3SPaolo Bonzini #include <linux/uaccess.h>
43c50d8ae3SPaolo Bonzini #include <linux/hash.h>
44c50d8ae3SPaolo Bonzini #include <linux/kern_levels.h>
45c50d8ae3SPaolo Bonzini #include <linux/kthread.h>
46c50d8ae3SPaolo Bonzini 
47c50d8ae3SPaolo Bonzini #include <asm/page.h>
48eb243d1dSIngo Molnar #include <asm/memtype.h>
49c50d8ae3SPaolo Bonzini #include <asm/cmpxchg.h>
50c50d8ae3SPaolo Bonzini #include <asm/io.h>
514a98623dSSean Christopherson #include <asm/set_memory.h>
52c50d8ae3SPaolo Bonzini #include <asm/vmx.h>
53c50d8ae3SPaolo Bonzini #include <asm/kvm_page_track.h>
54c50d8ae3SPaolo Bonzini #include "trace.h"
55c50d8ae3SPaolo Bonzini 
56c50d8ae3SPaolo Bonzini extern bool itlb_multihit_kvm_mitigation;
57c50d8ae3SPaolo Bonzini 
58a9d6496dSShaokun Zhang int __read_mostly nx_huge_pages = -1;
59c50d8ae3SPaolo Bonzini #ifdef CONFIG_PREEMPT_RT
60c50d8ae3SPaolo Bonzini /* Recovery can cause latency spikes; disable it for PREEMPT_RT. */
61c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
62c50d8ae3SPaolo Bonzini #else
63c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
64c50d8ae3SPaolo Bonzini #endif
65c50d8ae3SPaolo Bonzini 
66c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
67c50d8ae3SPaolo Bonzini static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
68c50d8ae3SPaolo Bonzini 
69d5d6c18dSJoe Perches static const struct kernel_param_ops nx_huge_pages_ops = {
70c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages,
71c50d8ae3SPaolo Bonzini 	.get = param_get_bool,
72c50d8ae3SPaolo Bonzini };
73c50d8ae3SPaolo Bonzini 
74d5d6c18dSJoe Perches static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
75c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages_recovery_ratio,
76c50d8ae3SPaolo Bonzini 	.get = param_get_uint,
77c50d8ae3SPaolo Bonzini };
78c50d8ae3SPaolo Bonzini 
79c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
80c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages, "bool");
81c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
82c50d8ae3SPaolo Bonzini 		&nx_huge_pages_recovery_ratio, 0644);
83c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
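/*
 * For reference (path assumed from the standard module-parameter layout):
 * since mmu.c is built into kvm.ko, both knobs are writable at runtime via
 * /sys/module/kvm/parameters/nx_huge_pages and
 * /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio (mode 0644),
 * with the set_nx_huge_pages*() handlers registered above validating writes.
 */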
84c50d8ae3SPaolo Bonzini 
8571fe7013SSean Christopherson static bool __read_mostly force_flush_and_sync_on_reuse;
8671fe7013SSean Christopherson module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
8771fe7013SSean Christopherson 
88c50d8ae3SPaolo Bonzini /*
89c50d8ae3SPaolo Bonzini  * When this variable is set to true, Two-Dimensional Paging (TDP) is
90c50d8ae3SPaolo Bonzini  * enabled and the hardware walks two page tables:
91c50d8ae3SPaolo Bonzini  * 1. the guest-virtual to guest-physical table
92c50d8ae3SPaolo Bonzini  * 2. while doing 1., the guest-physical to host-physical table
93c50d8ae3SPaolo Bonzini  * If the hardware supports that, we don't need to do shadow paging.
94c50d8ae3SPaolo Bonzini  */
95c50d8ae3SPaolo Bonzini bool tdp_enabled = false;
96c50d8ae3SPaolo Bonzini 
971d92d2e8SSean Christopherson static int max_huge_page_level __read_mostly;
9883013059SSean Christopherson static int max_tdp_level __read_mostly;
99703c335dSSean Christopherson 
100c50d8ae3SPaolo Bonzini enum {
101c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PAGE_FAULT,
102c50d8ae3SPaolo Bonzini 	AUDIT_POST_PAGE_FAULT,
103c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PTE_WRITE,
104c50d8ae3SPaolo Bonzini 	AUDIT_POST_PTE_WRITE,
105c50d8ae3SPaolo Bonzini 	AUDIT_PRE_SYNC,
106c50d8ae3SPaolo Bonzini 	AUDIT_POST_SYNC
107c50d8ae3SPaolo Bonzini };
108c50d8ae3SPaolo Bonzini 
109c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1105a9624afSPaolo Bonzini bool dbg = 0;
111c50d8ae3SPaolo Bonzini module_param(dbg, bool, 0644);
112c50d8ae3SPaolo Bonzini #endif
113c50d8ae3SPaolo Bonzini 
114c50d8ae3SPaolo Bonzini #define PTE_PREFETCH_NUM		8
115c50d8ae3SPaolo Bonzini 
116c50d8ae3SPaolo Bonzini #define PT32_LEVEL_BITS 10
117c50d8ae3SPaolo Bonzini 
118c50d8ae3SPaolo Bonzini #define PT32_LEVEL_SHIFT(level) \
119c50d8ae3SPaolo Bonzini 		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
120c50d8ae3SPaolo Bonzini 
121c50d8ae3SPaolo Bonzini #define PT32_LVL_OFFSET_MASK(level) \
122c50d8ae3SPaolo Bonzini 	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
123c50d8ae3SPaolo Bonzini 						* PT32_LEVEL_BITS))) - 1))
124c50d8ae3SPaolo Bonzini 
125c50d8ae3SPaolo Bonzini #define PT32_INDEX(address, level)\
126c50d8ae3SPaolo Bonzini 	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
127c50d8ae3SPaolo Bonzini 
128c50d8ae3SPaolo Bonzini 
129c50d8ae3SPaolo Bonzini #define PT32_BASE_ADDR_MASK PAGE_MASK
130c50d8ae3SPaolo Bonzini #define PT32_DIR_BASE_ADDR_MASK \
131c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
132c50d8ae3SPaolo Bonzini #define PT32_LVL_ADDR_MASK(level) \
133c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
134c50d8ae3SPaolo Bonzini 					    * PT32_LEVEL_BITS))) - 1))
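/*
 * Worked example: with PAGE_SHIFT == 12 and 10 index bits per level,
 * PT32_LEVEL_SHIFT(1) == 12 and PT32_LEVEL_SHIFT(2) == 22, so for a 32-bit
 * guest-virtual address PT32_INDEX(addr, 2) extracts bits 31:22 (the
 * page-directory index) and PT32_INDEX(addr, 1) extracts bits 21:12 (the
 * page-table index).
 */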
135c50d8ae3SPaolo Bonzini 
136c50d8ae3SPaolo Bonzini #include <trace/events/kvm.h>
137c50d8ae3SPaolo Bonzini 
138c50d8ae3SPaolo Bonzini /* make pte_list_desc fit well in a cache line */
139c50d8ae3SPaolo Bonzini #define PTE_LIST_EXT 3
140c50d8ae3SPaolo Bonzini 
141c50d8ae3SPaolo Bonzini struct pte_list_desc {
142c50d8ae3SPaolo Bonzini 	u64 *sptes[PTE_LIST_EXT];
143c50d8ae3SPaolo Bonzini 	struct pte_list_desc *more;
144c50d8ae3SPaolo Bonzini };
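/*
 * On 64-bit hosts this works out to 3 * 8 + 8 = 32 bytes, i.e. two
 * descriptors per typical 64-byte cache line.
 */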
145c50d8ae3SPaolo Bonzini 
146c50d8ae3SPaolo Bonzini struct kvm_shadow_walk_iterator {
147c50d8ae3SPaolo Bonzini 	u64 addr;
148c50d8ae3SPaolo Bonzini 	hpa_t shadow_addr;
149c50d8ae3SPaolo Bonzini 	u64 *sptep;
150c50d8ae3SPaolo Bonzini 	int level;
151c50d8ae3SPaolo Bonzini 	unsigned index;
152c50d8ae3SPaolo Bonzini };
153c50d8ae3SPaolo Bonzini 
154c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
155c50d8ae3SPaolo Bonzini 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
156c50d8ae3SPaolo Bonzini 					 (_root), (_addr));                \
157c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			           \
158c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
159c50d8ae3SPaolo Bonzini 
160c50d8ae3SPaolo Bonzini #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
161c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
162c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			\
163c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
164c50d8ae3SPaolo Bonzini 
165c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
166c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
167c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker)) &&				\
168c50d8ae3SPaolo Bonzini 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
169c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&(_walker), spte))
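/*
 * Illustrative usage (goal_level is a hypothetical local, not a symbol
 * defined here); the fault-handling paths later in this file walk the
 * shadow page tables roughly like so:
 *
 *	struct kvm_shadow_walk_iterator it;
 *
 *	for_each_shadow_entry(vcpu, gpa, it) {
 *		if (it.level == goal_level)
 *			break;
 *		... examine or install *it.sptep ...
 *	}
 */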
170c50d8ae3SPaolo Bonzini 
171c50d8ae3SPaolo Bonzini static struct kmem_cache *pte_list_desc_cache;
17202c00b3aSBen Gardon struct kmem_cache *mmu_page_header_cache;
173c50d8ae3SPaolo Bonzini static struct percpu_counter kvm_total_used_mmu_pages;
174c50d8ae3SPaolo Bonzini 
175c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 spte);
176c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
177c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
178c50d8ae3SPaolo Bonzini 
179594e91a1SSean Christopherson struct kvm_mmu_role_regs {
180594e91a1SSean Christopherson 	const unsigned long cr0;
181594e91a1SSean Christopherson 	const unsigned long cr4;
182594e91a1SSean Christopherson 	const u64 efer;
183594e91a1SSean Christopherson };
184594e91a1SSean Christopherson 
185c50d8ae3SPaolo Bonzini #define CREATE_TRACE_POINTS
186c50d8ae3SPaolo Bonzini #include "mmutrace.h"
187c50d8ae3SPaolo Bonzini 
188594e91a1SSean Christopherson /*
189594e91a1SSean Christopherson  * Yes, lots of underscores.  They're a hint that you probably shouldn't be
190594e91a1SSean Christopherson  * reading from the role_regs.  Once the mmu_role is constructed, it becomes
191594e91a1SSean Christopherson  * the single source of truth for the MMU's state.
192594e91a1SSean Christopherson  */
193594e91a1SSean Christopherson #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
194594e91a1SSean Christopherson static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
195594e91a1SSean Christopherson {									\
196594e91a1SSean Christopherson 	return !!(regs->reg & flag);					\
197594e91a1SSean Christopherson }
198594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
199594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
200594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
201594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
202594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
203594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
204594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
205594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
206594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
207594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
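/*
 * For example, BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG) above
 * expands to:
 *
 *	static inline bool ____is_cr0_pg(struct kvm_mmu_role_regs *regs)
 *	{
 *		return !!(regs->cr0 & X86_CR0_PG);
 *	}
 */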
208594e91a1SSean Christopherson 
20960667724SSean Christopherson /*
21060667724SSean Christopherson  * The MMU itself (with a valid role) is the single source of truth for the
21160667724SSean Christopherson  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
21260667724SSean Christopherson  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
21360667724SSean Christopherson  * and the vCPU may be incorrect/irrelevant.
21460667724SSean Christopherson  */
21560667724SSean Christopherson #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
21660667724SSean Christopherson static inline bool is_##reg##_##name(struct kvm_mmu *mmu)	\
21760667724SSean Christopherson {								\
21860667724SSean Christopherson 	return !!(mmu->mmu_role. base_or_ext . reg##_##name);	\
21960667724SSean Christopherson }
22060667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
22160667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
22260667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
22360667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
22460667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
22560667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
22660667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
22760667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
22860667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
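/*
 * Likewise, BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp) above expands to:
 *
 *	static inline bool is_cr0_wp(struct kvm_mmu *mmu)
 *	{
 *		return !!(mmu->mmu_role.base.cr0_wp);
 *	}
 */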
22960667724SSean Christopherson 
230594e91a1SSean Christopherson static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
231594e91a1SSean Christopherson {
232594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = {
233594e91a1SSean Christopherson 		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
234594e91a1SSean Christopherson 		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
235594e91a1SSean Christopherson 		.efer = vcpu->arch.efer,
236594e91a1SSean Christopherson 	};
237594e91a1SSean Christopherson 
238594e91a1SSean Christopherson 	return regs;
239594e91a1SSean Christopherson }
240c50d8ae3SPaolo Bonzini 
241c50d8ae3SPaolo Bonzini static inline bool kvm_available_flush_tlb_with_range(void)
242c50d8ae3SPaolo Bonzini {
243afaf0b2fSSean Christopherson 	return kvm_x86_ops.tlb_remote_flush_with_range;
244c50d8ae3SPaolo Bonzini }
245c50d8ae3SPaolo Bonzini 
246c50d8ae3SPaolo Bonzini static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
247c50d8ae3SPaolo Bonzini 		struct kvm_tlb_range *range)
248c50d8ae3SPaolo Bonzini {
249c50d8ae3SPaolo Bonzini 	int ret = -ENOTSUPP;
250c50d8ae3SPaolo Bonzini 
251afaf0b2fSSean Christopherson 	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
252b3646477SJason Baron 		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
253c50d8ae3SPaolo Bonzini 
254c50d8ae3SPaolo Bonzini 	if (ret)
255c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
256c50d8ae3SPaolo Bonzini }
257c50d8ae3SPaolo Bonzini 
2582f2fad08SBen Gardon void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
259c50d8ae3SPaolo Bonzini 		u64 start_gfn, u64 pages)
260c50d8ae3SPaolo Bonzini {
261c50d8ae3SPaolo Bonzini 	struct kvm_tlb_range range;
262c50d8ae3SPaolo Bonzini 
263c50d8ae3SPaolo Bonzini 	range.start_gfn = start_gfn;
264c50d8ae3SPaolo Bonzini 	range.pages = pages;
265c50d8ae3SPaolo Bonzini 
266c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_range(kvm, &range);
267c50d8ae3SPaolo Bonzini }
268c50d8ae3SPaolo Bonzini 
2698f79b064SBen Gardon static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
2708f79b064SBen Gardon 			   unsigned int access)
2718f79b064SBen Gardon {
272c236d962SSean Christopherson 	u64 spte = make_mmio_spte(vcpu, gfn, access);
2738f79b064SBen Gardon 
274c236d962SSean Christopherson 	trace_mark_mmio_spte(sptep, gfn, spte);
275c236d962SSean Christopherson 	mmu_spte_set(sptep, spte);
276c50d8ae3SPaolo Bonzini }
277c50d8ae3SPaolo Bonzini 
278c50d8ae3SPaolo Bonzini static gfn_t get_mmio_spte_gfn(u64 spte)
279c50d8ae3SPaolo Bonzini {
280c50d8ae3SPaolo Bonzini 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
281c50d8ae3SPaolo Bonzini 
2828a967d65SPaolo Bonzini 	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
283c50d8ae3SPaolo Bonzini 	       & shadow_nonpresent_or_rsvd_mask;
284c50d8ae3SPaolo Bonzini 
285c50d8ae3SPaolo Bonzini 	return gpa >> PAGE_SHIFT;
286c50d8ae3SPaolo Bonzini }
287c50d8ae3SPaolo Bonzini 
288c50d8ae3SPaolo Bonzini static unsigned get_mmio_spte_access(u64 spte)
289c50d8ae3SPaolo Bonzini {
290c50d8ae3SPaolo Bonzini 	return spte & shadow_mmio_access_mask;
291c50d8ae3SPaolo Bonzini }
292c50d8ae3SPaolo Bonzini 
293c50d8ae3SPaolo Bonzini static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
294c50d8ae3SPaolo Bonzini {
295c50d8ae3SPaolo Bonzini 	u64 kvm_gen, spte_gen, gen;
296c50d8ae3SPaolo Bonzini 
297c50d8ae3SPaolo Bonzini 	gen = kvm_vcpu_memslots(vcpu)->generation;
298c50d8ae3SPaolo Bonzini 	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
299c50d8ae3SPaolo Bonzini 		return false;
300c50d8ae3SPaolo Bonzini 
301c50d8ae3SPaolo Bonzini 	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
302c50d8ae3SPaolo Bonzini 	spte_gen = get_mmio_spte_generation(spte);
303c50d8ae3SPaolo Bonzini 
304c50d8ae3SPaolo Bonzini 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
305c50d8ae3SPaolo Bonzini 	return likely(kvm_gen == spte_gen);
306c50d8ae3SPaolo Bonzini }
307c50d8ae3SPaolo Bonzini 
308cd313569SMohammed Gamal static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
309cd313569SMohammed Gamal                                   struct x86_exception *exception)
310cd313569SMohammed Gamal {
311ec7771abSMohammed Gamal 	/* Check that the guest physical address does not exceed the guest maximum. */
312dc46515cSSean Christopherson 	if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
313ec7771abSMohammed Gamal 		exception->error_code |= PFERR_RSVD_MASK;
314ec7771abSMohammed Gamal 		return UNMAPPED_GVA;
315ec7771abSMohammed Gamal 	}
316ec7771abSMohammed Gamal 
317cd313569SMohammed Gamal 	return gpa;
318cd313569SMohammed Gamal }
319cd313569SMohammed Gamal 
320c50d8ae3SPaolo Bonzini static int is_cpuid_PSE36(void)
321c50d8ae3SPaolo Bonzini {
322c50d8ae3SPaolo Bonzini 	return 1;
323c50d8ae3SPaolo Bonzini }
324c50d8ae3SPaolo Bonzini 
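/*
 * PSE-36: a 4 MB page-directory entry stores the high physical-address
 * bits in its middle bits; pse36_gfn_delta() shifts those bits back into
 * gfn position so they can be added to the base gfn of the mapping.
 */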
325c50d8ae3SPaolo Bonzini static gfn_t pse36_gfn_delta(u32 gpte)
326c50d8ae3SPaolo Bonzini {
327c50d8ae3SPaolo Bonzini 	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
328c50d8ae3SPaolo Bonzini 
329c50d8ae3SPaolo Bonzini 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
330c50d8ae3SPaolo Bonzini }
331c50d8ae3SPaolo Bonzini 
332c50d8ae3SPaolo Bonzini #ifdef CONFIG_X86_64
333c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
334c50d8ae3SPaolo Bonzini {
335c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
336c50d8ae3SPaolo Bonzini }
337c50d8ae3SPaolo Bonzini 
338c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
339c50d8ae3SPaolo Bonzini {
340c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
341c50d8ae3SPaolo Bonzini }
342c50d8ae3SPaolo Bonzini 
343c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
344c50d8ae3SPaolo Bonzini {
345c50d8ae3SPaolo Bonzini 	return xchg(sptep, spte);
346c50d8ae3SPaolo Bonzini }
347c50d8ae3SPaolo Bonzini 
348c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
349c50d8ae3SPaolo Bonzini {
350c50d8ae3SPaolo Bonzini 	return READ_ONCE(*sptep);
351c50d8ae3SPaolo Bonzini }
352c50d8ae3SPaolo Bonzini #else
353c50d8ae3SPaolo Bonzini union split_spte {
354c50d8ae3SPaolo Bonzini 	struct {
355c50d8ae3SPaolo Bonzini 		u32 spte_low;
356c50d8ae3SPaolo Bonzini 		u32 spte_high;
357c50d8ae3SPaolo Bonzini 	};
358c50d8ae3SPaolo Bonzini 	u64 spte;
359c50d8ae3SPaolo Bonzini };
360c50d8ae3SPaolo Bonzini 
361c50d8ae3SPaolo Bonzini static void count_spte_clear(u64 *sptep, u64 spte)
362c50d8ae3SPaolo Bonzini {
36357354682SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
364c50d8ae3SPaolo Bonzini 
365c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(spte))
366c50d8ae3SPaolo Bonzini 		return;
367c50d8ae3SPaolo Bonzini 
368c50d8ae3SPaolo Bonzini 	/* Ensure the spte is completely set before we increase the count */
369c50d8ae3SPaolo Bonzini 	smp_wmb();
370c50d8ae3SPaolo Bonzini 	sp->clear_spte_count++;
371c50d8ae3SPaolo Bonzini }
372c50d8ae3SPaolo Bonzini 
373c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
374c50d8ae3SPaolo Bonzini {
375c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
376c50d8ae3SPaolo Bonzini 
377c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
378c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
379c50d8ae3SPaolo Bonzini 
380c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
381c50d8ae3SPaolo Bonzini 
382c50d8ae3SPaolo Bonzini 	/*
383c50d8ae3SPaolo Bonzini 	 * If we map the spte from nonpresent to present, we should store
384c50d8ae3SPaolo Bonzini 	 * the high bits first, then set the present bit, so the CPU cannot
385c50d8ae3SPaolo Bonzini 	 * fetch this spte while we are setting it.
386c50d8ae3SPaolo Bonzini 	 */
387c50d8ae3SPaolo Bonzini 	smp_wmb();
388c50d8ae3SPaolo Bonzini 
389c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
390c50d8ae3SPaolo Bonzini }
391c50d8ae3SPaolo Bonzini 
392c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
393c50d8ae3SPaolo Bonzini {
394c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
395c50d8ae3SPaolo Bonzini 
396c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
397c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
398c50d8ae3SPaolo Bonzini 
399c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
400c50d8ae3SPaolo Bonzini 
401c50d8ae3SPaolo Bonzini 	/*
402c50d8ae3SPaolo Bonzini 	 * If we map the spte from present to nonpresent, we should clear the
403c50d8ae3SPaolo Bonzini 	 * present bit first to avoid the vCPU fetching the old high bits.
404c50d8ae3SPaolo Bonzini 	 */
405c50d8ae3SPaolo Bonzini 	smp_wmb();
406c50d8ae3SPaolo Bonzini 
407c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
408c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
409c50d8ae3SPaolo Bonzini }
410c50d8ae3SPaolo Bonzini 
411c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
412c50d8ae3SPaolo Bonzini {
413c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte, orig;
414c50d8ae3SPaolo Bonzini 
415c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
416c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
417c50d8ae3SPaolo Bonzini 
418c50d8ae3SPaolo Bonzini 	/* xchg acts as a barrier before the setting of the high bits */
419c50d8ae3SPaolo Bonzini 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
420c50d8ae3SPaolo Bonzini 	orig.spte_high = ssptep->spte_high;
421c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
422c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
423c50d8ae3SPaolo Bonzini 
424c50d8ae3SPaolo Bonzini 	return orig.spte;
425c50d8ae3SPaolo Bonzini }
426c50d8ae3SPaolo Bonzini 
427c50d8ae3SPaolo Bonzini /*
428c50d8ae3SPaolo Bonzini  * The idea of using this lightweight way to get the spte on an x86_32
429c50d8ae3SPaolo Bonzini  * guest comes from gup_get_pte (mm/gup.c).
430c50d8ae3SPaolo Bonzini  *
431c50d8ae3SPaolo Bonzini  * An spte TLB flush may be pending, because kvm_set_pte_rmapp
432c50d8ae3SPaolo Bonzini  * coalesces them and we are running outside of the MMU lock.  Therefore
433c50d8ae3SPaolo Bonzini  * we need to protect against in-progress updates of the spte.
434c50d8ae3SPaolo Bonzini  *
435c50d8ae3SPaolo Bonzini  * Reading the spte while an update is in progress may get the old value
436c50d8ae3SPaolo Bonzini  * for the high part of the spte.  The race is fine for a present->non-present
437c50d8ae3SPaolo Bonzini  * change (because the high part of the spte is ignored for non-present spte),
438c50d8ae3SPaolo Bonzini  * but for a present->present change we must reread the spte.
439c50d8ae3SPaolo Bonzini  *
440c50d8ae3SPaolo Bonzini  * All such changes are done in two steps (present->non-present and
441c50d8ae3SPaolo Bonzini  * non-present->present), hence it is enough to count the number of
442c50d8ae3SPaolo Bonzini  * present->non-present updates: if it changed while reading the spte,
443c50d8ae3SPaolo Bonzini  * we might have hit the race.  This is done using clear_spte_count.
444c50d8ae3SPaolo Bonzini  */
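/*
 * In effect this is an open-coded sequence counter: count_spte_clear()
 * bumps clear_spte_count (after an smp_wmb()) on each transition to a
 * non-present spte, and the reader below retries whenever it observes
 * either the count or the low word change underneath it.
 */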
445c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
446c50d8ae3SPaolo Bonzini {
44757354682SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
448c50d8ae3SPaolo Bonzini 	union split_spte spte, *orig = (union split_spte *)sptep;
449c50d8ae3SPaolo Bonzini 	int count;
450c50d8ae3SPaolo Bonzini 
451c50d8ae3SPaolo Bonzini retry:
452c50d8ae3SPaolo Bonzini 	count = sp->clear_spte_count;
453c50d8ae3SPaolo Bonzini 	smp_rmb();
454c50d8ae3SPaolo Bonzini 
455c50d8ae3SPaolo Bonzini 	spte.spte_low = orig->spte_low;
456c50d8ae3SPaolo Bonzini 	smp_rmb();
457c50d8ae3SPaolo Bonzini 
458c50d8ae3SPaolo Bonzini 	spte.spte_high = orig->spte_high;
459c50d8ae3SPaolo Bonzini 	smp_rmb();
460c50d8ae3SPaolo Bonzini 
461c50d8ae3SPaolo Bonzini 	if (unlikely(spte.spte_low != orig->spte_low ||
462c50d8ae3SPaolo Bonzini 	      count != sp->clear_spte_count))
463c50d8ae3SPaolo Bonzini 		goto retry;
464c50d8ae3SPaolo Bonzini 
465c50d8ae3SPaolo Bonzini 	return spte.spte;
466c50d8ae3SPaolo Bonzini }
467c50d8ae3SPaolo Bonzini #endif
468c50d8ae3SPaolo Bonzini 
469c50d8ae3SPaolo Bonzini static bool spte_has_volatile_bits(u64 spte)
470c50d8ae3SPaolo Bonzini {
471c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(spte))
472c50d8ae3SPaolo Bonzini 		return false;
473c50d8ae3SPaolo Bonzini 
474c50d8ae3SPaolo Bonzini 	/*
475c50d8ae3SPaolo Bonzini 	 * Always atomically update the spte if it can be updated outside
476c50d8ae3SPaolo Bonzini 	 * of the mmu-lock.  This ensures the dirty bit is not lost and
477c50d8ae3SPaolo Bonzini 	 * helps us get a stable is_writable_pte() so that a needed TLB
478c50d8ae3SPaolo Bonzini 	 * flush is not missed.
479c50d8ae3SPaolo Bonzini 	 */
480c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(spte) ||
481c50d8ae3SPaolo Bonzini 	    is_access_track_spte(spte))
482c50d8ae3SPaolo Bonzini 		return true;
483c50d8ae3SPaolo Bonzini 
484c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
485c50d8ae3SPaolo Bonzini 		if ((spte & shadow_accessed_mask) == 0 ||
486c50d8ae3SPaolo Bonzini 		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
487c50d8ae3SPaolo Bonzini 			return true;
488c50d8ae3SPaolo Bonzini 	}
489c50d8ae3SPaolo Bonzini 
490c50d8ae3SPaolo Bonzini 	return false;
491c50d8ae3SPaolo Bonzini }
492c50d8ae3SPaolo Bonzini 
493c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_set:
494c50d8ae3SPaolo Bonzini  * Set the sptep from nonpresent to present.
495c50d8ae3SPaolo Bonzini  * Note: the sptep being assigned *must* be either not present
496c50d8ae3SPaolo Bonzini  * or in a state where the hardware will not attempt to update
497c50d8ae3SPaolo Bonzini  * the spte.
498c50d8ae3SPaolo Bonzini  */
499c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 new_spte)
500c50d8ae3SPaolo Bonzini {
501c50d8ae3SPaolo Bonzini 	WARN_ON(is_shadow_present_pte(*sptep));
502c50d8ae3SPaolo Bonzini 	__set_spte(sptep, new_spte);
503c50d8ae3SPaolo Bonzini }
504c50d8ae3SPaolo Bonzini 
505c50d8ae3SPaolo Bonzini /*
506c50d8ae3SPaolo Bonzini  * Update the SPTE (excluding the PFN), but do not track changes in its
507c50d8ae3SPaolo Bonzini  * accessed/dirty status.
508c50d8ae3SPaolo Bonzini  */
509c50d8ae3SPaolo Bonzini static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
510c50d8ae3SPaolo Bonzini {
511c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
512c50d8ae3SPaolo Bonzini 
513c50d8ae3SPaolo Bonzini 	WARN_ON(!is_shadow_present_pte(new_spte));
514c50d8ae3SPaolo Bonzini 
515c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte)) {
516c50d8ae3SPaolo Bonzini 		mmu_spte_set(sptep, new_spte);
517c50d8ae3SPaolo Bonzini 		return old_spte;
518c50d8ae3SPaolo Bonzini 	}
519c50d8ae3SPaolo Bonzini 
520c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
521c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, new_spte);
522c50d8ae3SPaolo Bonzini 	else
523c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, new_spte);
524c50d8ae3SPaolo Bonzini 
525c50d8ae3SPaolo Bonzini 	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
526c50d8ae3SPaolo Bonzini 
527c50d8ae3SPaolo Bonzini 	return old_spte;
528c50d8ae3SPaolo Bonzini }
529c50d8ae3SPaolo Bonzini 
530c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_update:
531c50d8ae3SPaolo Bonzini  * Update the state bits; the mapped pfn must not change.
532c50d8ae3SPaolo Bonzini  *
533c50d8ae3SPaolo Bonzini  * Whenever we overwrite a writable spte with a read-only one we
534c50d8ae3SPaolo Bonzini  * should flush remote TLBs.  Otherwise rmap_write_protect will
535c50d8ae3SPaolo Bonzini  * find a read-only spte even though the writable spte might
536c50d8ae3SPaolo Bonzini  * still be cached in a CPU's TLB; the return value indicates
537c50d8ae3SPaolo Bonzini  * this case.
538c50d8ae3SPaolo Bonzini  *
539c50d8ae3SPaolo Bonzini  * Returns true if the TLB needs to be flushed
540c50d8ae3SPaolo Bonzini  */
541c50d8ae3SPaolo Bonzini static bool mmu_spte_update(u64 *sptep, u64 new_spte)
542c50d8ae3SPaolo Bonzini {
543c50d8ae3SPaolo Bonzini 	bool flush = false;
544c50d8ae3SPaolo Bonzini 	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
545c50d8ae3SPaolo Bonzini 
546c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
547c50d8ae3SPaolo Bonzini 		return false;
548c50d8ae3SPaolo Bonzini 
549c50d8ae3SPaolo Bonzini 	/*
550c50d8ae3SPaolo Bonzini 	 * Updating the spte outside of the mmu-lock is safe, since we
551c50d8ae3SPaolo Bonzini 	 * always update it atomically; see the comments in
552c50d8ae3SPaolo Bonzini 	 * spte_has_volatile_bits().
553c50d8ae3SPaolo Bonzini 	 */
554c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(old_spte) &&
555c50d8ae3SPaolo Bonzini 	      !is_writable_pte(new_spte))
556c50d8ae3SPaolo Bonzini 		flush = true;
557c50d8ae3SPaolo Bonzini 
558c50d8ae3SPaolo Bonzini 	/*
559c50d8ae3SPaolo Bonzini 	 * Flush TLB when accessed/dirty states are changed in the page tables,
560c50d8ae3SPaolo Bonzini 	 * to guarantee consistency between TLB and page tables.
561c50d8ae3SPaolo Bonzini 	 */
562c50d8ae3SPaolo Bonzini 
563c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
564c50d8ae3SPaolo Bonzini 		flush = true;
565c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
566c50d8ae3SPaolo Bonzini 	}
567c50d8ae3SPaolo Bonzini 
568c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
569c50d8ae3SPaolo Bonzini 		flush = true;
570c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
571c50d8ae3SPaolo Bonzini 	}
572c50d8ae3SPaolo Bonzini 
573c50d8ae3SPaolo Bonzini 	return flush;
574c50d8ae3SPaolo Bonzini }
575c50d8ae3SPaolo Bonzini 
576c50d8ae3SPaolo Bonzini /*
577c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_track_bits:
578c50d8ae3SPaolo Bonzini  * It sets the sptep from present to nonpresent and tracks the
579c50d8ae3SPaolo Bonzini  * state bits; it is used to clear a last-level sptep.
580c50d8ae3SPaolo Bonzini  * Returns non-zero if the PTE was previously valid.
581c50d8ae3SPaolo Bonzini  */
582c50d8ae3SPaolo Bonzini static int mmu_spte_clear_track_bits(u64 *sptep)
583c50d8ae3SPaolo Bonzini {
584c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
585c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
586c50d8ae3SPaolo Bonzini 
587c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
588c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, 0ull);
589c50d8ae3SPaolo Bonzini 	else
590c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, 0ull);
591c50d8ae3SPaolo Bonzini 
592c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
593c50d8ae3SPaolo Bonzini 		return 0;
594c50d8ae3SPaolo Bonzini 
595c50d8ae3SPaolo Bonzini 	pfn = spte_to_pfn(old_spte);
596c50d8ae3SPaolo Bonzini 
597c50d8ae3SPaolo Bonzini 	/*
598c50d8ae3SPaolo Bonzini 	 * KVM does not hold a refcount on the pages used by the KVM
599c50d8ae3SPaolo Bonzini 	 * MMU; before such a page is reclaimed, it must be unmapped
600c50d8ae3SPaolo Bonzini 	 * from the MMU first.
601c50d8ae3SPaolo Bonzini 	 */
602c50d8ae3SPaolo Bonzini 	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
603c50d8ae3SPaolo Bonzini 
604c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte))
605c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(pfn);
606c50d8ae3SPaolo Bonzini 
607c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte))
608c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(pfn);
609c50d8ae3SPaolo Bonzini 
610c50d8ae3SPaolo Bonzini 	return 1;
611c50d8ae3SPaolo Bonzini }
612c50d8ae3SPaolo Bonzini 
613c50d8ae3SPaolo Bonzini /*
614c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_no_track:
615c50d8ae3SPaolo Bonzini  * Directly clear the spte without tracking the state bits of the sptep;
616c50d8ae3SPaolo Bonzini  * it is used to clear upper-level sptes.
617c50d8ae3SPaolo Bonzini  */
618c50d8ae3SPaolo Bonzini static void mmu_spte_clear_no_track(u64 *sptep)
619c50d8ae3SPaolo Bonzini {
620c50d8ae3SPaolo Bonzini 	__update_clear_spte_fast(sptep, 0ull);
621c50d8ae3SPaolo Bonzini }
622c50d8ae3SPaolo Bonzini 
623c50d8ae3SPaolo Bonzini static u64 mmu_spte_get_lockless(u64 *sptep)
624c50d8ae3SPaolo Bonzini {
625c50d8ae3SPaolo Bonzini 	return __get_spte_lockless(sptep);
626c50d8ae3SPaolo Bonzini }
627c50d8ae3SPaolo Bonzini 
628c50d8ae3SPaolo Bonzini /* Restore an acc-track PTE back to a regular PTE */
629c50d8ae3SPaolo Bonzini static u64 restore_acc_track_spte(u64 spte)
630c50d8ae3SPaolo Bonzini {
631c50d8ae3SPaolo Bonzini 	u64 new_spte = spte;
6328a967d65SPaolo Bonzini 	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
6338a967d65SPaolo Bonzini 			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
634c50d8ae3SPaolo Bonzini 
635c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(spte_ad_enabled(spte));
636c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!is_access_track_spte(spte));
637c50d8ae3SPaolo Bonzini 
638c50d8ae3SPaolo Bonzini 	new_spte &= ~shadow_acc_track_mask;
6398a967d65SPaolo Bonzini 	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
6408a967d65SPaolo Bonzini 		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
641c50d8ae3SPaolo Bonzini 	new_spte |= saved_bits;
642c50d8ae3SPaolo Bonzini 
643c50d8ae3SPaolo Bonzini 	return new_spte;
644c50d8ae3SPaolo Bonzini }
645c50d8ae3SPaolo Bonzini 
646c50d8ae3SPaolo Bonzini /* Returns the Accessed status of the PTE and resets it at the same time. */
647c50d8ae3SPaolo Bonzini static bool mmu_spte_age(u64 *sptep)
648c50d8ae3SPaolo Bonzini {
649c50d8ae3SPaolo Bonzini 	u64 spte = mmu_spte_get_lockless(sptep);
650c50d8ae3SPaolo Bonzini 
651c50d8ae3SPaolo Bonzini 	if (!is_accessed_spte(spte))
652c50d8ae3SPaolo Bonzini 		return false;
653c50d8ae3SPaolo Bonzini 
654c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
655c50d8ae3SPaolo Bonzini 		clear_bit((ffs(shadow_accessed_mask) - 1),
656c50d8ae3SPaolo Bonzini 			  (unsigned long *)sptep);
657c50d8ae3SPaolo Bonzini 	} else {
658c50d8ae3SPaolo Bonzini 		/*
659c50d8ae3SPaolo Bonzini 		 * Capture the dirty status of the page, so that it doesn't get
660c50d8ae3SPaolo Bonzini 		 * lost when the SPTE is marked for access tracking.
661c50d8ae3SPaolo Bonzini 		 */
662c50d8ae3SPaolo Bonzini 		if (is_writable_pte(spte))
663c50d8ae3SPaolo Bonzini 			kvm_set_pfn_dirty(spte_to_pfn(spte));
664c50d8ae3SPaolo Bonzini 
665c50d8ae3SPaolo Bonzini 		spte = mark_spte_for_access_track(spte);
666c50d8ae3SPaolo Bonzini 		mmu_spte_update_no_track(sptep, spte);
667c50d8ae3SPaolo Bonzini 	}
668c50d8ae3SPaolo Bonzini 
669c50d8ae3SPaolo Bonzini 	return true;
670c50d8ae3SPaolo Bonzini }
671c50d8ae3SPaolo Bonzini 
672c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
673c50d8ae3SPaolo Bonzini {
674c50d8ae3SPaolo Bonzini 	/*
675c50d8ae3SPaolo Bonzini 	 * Prevent page table teardown by making any free-er wait during
676c50d8ae3SPaolo Bonzini 	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
677c50d8ae3SPaolo Bonzini 	 */
678c50d8ae3SPaolo Bonzini 	local_irq_disable();
679c50d8ae3SPaolo Bonzini 
680c50d8ae3SPaolo Bonzini 	/*
681c50d8ae3SPaolo Bonzini 	 * Make sure a following spte read is not reordered ahead of the write
682c50d8ae3SPaolo Bonzini 	 * to vcpu->mode.
683c50d8ae3SPaolo Bonzini 	 */
684c50d8ae3SPaolo Bonzini 	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
685c50d8ae3SPaolo Bonzini }
686c50d8ae3SPaolo Bonzini 
687c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
688c50d8ae3SPaolo Bonzini {
689c50d8ae3SPaolo Bonzini 	/*
690c50d8ae3SPaolo Bonzini 	 * Make sure the write to vcpu->mode is not reordered in front of
691c50d8ae3SPaolo Bonzini 	 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
692c50d8ae3SPaolo Bonzini 	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
693c50d8ae3SPaolo Bonzini 	 */
694c50d8ae3SPaolo Bonzini 	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
695c50d8ae3SPaolo Bonzini 	local_irq_enable();
696c50d8ae3SPaolo Bonzini }
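/*
 * Callers bracket a lockless shadow-page-table walk with this pair, e.g.:
 *
 *	walk_shadow_page_lockless_begin(vcpu);
 *	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 *		... inspect spte / iterator.sptep ...
 *	walk_shadow_page_lockless_end(vcpu);
 */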
697c50d8ae3SPaolo Bonzini 
698378f5cd6SSean Christopherson static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
699c50d8ae3SPaolo Bonzini {
700c50d8ae3SPaolo Bonzini 	int r;
701c50d8ae3SPaolo Bonzini 
702531281adSSean Christopherson 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
70394ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
704531281adSSean Christopherson 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
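	/*
	 * With PT64_ROOT_MAX_LEVEL currently 5 and PTE_PREFETCH_NUM 8, the
	 * call above tops the pte_list_desc cache up to at least 14 objects.
	 */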
705c50d8ae3SPaolo Bonzini 	if (r)
706c50d8ae3SPaolo Bonzini 		return r;
70794ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
708171a90d7SSean Christopherson 				       PT64_ROOT_MAX_LEVEL);
709171a90d7SSean Christopherson 	if (r)
710171a90d7SSean Christopherson 		return r;
711378f5cd6SSean Christopherson 	if (maybe_indirect) {
71294ce87efSSean Christopherson 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
713171a90d7SSean Christopherson 					       PT64_ROOT_MAX_LEVEL);
714c50d8ae3SPaolo Bonzini 		if (r)
715c50d8ae3SPaolo Bonzini 			return r;
716378f5cd6SSean Christopherson 	}
71794ce87efSSean Christopherson 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
718531281adSSean Christopherson 					  PT64_ROOT_MAX_LEVEL);
719c50d8ae3SPaolo Bonzini }
720c50d8ae3SPaolo Bonzini 
721c50d8ae3SPaolo Bonzini static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
722c50d8ae3SPaolo Bonzini {
72394ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
72494ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
72594ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
72694ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
727c50d8ae3SPaolo Bonzini }
728c50d8ae3SPaolo Bonzini 
729c50d8ae3SPaolo Bonzini static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
730c50d8ae3SPaolo Bonzini {
73194ce87efSSean Christopherson 	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
732c50d8ae3SPaolo Bonzini }
733c50d8ae3SPaolo Bonzini 
734c50d8ae3SPaolo Bonzini static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
735c50d8ae3SPaolo Bonzini {
736c50d8ae3SPaolo Bonzini 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
737c50d8ae3SPaolo Bonzini }
738c50d8ae3SPaolo Bonzini 
739c50d8ae3SPaolo Bonzini static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
740c50d8ae3SPaolo Bonzini {
741c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
742c50d8ae3SPaolo Bonzini 		return sp->gfns[index];
743c50d8ae3SPaolo Bonzini 
744c50d8ae3SPaolo Bonzini 	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
745c50d8ae3SPaolo Bonzini }
746c50d8ae3SPaolo Bonzini 
747c50d8ae3SPaolo Bonzini static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
748c50d8ae3SPaolo Bonzini {
749c50d8ae3SPaolo Bonzini 	if (!sp->role.direct) {
750c50d8ae3SPaolo Bonzini 		sp->gfns[index] = gfn;
751c50d8ae3SPaolo Bonzini 		return;
752c50d8ae3SPaolo Bonzini 	}
753c50d8ae3SPaolo Bonzini 
754c50d8ae3SPaolo Bonzini 	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
755c50d8ae3SPaolo Bonzini 		pr_err_ratelimited("gfn mismatch under direct page %llx "
756c50d8ae3SPaolo Bonzini 				   "(expected %llx, got %llx)\n",
757c50d8ae3SPaolo Bonzini 				   sp->gfn,
758c50d8ae3SPaolo Bonzini 				   kvm_mmu_page_get_gfn(sp, index), gfn);
759c50d8ae3SPaolo Bonzini }
760c50d8ae3SPaolo Bonzini 
761c50d8ae3SPaolo Bonzini /*
762c50d8ae3SPaolo Bonzini  * Return the pointer to the large page information for a given gfn,
763c50d8ae3SPaolo Bonzini  * handling slots that are not large page aligned.
764c50d8ae3SPaolo Bonzini  */
765c50d8ae3SPaolo Bonzini static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
7668ca6f063SBen Gardon 		const struct kvm_memory_slot *slot, int level)
767c50d8ae3SPaolo Bonzini {
768c50d8ae3SPaolo Bonzini 	unsigned long idx;
769c50d8ae3SPaolo Bonzini 
770c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
771c50d8ae3SPaolo Bonzini 	return &slot->arch.lpage_info[level - 2][idx];
772c50d8ae3SPaolo Bonzini }
773c50d8ae3SPaolo Bonzini 
774c50d8ae3SPaolo Bonzini static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
775c50d8ae3SPaolo Bonzini 					    gfn_t gfn, int count)
776c50d8ae3SPaolo Bonzini {
777c50d8ae3SPaolo Bonzini 	struct kvm_lpage_info *linfo;
778c50d8ae3SPaolo Bonzini 	int i;
779c50d8ae3SPaolo Bonzini 
7803bae0459SSean Christopherson 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
781c50d8ae3SPaolo Bonzini 		linfo = lpage_info_slot(gfn, slot, i);
782c50d8ae3SPaolo Bonzini 		linfo->disallow_lpage += count;
783c50d8ae3SPaolo Bonzini 		WARN_ON(linfo->disallow_lpage < 0);
784c50d8ae3SPaolo Bonzini 	}
785c50d8ae3SPaolo Bonzini }
786c50d8ae3SPaolo Bonzini 
787c50d8ae3SPaolo Bonzini void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
788c50d8ae3SPaolo Bonzini {
789c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, 1);
790c50d8ae3SPaolo Bonzini }
791c50d8ae3SPaolo Bonzini 
792c50d8ae3SPaolo Bonzini void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
793c50d8ae3SPaolo Bonzini {
794c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, -1);
795c50d8ae3SPaolo Bonzini }
796c50d8ae3SPaolo Bonzini 
797c50d8ae3SPaolo Bonzini static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
798c50d8ae3SPaolo Bonzini {
799c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
800c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
801c50d8ae3SPaolo Bonzini 	gfn_t gfn;
802c50d8ae3SPaolo Bonzini 
803c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages++;
804c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
805c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
806c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
807c50d8ae3SPaolo Bonzini 
808c50d8ae3SPaolo Bonzini 	/* Non-leaf shadow pages are kept read-only. */
8093bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
810c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_add_page(kvm, slot, gfn,
811c50d8ae3SPaolo Bonzini 						    KVM_PAGE_TRACK_WRITE);
812c50d8ae3SPaolo Bonzini 
813c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
814c50d8ae3SPaolo Bonzini }
815c50d8ae3SPaolo Bonzini 
81629cf0f50SBen Gardon void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
817c50d8ae3SPaolo Bonzini {
818c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
819c50d8ae3SPaolo Bonzini 		return;
820c50d8ae3SPaolo Bonzini 
821c50d8ae3SPaolo Bonzini 	++kvm->stat.nx_lpage_splits;
822c50d8ae3SPaolo Bonzini 	list_add_tail(&sp->lpage_disallowed_link,
823c50d8ae3SPaolo Bonzini 		      &kvm->arch.lpage_disallowed_mmu_pages);
824c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = true;
825c50d8ae3SPaolo Bonzini }
826c50d8ae3SPaolo Bonzini 
827c50d8ae3SPaolo Bonzini static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
828c50d8ae3SPaolo Bonzini {
829c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
830c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
831c50d8ae3SPaolo Bonzini 	gfn_t gfn;
832c50d8ae3SPaolo Bonzini 
833c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages--;
834c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
835c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
836c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
8373bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
838c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
839c50d8ae3SPaolo Bonzini 						       KVM_PAGE_TRACK_WRITE);
840c50d8ae3SPaolo Bonzini 
841c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_allow_lpage(slot, gfn);
842c50d8ae3SPaolo Bonzini }
843c50d8ae3SPaolo Bonzini 
84429cf0f50SBen Gardon void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
845c50d8ae3SPaolo Bonzini {
846c50d8ae3SPaolo Bonzini 	--kvm->stat.nx_lpage_splits;
847c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = false;
848c50d8ae3SPaolo Bonzini 	list_del(&sp->lpage_disallowed_link);
849c50d8ae3SPaolo Bonzini }
850c50d8ae3SPaolo Bonzini 
851c50d8ae3SPaolo Bonzini static struct kvm_memory_slot *
852c50d8ae3SPaolo Bonzini gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
853c50d8ae3SPaolo Bonzini 			    bool no_dirty_log)
854c50d8ae3SPaolo Bonzini {
855c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
856c50d8ae3SPaolo Bonzini 
857c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
85891b0d268SPaolo Bonzini 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
85991b0d268SPaolo Bonzini 		return NULL;
860044c59c4SPeter Xu 	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
86191b0d268SPaolo Bonzini 		return NULL;
862c50d8ae3SPaolo Bonzini 
863c50d8ae3SPaolo Bonzini 	return slot;
864c50d8ae3SPaolo Bonzini }
865c50d8ae3SPaolo Bonzini 
866c50d8ae3SPaolo Bonzini /*
867c50d8ae3SPaolo Bonzini  * About rmap_head encoding:
868c50d8ae3SPaolo Bonzini  *
869c50d8ae3SPaolo Bonzini  * If the bit zero of rmap_head->val is clear, then it points to the only spte
870c50d8ae3SPaolo Bonzini  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
871c50d8ae3SPaolo Bonzini  * pte_list_desc containing more mappings.
872c50d8ae3SPaolo Bonzini  */
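/*
 * For example: the first spte is stored directly, i.e. rmap_head->val ==
 * (unsigned long)sptep with bit zero clear.  Adding a second spte allocates
 * a pte_list_desc, moves the first sptep into desc->sptes[0], stores the
 * new one in desc->sptes[1], and sets rmap_head->val = (unsigned long)desc | 1.
 */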
873c50d8ae3SPaolo Bonzini 
874c50d8ae3SPaolo Bonzini /*
875c50d8ae3SPaolo Bonzini  * Returns the number of pointers in the rmap chain, not counting the new one.
876c50d8ae3SPaolo Bonzini  */
877c50d8ae3SPaolo Bonzini static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
878c50d8ae3SPaolo Bonzini 			struct kvm_rmap_head *rmap_head)
879c50d8ae3SPaolo Bonzini {
880c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
881c50d8ae3SPaolo Bonzini 	int i, count = 0;
882c50d8ae3SPaolo Bonzini 
883c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
884805a0f83SStephen Zhang 		rmap_printk("%p %llx 0->1\n", spte, *spte);
885c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)spte;
886c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
887805a0f83SStephen Zhang 		rmap_printk("%p %llx 1->many\n", spte, *spte);
888c50d8ae3SPaolo Bonzini 		desc = mmu_alloc_pte_list_desc(vcpu);
889c50d8ae3SPaolo Bonzini 		desc->sptes[0] = (u64 *)rmap_head->val;
890c50d8ae3SPaolo Bonzini 		desc->sptes[1] = spte;
891c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)desc | 1;
892c50d8ae3SPaolo Bonzini 		++count;
893c50d8ae3SPaolo Bonzini 	} else {
894805a0f83SStephen Zhang 		rmap_printk("%p %llx many->many\n", spte, *spte);
895c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
896c6c4f961SLi RongQing 		while (desc->sptes[PTE_LIST_EXT-1]) {
897c50d8ae3SPaolo Bonzini 			count += PTE_LIST_EXT;
898c6c4f961SLi RongQing 
899c6c4f961SLi RongQing 			if (!desc->more) {
900c50d8ae3SPaolo Bonzini 				desc->more = mmu_alloc_pte_list_desc(vcpu);
901c50d8ae3SPaolo Bonzini 				desc = desc->more;
902c6c4f961SLi RongQing 				break;
903c6c4f961SLi RongQing 			}
904c6c4f961SLi RongQing 			desc = desc->more;
905c50d8ae3SPaolo Bonzini 		}
906c50d8ae3SPaolo Bonzini 		for (i = 0; desc->sptes[i]; ++i)
907c50d8ae3SPaolo Bonzini 			++count;
908c50d8ae3SPaolo Bonzini 		desc->sptes[i] = spte;
909c50d8ae3SPaolo Bonzini 	}
910c50d8ae3SPaolo Bonzini 	return count;
911c50d8ae3SPaolo Bonzini }
912c50d8ae3SPaolo Bonzini 
913c50d8ae3SPaolo Bonzini static void
914c50d8ae3SPaolo Bonzini pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
915c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *desc, int i,
916c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *prev_desc)
917c50d8ae3SPaolo Bonzini {
918c50d8ae3SPaolo Bonzini 	int j;
919c50d8ae3SPaolo Bonzini 
920c50d8ae3SPaolo Bonzini 	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
921c50d8ae3SPaolo Bonzini 		;
922c50d8ae3SPaolo Bonzini 	desc->sptes[i] = desc->sptes[j];
923c50d8ae3SPaolo Bonzini 	desc->sptes[j] = NULL;
924c50d8ae3SPaolo Bonzini 	if (j != 0)
925c50d8ae3SPaolo Bonzini 		return;
926c50d8ae3SPaolo Bonzini 	if (!prev_desc && !desc->more)
927fe3c2b4cSMiaohe Lin 		rmap_head->val = 0;
928c50d8ae3SPaolo Bonzini 	else
929c50d8ae3SPaolo Bonzini 		if (prev_desc)
930c50d8ae3SPaolo Bonzini 			prev_desc->more = desc->more;
931c50d8ae3SPaolo Bonzini 		else
932c50d8ae3SPaolo Bonzini 			rmap_head->val = (unsigned long)desc->more | 1;
933c50d8ae3SPaolo Bonzini 	mmu_free_pte_list_desc(desc);
934c50d8ae3SPaolo Bonzini }
935c50d8ae3SPaolo Bonzini 
936c50d8ae3SPaolo Bonzini static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
937c50d8ae3SPaolo Bonzini {
938c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
939c50d8ae3SPaolo Bonzini 	struct pte_list_desc *prev_desc;
940c50d8ae3SPaolo Bonzini 	int i;
941c50d8ae3SPaolo Bonzini 
942c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
943c50d8ae3SPaolo Bonzini 		pr_err("%s: %p 0->BUG\n", __func__, spte);
944c50d8ae3SPaolo Bonzini 		BUG();
945c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
946805a0f83SStephen Zhang 		rmap_printk("%p 1->0\n", spte);
947c50d8ae3SPaolo Bonzini 		if ((u64 *)rmap_head->val != spte) {
948c50d8ae3SPaolo Bonzini 			pr_err("%s:  %p 1->BUG\n", __func__, spte);
949c50d8ae3SPaolo Bonzini 			BUG();
950c50d8ae3SPaolo Bonzini 		}
951c50d8ae3SPaolo Bonzini 		rmap_head->val = 0;
952c50d8ae3SPaolo Bonzini 	} else {
953805a0f83SStephen Zhang 		rmap_printk("%p many->many\n", spte);
954c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
955c50d8ae3SPaolo Bonzini 		prev_desc = NULL;
956c50d8ae3SPaolo Bonzini 		while (desc) {
957c50d8ae3SPaolo Bonzini 			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
958c50d8ae3SPaolo Bonzini 				if (desc->sptes[i] == spte) {
959c50d8ae3SPaolo Bonzini 					pte_list_desc_remove_entry(rmap_head,
960c50d8ae3SPaolo Bonzini 							desc, i, prev_desc);
961c50d8ae3SPaolo Bonzini 					return;
962c50d8ae3SPaolo Bonzini 				}
963c50d8ae3SPaolo Bonzini 			}
964c50d8ae3SPaolo Bonzini 			prev_desc = desc;
965c50d8ae3SPaolo Bonzini 			desc = desc->more;
966c50d8ae3SPaolo Bonzini 		}
967c50d8ae3SPaolo Bonzini 		pr_err("%s: %p many->many\n", __func__, spte);
968c50d8ae3SPaolo Bonzini 		BUG();
969c50d8ae3SPaolo Bonzini 	}
970c50d8ae3SPaolo Bonzini }
971c50d8ae3SPaolo Bonzini 
972c50d8ae3SPaolo Bonzini static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
973c50d8ae3SPaolo Bonzini {
974c50d8ae3SPaolo Bonzini 	mmu_spte_clear_track_bits(sptep);
975c50d8ae3SPaolo Bonzini 	__pte_list_remove(sptep, rmap_head);
976c50d8ae3SPaolo Bonzini }
977c50d8ae3SPaolo Bonzini 
978c50d8ae3SPaolo Bonzini static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
979c50d8ae3SPaolo Bonzini 					   struct kvm_memory_slot *slot)
980c50d8ae3SPaolo Bonzini {
981c50d8ae3SPaolo Bonzini 	unsigned long idx;
982c50d8ae3SPaolo Bonzini 
983c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
9843bae0459SSean Christopherson 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
985c50d8ae3SPaolo Bonzini }
986c50d8ae3SPaolo Bonzini 
987c50d8ae3SPaolo Bonzini static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
988c50d8ae3SPaolo Bonzini 					 struct kvm_mmu_page *sp)
989c50d8ae3SPaolo Bonzini {
990c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
991c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
992c50d8ae3SPaolo Bonzini 
993c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
994c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
995c50d8ae3SPaolo Bonzini 	return __gfn_to_rmap(gfn, sp->role.level, slot);
996c50d8ae3SPaolo Bonzini }
997c50d8ae3SPaolo Bonzini 
998c50d8ae3SPaolo Bonzini static bool rmap_can_add(struct kvm_vcpu *vcpu)
999c50d8ae3SPaolo Bonzini {
1000356ec69aSSean Christopherson 	struct kvm_mmu_memory_cache *mc;
1001c50d8ae3SPaolo Bonzini 
1002356ec69aSSean Christopherson 	mc = &vcpu->arch.mmu_pte_list_desc_cache;
100394ce87efSSean Christopherson 	return kvm_mmu_memory_cache_nr_free_objects(mc);
1004c50d8ae3SPaolo Bonzini }
1005c50d8ae3SPaolo Bonzini 
1006c50d8ae3SPaolo Bonzini static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1007c50d8ae3SPaolo Bonzini {
1008c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1009c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1010c50d8ae3SPaolo Bonzini 
101157354682SSean Christopherson 	sp = sptep_to_sp(spte);
1012c50d8ae3SPaolo Bonzini 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1013c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1014c50d8ae3SPaolo Bonzini 	return pte_list_add(vcpu, spte, rmap_head);
1015c50d8ae3SPaolo Bonzini }
1016c50d8ae3SPaolo Bonzini 
1017c50d8ae3SPaolo Bonzini static void rmap_remove(struct kvm *kvm, u64 *spte)
1018c50d8ae3SPaolo Bonzini {
1019c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1020c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1021c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1022c50d8ae3SPaolo Bonzini 
102357354682SSean Christopherson 	sp = sptep_to_sp(spte);
1024c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1025c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(kvm, gfn, sp);
1026c50d8ae3SPaolo Bonzini 	__pte_list_remove(spte, rmap_head);
1027c50d8ae3SPaolo Bonzini }
1028c50d8ae3SPaolo Bonzini 
1029c50d8ae3SPaolo Bonzini /*
1030c50d8ae3SPaolo Bonzini  * Used by the following functions to iterate through the sptes linked by a
1031c50d8ae3SPaolo Bonzini  * rmap.  All fields are private and not assumed to be used outside.
1032c50d8ae3SPaolo Bonzini  */
1033c50d8ae3SPaolo Bonzini struct rmap_iterator {
1034c50d8ae3SPaolo Bonzini 	/* private fields */
1035c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
1036c50d8ae3SPaolo Bonzini 	int pos;			/* index of the sptep */
1037c50d8ae3SPaolo Bonzini };
1038c50d8ae3SPaolo Bonzini 
1039c50d8ae3SPaolo Bonzini /*
1040c50d8ae3SPaolo Bonzini  * Iteration must be started by this function.  This should also be used after
1041c50d8ae3SPaolo Bonzini  * removing/dropping sptes from the rmap link because in such cases the
10420a03cbdaSMiaohe Lin  * information in the iterator may not be valid.
1043c50d8ae3SPaolo Bonzini  *
1044c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1045c50d8ae3SPaolo Bonzini  */
1046c50d8ae3SPaolo Bonzini static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1047c50d8ae3SPaolo Bonzini 			   struct rmap_iterator *iter)
1048c50d8ae3SPaolo Bonzini {
1049c50d8ae3SPaolo Bonzini 	u64 *sptep;
1050c50d8ae3SPaolo Bonzini 
1051c50d8ae3SPaolo Bonzini 	if (!rmap_head->val)
1052c50d8ae3SPaolo Bonzini 		return NULL;
1053c50d8ae3SPaolo Bonzini 
1054c50d8ae3SPaolo Bonzini 	if (!(rmap_head->val & 1)) {
1055c50d8ae3SPaolo Bonzini 		iter->desc = NULL;
1056c50d8ae3SPaolo Bonzini 		sptep = (u64 *)rmap_head->val;
1057c50d8ae3SPaolo Bonzini 		goto out;
1058c50d8ae3SPaolo Bonzini 	}
1059c50d8ae3SPaolo Bonzini 
1060c50d8ae3SPaolo Bonzini 	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1061c50d8ae3SPaolo Bonzini 	iter->pos = 0;
1062c50d8ae3SPaolo Bonzini 	sptep = iter->desc->sptes[iter->pos];
1063c50d8ae3SPaolo Bonzini out:
1064c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1065c50d8ae3SPaolo Bonzini 	return sptep;
1066c50d8ae3SPaolo Bonzini }
1067c50d8ae3SPaolo Bonzini 
1068c50d8ae3SPaolo Bonzini /*
1069c50d8ae3SPaolo Bonzini  * Must be used with a valid iterator: e.g. after rmap_get_first().
1070c50d8ae3SPaolo Bonzini  *
1071c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1072c50d8ae3SPaolo Bonzini  */
1073c50d8ae3SPaolo Bonzini static u64 *rmap_get_next(struct rmap_iterator *iter)
1074c50d8ae3SPaolo Bonzini {
1075c50d8ae3SPaolo Bonzini 	u64 *sptep;
1076c50d8ae3SPaolo Bonzini 
1077c50d8ae3SPaolo Bonzini 	if (iter->desc) {
1078c50d8ae3SPaolo Bonzini 		if (iter->pos < PTE_LIST_EXT - 1) {
1079c50d8ae3SPaolo Bonzini 			++iter->pos;
1080c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1081c50d8ae3SPaolo Bonzini 			if (sptep)
1082c50d8ae3SPaolo Bonzini 				goto out;
1083c50d8ae3SPaolo Bonzini 		}
1084c50d8ae3SPaolo Bonzini 
1085c50d8ae3SPaolo Bonzini 		iter->desc = iter->desc->more;
1086c50d8ae3SPaolo Bonzini 
1087c50d8ae3SPaolo Bonzini 		if (iter->desc) {
1088c50d8ae3SPaolo Bonzini 			iter->pos = 0;
1089c50d8ae3SPaolo Bonzini 			/* desc->sptes[0] cannot be NULL */
1090c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1091c50d8ae3SPaolo Bonzini 			goto out;
1092c50d8ae3SPaolo Bonzini 		}
1093c50d8ae3SPaolo Bonzini 	}
1094c50d8ae3SPaolo Bonzini 
1095c50d8ae3SPaolo Bonzini 	return NULL;
1096c50d8ae3SPaolo Bonzini out:
1097c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1098c50d8ae3SPaolo Bonzini 	return sptep;
1099c50d8ae3SPaolo Bonzini }
1100c50d8ae3SPaolo Bonzini 
1101c50d8ae3SPaolo Bonzini #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
1102c50d8ae3SPaolo Bonzini 	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
1103c50d8ae3SPaolo Bonzini 	     _spte_; _spte_ = rmap_get_next(_iter_))
1104c50d8ae3SPaolo Bonzini 
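/*
 * Zap @sptep via mmu_spte_clear_track_bits() and, if the old spte was
 * present, unlink it from its gfn's rmap.
 */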
1105c50d8ae3SPaolo Bonzini static void drop_spte(struct kvm *kvm, u64 *sptep)
1106c50d8ae3SPaolo Bonzini {
1107c50d8ae3SPaolo Bonzini 	if (mmu_spte_clear_track_bits(sptep))
1108c50d8ae3SPaolo Bonzini 		rmap_remove(kvm, sptep);
1109c50d8ae3SPaolo Bonzini }
1110c50d8ae3SPaolo Bonzini 
1111c50d8ae3SPaolo Bonzini 
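/*
 * Drop a large (huge page) spte.  The vCPU variant below additionally flushes
 * the remote TLBs for the gfn range covered by the huge page.
 */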
1112c50d8ae3SPaolo Bonzini static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1113c50d8ae3SPaolo Bonzini {
1114c50d8ae3SPaolo Bonzini 	if (is_large_pte(*sptep)) {
111557354682SSean Christopherson 		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1116c50d8ae3SPaolo Bonzini 		drop_spte(kvm, sptep);
1117c50d8ae3SPaolo Bonzini 		--kvm->stat.lpages;
1118c50d8ae3SPaolo Bonzini 		return true;
1119c50d8ae3SPaolo Bonzini 	}
1120c50d8ae3SPaolo Bonzini 
1121c50d8ae3SPaolo Bonzini 	return false;
1122c50d8ae3SPaolo Bonzini }
1123c50d8ae3SPaolo Bonzini 
1124c50d8ae3SPaolo Bonzini static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1125c50d8ae3SPaolo Bonzini {
1126c50d8ae3SPaolo Bonzini 	if (__drop_large_spte(vcpu->kvm, sptep)) {
112757354682SSean Christopherson 		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1128c50d8ae3SPaolo Bonzini 
1129c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1130c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1131c50d8ae3SPaolo Bonzini 	}
1132c50d8ae3SPaolo Bonzini }
1133c50d8ae3SPaolo Bonzini 
1134c50d8ae3SPaolo Bonzini /*
1135c50d8ae3SPaolo Bonzini  * Write-protect the specified @sptep.  @pt_protect indicates whether the
1136c50d8ae3SPaolo Bonzini  * spte is being write-protected in order to protect a shadow page table.
1137c50d8ae3SPaolo Bonzini  *
1138c50d8ae3SPaolo Bonzini  * Note: write protection differs between dirty logging and spte
1139c50d8ae3SPaolo Bonzini  * protection:
1140c50d8ae3SPaolo Bonzini  * - for dirty logging, the spte can be made writable again at any time,
1141c50d8ae3SPaolo Bonzini  *   as long as its dirty bitmap is properly set.
1142c50d8ae3SPaolo Bonzini  * - for spte protection, the spte can become writable only after the
1143c50d8ae3SPaolo Bonzini  *   shadow page is unsynced.
1144c50d8ae3SPaolo Bonzini  *
1145c50d8ae3SPaolo Bonzini  * Return true if the TLB needs to be flushed.
1146c50d8ae3SPaolo Bonzini  */
1147c50d8ae3SPaolo Bonzini static bool spte_write_protect(u64 *sptep, bool pt_protect)
1148c50d8ae3SPaolo Bonzini {
1149c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1150c50d8ae3SPaolo Bonzini 
1151c50d8ae3SPaolo Bonzini 	if (!is_writable_pte(spte) &&
1152c50d8ae3SPaolo Bonzini 	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1153c50d8ae3SPaolo Bonzini 		return false;
1154c50d8ae3SPaolo Bonzini 
1155805a0f83SStephen Zhang 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1156c50d8ae3SPaolo Bonzini 
1157c50d8ae3SPaolo Bonzini 	if (pt_protect)
11585fc3424fSSean Christopherson 		spte &= ~shadow_mmu_writable_mask;
1159c50d8ae3SPaolo Bonzini 	spte = spte & ~PT_WRITABLE_MASK;
1160c50d8ae3SPaolo Bonzini 
1161c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1162c50d8ae3SPaolo Bonzini }
1163c50d8ae3SPaolo Bonzini 
1164c50d8ae3SPaolo Bonzini static bool __rmap_write_protect(struct kvm *kvm,
1165c50d8ae3SPaolo Bonzini 				 struct kvm_rmap_head *rmap_head,
1166c50d8ae3SPaolo Bonzini 				 bool pt_protect)
1167c50d8ae3SPaolo Bonzini {
1168c50d8ae3SPaolo Bonzini 	u64 *sptep;
1169c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1170c50d8ae3SPaolo Bonzini 	bool flush = false;
1171c50d8ae3SPaolo Bonzini 
1172c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1173c50d8ae3SPaolo Bonzini 		flush |= spte_write_protect(sptep, pt_protect);
1174c50d8ae3SPaolo Bonzini 
1175c50d8ae3SPaolo Bonzini 	return flush;
1176c50d8ae3SPaolo Bonzini }
1177c50d8ae3SPaolo Bonzini 
1178c50d8ae3SPaolo Bonzini static bool spte_clear_dirty(u64 *sptep)
1179c50d8ae3SPaolo Bonzini {
1180c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1181c50d8ae3SPaolo Bonzini 
1182805a0f83SStephen Zhang 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1183c50d8ae3SPaolo Bonzini 
1184c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!spte_ad_enabled(spte));
1185c50d8ae3SPaolo Bonzini 	spte &= ~shadow_dirty_mask;
1186c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1187c50d8ae3SPaolo Bonzini }
1188c50d8ae3SPaolo Bonzini 
1189c50d8ae3SPaolo Bonzini static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1190c50d8ae3SPaolo Bonzini {
1191c50d8ae3SPaolo Bonzini 	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1192c50d8ae3SPaolo Bonzini 					       (unsigned long *)sptep);
1193c50d8ae3SPaolo Bonzini 	if (was_writable && !spte_ad_enabled(*sptep))
1194c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1195c50d8ae3SPaolo Bonzini 
1196c50d8ae3SPaolo Bonzini 	return was_writable;
1197c50d8ae3SPaolo Bonzini }
1198c50d8ae3SPaolo Bonzini 
1199c50d8ae3SPaolo Bonzini /*
1200c50d8ae3SPaolo Bonzini  * Gets the GFN ready for another round of dirty logging by clearing the
1201c50d8ae3SPaolo Bonzini  *	- D bit on ad-enabled SPTEs, and
1202c50d8ae3SPaolo Bonzini  *	- W bit on ad-disabled SPTEs.
1203c50d8ae3SPaolo Bonzini  * Returns true iff any D or W bits were cleared.
1204c50d8ae3SPaolo Bonzini  */
12050a234f5dSSean Christopherson static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
12060a234f5dSSean Christopherson 			       struct kvm_memory_slot *slot)
1207c50d8ae3SPaolo Bonzini {
1208c50d8ae3SPaolo Bonzini 	u64 *sptep;
1209c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1210c50d8ae3SPaolo Bonzini 	bool flush = false;
1211c50d8ae3SPaolo Bonzini 
1212c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1213c50d8ae3SPaolo Bonzini 		if (spte_ad_need_write_protect(*sptep))
1214c50d8ae3SPaolo Bonzini 			flush |= spte_wrprot_for_clear_dirty(sptep);
1215c50d8ae3SPaolo Bonzini 		else
1216c50d8ae3SPaolo Bonzini 			flush |= spte_clear_dirty(sptep);
1217c50d8ae3SPaolo Bonzini 
1218c50d8ae3SPaolo Bonzini 	return flush;
1219c50d8ae3SPaolo Bonzini }
1220c50d8ae3SPaolo Bonzini 
1221c50d8ae3SPaolo Bonzini /**
1222c50d8ae3SPaolo Bonzini  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1223c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1224c50d8ae3SPaolo Bonzini  * @slot: slot to protect
1225c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1226c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should protect
1227c50d8ae3SPaolo Bonzini  *
122889212919SKeqian Zhu  * Used when we do not need to care about huge page mappings.
1229c50d8ae3SPaolo Bonzini  */
1230c50d8ae3SPaolo Bonzini static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1231c50d8ae3SPaolo Bonzini 				     struct kvm_memory_slot *slot,
1232c50d8ae3SPaolo Bonzini 				     gfn_t gfn_offset, unsigned long mask)
1233c50d8ae3SPaolo Bonzini {
1234c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1235c50d8ae3SPaolo Bonzini 
1236897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1237a6a0b05dSBen Gardon 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1238a6a0b05dSBen Gardon 				slot->base_gfn + gfn_offset, mask, true);
1239e2209710SBen Gardon 
1240e2209710SBen Gardon 	if (!kvm_memslots_have_rmaps(kvm))
1241e2209710SBen Gardon 		return;
1242e2209710SBen Gardon 
1243c50d8ae3SPaolo Bonzini 	while (mask) {
1244c50d8ae3SPaolo Bonzini 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
12453bae0459SSean Christopherson 					  PG_LEVEL_4K, slot);
1246c50d8ae3SPaolo Bonzini 		__rmap_write_protect(kvm, rmap_head, false);
1247c50d8ae3SPaolo Bonzini 
1248c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1249c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1250c50d8ae3SPaolo Bonzini 	}
1251c50d8ae3SPaolo Bonzini }
1252c50d8ae3SPaolo Bonzini 
1253c50d8ae3SPaolo Bonzini /**
1254c50d8ae3SPaolo Bonzini  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1255c50d8ae3SPaolo Bonzini  * protect the page if the D-bit isn't supported.
1256c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1257c50d8ae3SPaolo Bonzini  * @slot: slot whose D-bits should be cleared
1258c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1259c50d8ae3SPaolo Bonzini  * @mask: indicates which pages' D-bits we should clear
1260c50d8ae3SPaolo Bonzini  *
1261c50d8ae3SPaolo Bonzini  * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1262c50d8ae3SPaolo Bonzini  */
1263a018eba5SSean Christopherson static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1264c50d8ae3SPaolo Bonzini 					 struct kvm_memory_slot *slot,
1265c50d8ae3SPaolo Bonzini 					 gfn_t gfn_offset, unsigned long mask)
1266c50d8ae3SPaolo Bonzini {
1267c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1268c50d8ae3SPaolo Bonzini 
1269897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1270a6a0b05dSBen Gardon 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1271a6a0b05dSBen Gardon 				slot->base_gfn + gfn_offset, mask, false);
1272e2209710SBen Gardon 
1273e2209710SBen Gardon 	if (!kvm_memslots_have_rmaps(kvm))
1274e2209710SBen Gardon 		return;
1275e2209710SBen Gardon 
1276c50d8ae3SPaolo Bonzini 	while (mask) {
1277c50d8ae3SPaolo Bonzini 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
12783bae0459SSean Christopherson 					  PG_LEVEL_4K, slot);
12790a234f5dSSean Christopherson 		__rmap_clear_dirty(kvm, rmap_head, slot);
1280c50d8ae3SPaolo Bonzini 
1281c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1282c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1283c50d8ae3SPaolo Bonzini 	}
1284c50d8ae3SPaolo Bonzini }
1285c50d8ae3SPaolo Bonzini 
1286c50d8ae3SPaolo Bonzini /**
1287c50d8ae3SPaolo Bonzini  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1288c50d8ae3SPaolo Bonzini  * PT level pages.
1289c50d8ae3SPaolo Bonzini  *
1290c50d8ae3SPaolo Bonzini  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1291c50d8ae3SPaolo Bonzini  * enable dirty logging for them.
1292c50d8ae3SPaolo Bonzini  *
129389212919SKeqian Zhu  * We need to care about huge page mappings: e.g. during dirty logging we may
129489212919SKeqian Zhu  * have such mappings.
1295c50d8ae3SPaolo Bonzini  */
1296c50d8ae3SPaolo Bonzini void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1297c50d8ae3SPaolo Bonzini 				struct kvm_memory_slot *slot,
1298c50d8ae3SPaolo Bonzini 				gfn_t gfn_offset, unsigned long mask)
1299c50d8ae3SPaolo Bonzini {
130089212919SKeqian Zhu 	/*
130189212919SKeqian Zhu 	 * Huge pages are NOT write protected when we start dirty logging in
130289212919SKeqian Zhu 	 * initially-all-set mode; must write protect them here so that they
130389212919SKeqian Zhu 	 * are split to 4K on the first write.
130489212919SKeqian Zhu 	 *
130589212919SKeqian Zhu 	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
130689212919SKeqian Zhu 	 * of memslot has no such restriction, so the range can cross two large
130789212919SKeqian Zhu 	 * pages.
130889212919SKeqian Zhu 	 */
130989212919SKeqian Zhu 	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
131089212919SKeqian Zhu 		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
131189212919SKeqian Zhu 		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
131289212919SKeqian Zhu 
131389212919SKeqian Zhu 		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
131489212919SKeqian Zhu 
131589212919SKeqian Zhu 		/* Cross two large pages? */
131689212919SKeqian Zhu 		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
131789212919SKeqian Zhu 		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
131889212919SKeqian Zhu 			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
131989212919SKeqian Zhu 						       PG_LEVEL_2M);
132089212919SKeqian Zhu 	}
132189212919SKeqian Zhu 
132289212919SKeqian Zhu 	/* Now handle 4K PTEs.  */
1323a018eba5SSean Christopherson 	if (kvm_x86_ops.cpu_dirty_log_size)
1324a018eba5SSean Christopherson 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1325c50d8ae3SPaolo Bonzini 	else
1326c50d8ae3SPaolo Bonzini 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1327c50d8ae3SPaolo Bonzini }
1328c50d8ae3SPaolo Bonzini 
1329fb04a1edSPeter Xu int kvm_cpu_dirty_log_size(void)
1330fb04a1edSPeter Xu {
13316dd03800SSean Christopherson 	return kvm_x86_ops.cpu_dirty_log_size;
1332fb04a1edSPeter Xu }
1333fb04a1edSPeter Xu 
1334c50d8ae3SPaolo Bonzini bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
13353ad93562SKeqian Zhu 				    struct kvm_memory_slot *slot, u64 gfn,
13363ad93562SKeqian Zhu 				    int min_level)
1337c50d8ae3SPaolo Bonzini {
1338c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1339c50d8ae3SPaolo Bonzini 	int i;
1340c50d8ae3SPaolo Bonzini 	bool write_protected = false;
1341c50d8ae3SPaolo Bonzini 
1342e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
13433ad93562SKeqian Zhu 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1344c50d8ae3SPaolo Bonzini 			rmap_head = __gfn_to_rmap(gfn, i, slot);
1345c50d8ae3SPaolo Bonzini 			write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1346c50d8ae3SPaolo Bonzini 		}
1347e2209710SBen Gardon 	}
1348c50d8ae3SPaolo Bonzini 
1349897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
135046044f72SBen Gardon 		write_protected |=
13513ad93562SKeqian Zhu 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
135246044f72SBen Gardon 
1353c50d8ae3SPaolo Bonzini 	return write_protected;
1354c50d8ae3SPaolo Bonzini }
1355c50d8ae3SPaolo Bonzini 
1356c50d8ae3SPaolo Bonzini static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1357c50d8ae3SPaolo Bonzini {
1358c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1359c50d8ae3SPaolo Bonzini 
1360c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
13613ad93562SKeqian Zhu 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1362c50d8ae3SPaolo Bonzini }
1363c50d8ae3SPaolo Bonzini 
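/*
 * Zap every spte in @rmap_head.  Returns true if at least one spte was
 * removed, i.e. if a TLB flush is needed.
 */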
13640a234f5dSSean Christopherson static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
13650a234f5dSSean Christopherson 			  struct kvm_memory_slot *slot)
1366c50d8ae3SPaolo Bonzini {
1367c50d8ae3SPaolo Bonzini 	u64 *sptep;
1368c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1369c50d8ae3SPaolo Bonzini 	bool flush = false;
1370c50d8ae3SPaolo Bonzini 
1371c50d8ae3SPaolo Bonzini 	while ((sptep = rmap_get_first(rmap_head, &iter))) {
1372805a0f83SStephen Zhang 		rmap_printk("spte %p %llx.\n", sptep, *sptep);
1373c50d8ae3SPaolo Bonzini 
1374c50d8ae3SPaolo Bonzini 		pte_list_remove(rmap_head, sptep);
1375c50d8ae3SPaolo Bonzini 		flush = true;
1376c50d8ae3SPaolo Bonzini 	}
1377c50d8ae3SPaolo Bonzini 
1378c50d8ae3SPaolo Bonzini 	return flush;
1379c50d8ae3SPaolo Bonzini }
1380c50d8ae3SPaolo Bonzini 
13813039bcc7SSean Christopherson static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1382c50d8ae3SPaolo Bonzini 			    struct kvm_memory_slot *slot, gfn_t gfn, int level,
13833039bcc7SSean Christopherson 			    pte_t unused)
1384c50d8ae3SPaolo Bonzini {
13850a234f5dSSean Christopherson 	return kvm_zap_rmapp(kvm, rmap_head, slot);
1386c50d8ae3SPaolo Bonzini }
1387c50d8ae3SPaolo Bonzini 
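/*
 * Rewrite the sptes for @gfn to track the new host pte (@pte): if the new
 * host pte is writable, the existing spte is simply removed (it will be
 * rebuilt on the next fault); otherwise the spte is replaced with a read-only
 * spte pointing at the new pfn.  Returns non-zero if the caller still needs
 * to flush the TLBs.
 */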
13883039bcc7SSean Christopherson static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1389c50d8ae3SPaolo Bonzini 			      struct kvm_memory_slot *slot, gfn_t gfn, int level,
13903039bcc7SSean Christopherson 			      pte_t pte)
1391c50d8ae3SPaolo Bonzini {
1392c50d8ae3SPaolo Bonzini 	u64 *sptep;
1393c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1394c50d8ae3SPaolo Bonzini 	int need_flush = 0;
1395c50d8ae3SPaolo Bonzini 	u64 new_spte;
1396c50d8ae3SPaolo Bonzini 	kvm_pfn_t new_pfn;
1397c50d8ae3SPaolo Bonzini 
13983039bcc7SSean Christopherson 	WARN_ON(pte_huge(pte));
13993039bcc7SSean Christopherson 	new_pfn = pte_pfn(pte);
1400c50d8ae3SPaolo Bonzini 
1401c50d8ae3SPaolo Bonzini restart:
1402c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
1403805a0f83SStephen Zhang 		rmap_printk("spte %p %llx gfn %llx (%d)\n",
1404c50d8ae3SPaolo Bonzini 			    sptep, *sptep, gfn, level);
1405c50d8ae3SPaolo Bonzini 
1406c50d8ae3SPaolo Bonzini 		need_flush = 1;
1407c50d8ae3SPaolo Bonzini 
14083039bcc7SSean Christopherson 		if (pte_write(pte)) {
1409c50d8ae3SPaolo Bonzini 			pte_list_remove(rmap_head, sptep);
1410c50d8ae3SPaolo Bonzini 			goto restart;
1411c50d8ae3SPaolo Bonzini 		} else {
1412cb3eedabSPaolo Bonzini 			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
1413cb3eedabSPaolo Bonzini 					*sptep, new_pfn);
1414c50d8ae3SPaolo Bonzini 
1415c50d8ae3SPaolo Bonzini 			mmu_spte_clear_track_bits(sptep);
1416c50d8ae3SPaolo Bonzini 			mmu_spte_set(sptep, new_spte);
1417c50d8ae3SPaolo Bonzini 		}
1418c50d8ae3SPaolo Bonzini 	}
1419c50d8ae3SPaolo Bonzini 
1420c50d8ae3SPaolo Bonzini 	if (need_flush && kvm_available_flush_tlb_with_range()) {
1421c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1422c50d8ae3SPaolo Bonzini 		return 0;
1423c50d8ae3SPaolo Bonzini 	}
1424c50d8ae3SPaolo Bonzini 
1425c50d8ae3SPaolo Bonzini 	return need_flush;
1426c50d8ae3SPaolo Bonzini }
1427c50d8ae3SPaolo Bonzini 
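/*
 * Iterator state for walking the rmaps of a memslot across a range of gfns
 * and page-table levels; used via for_each_slot_rmap_range().
 */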
1428c50d8ae3SPaolo Bonzini struct slot_rmap_walk_iterator {
1429c50d8ae3SPaolo Bonzini 	/* input fields. */
1430c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1431c50d8ae3SPaolo Bonzini 	gfn_t start_gfn;
1432c50d8ae3SPaolo Bonzini 	gfn_t end_gfn;
1433c50d8ae3SPaolo Bonzini 	int start_level;
1434c50d8ae3SPaolo Bonzini 	int end_level;
1435c50d8ae3SPaolo Bonzini 
1436c50d8ae3SPaolo Bonzini 	/* output fields. */
1437c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1438c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap;
1439c50d8ae3SPaolo Bonzini 	int level;
1440c50d8ae3SPaolo Bonzini 
1441c50d8ae3SPaolo Bonzini 	/* private field. */
1442c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *end_rmap;
1443c50d8ae3SPaolo Bonzini };
1444c50d8ae3SPaolo Bonzini 
1445c50d8ae3SPaolo Bonzini static void
1446c50d8ae3SPaolo Bonzini rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1447c50d8ae3SPaolo Bonzini {
1448c50d8ae3SPaolo Bonzini 	iterator->level = level;
1449c50d8ae3SPaolo Bonzini 	iterator->gfn = iterator->start_gfn;
1450c50d8ae3SPaolo Bonzini 	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
1451c50d8ae3SPaolo Bonzini 	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
1452c50d8ae3SPaolo Bonzini 					   iterator->slot);
1453c50d8ae3SPaolo Bonzini }
1454c50d8ae3SPaolo Bonzini 
1455c50d8ae3SPaolo Bonzini static void
1456c50d8ae3SPaolo Bonzini slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1457c50d8ae3SPaolo Bonzini 		    struct kvm_memory_slot *slot, int start_level,
1458c50d8ae3SPaolo Bonzini 		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
1459c50d8ae3SPaolo Bonzini {
1460c50d8ae3SPaolo Bonzini 	iterator->slot = slot;
1461c50d8ae3SPaolo Bonzini 	iterator->start_level = start_level;
1462c50d8ae3SPaolo Bonzini 	iterator->end_level = end_level;
1463c50d8ae3SPaolo Bonzini 	iterator->start_gfn = start_gfn;
1464c50d8ae3SPaolo Bonzini 	iterator->end_gfn = end_gfn;
1465c50d8ae3SPaolo Bonzini 
1466c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->start_level);
1467c50d8ae3SPaolo Bonzini }
1468c50d8ae3SPaolo Bonzini 
1469c50d8ae3SPaolo Bonzini static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1470c50d8ae3SPaolo Bonzini {
1471c50d8ae3SPaolo Bonzini 	return !!iterator->rmap;
1472c50d8ae3SPaolo Bonzini }
1473c50d8ae3SPaolo Bonzini 
1474c50d8ae3SPaolo Bonzini static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1475c50d8ae3SPaolo Bonzini {
1476c50d8ae3SPaolo Bonzini 	if (++iterator->rmap <= iterator->end_rmap) {
1477c50d8ae3SPaolo Bonzini 		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1478c50d8ae3SPaolo Bonzini 		return;
1479c50d8ae3SPaolo Bonzini 	}
1480c50d8ae3SPaolo Bonzini 
1481c50d8ae3SPaolo Bonzini 	if (++iterator->level > iterator->end_level) {
1482c50d8ae3SPaolo Bonzini 		iterator->rmap = NULL;
1483c50d8ae3SPaolo Bonzini 		return;
1484c50d8ae3SPaolo Bonzini 	}
1485c50d8ae3SPaolo Bonzini 
1486c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->level);
1487c50d8ae3SPaolo Bonzini }
1488c50d8ae3SPaolo Bonzini 
1489c50d8ae3SPaolo Bonzini #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
1490c50d8ae3SPaolo Bonzini 	   _start_gfn, _end_gfn, _iter_)				\
1491c50d8ae3SPaolo Bonzini 	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
1492c50d8ae3SPaolo Bonzini 				 _end_level_, _start_gfn, _end_gfn);	\
1493c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_okay(_iter_);				\
1494c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_next(_iter_))
1495c50d8ae3SPaolo Bonzini 
14963039bcc7SSean Christopherson typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1497c1b91493SSean Christopherson 			       struct kvm_memory_slot *slot, gfn_t gfn,
14983039bcc7SSean Christopherson 			       int level, pte_t pte);
1499c1b91493SSean Christopherson 
15003039bcc7SSean Christopherson static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
15013039bcc7SSean Christopherson 						 struct kvm_gfn_range *range,
1502c1b91493SSean Christopherson 						 rmap_handler_t handler)
1503c50d8ae3SPaolo Bonzini {
1504c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
15053039bcc7SSean Christopherson 	bool ret = false;
1506c50d8ae3SPaolo Bonzini 
15073039bcc7SSean Christopherson 	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
15083039bcc7SSean Christopherson 				 range->start, range->end - 1, &iterator)
15093039bcc7SSean Christopherson 		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
15103039bcc7SSean Christopherson 			       iterator.level, range->pte);
1511c50d8ae3SPaolo Bonzini 
1512c50d8ae3SPaolo Bonzini 	return ret;
1513c50d8ae3SPaolo Bonzini }
1514c50d8ae3SPaolo Bonzini 
15153039bcc7SSean Christopherson bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1516c50d8ae3SPaolo Bonzini {
1517e2209710SBen Gardon 	bool flush = false;
1518c50d8ae3SPaolo Bonzini 
1519e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
15203039bcc7SSean Christopherson 		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
1521063afacdSBen Gardon 
1522897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
15233039bcc7SSean Christopherson 		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1524063afacdSBen Gardon 
15253039bcc7SSean Christopherson 	return flush;
1526c50d8ae3SPaolo Bonzini }
1527c50d8ae3SPaolo Bonzini 
15283039bcc7SSean Christopherson bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1529c50d8ae3SPaolo Bonzini {
1530e2209710SBen Gardon 	bool flush = false;
15311d8dd6b3SBen Gardon 
1532e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
15333039bcc7SSean Christopherson 		flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
15341d8dd6b3SBen Gardon 
1535897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
15363039bcc7SSean Christopherson 		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
15371d8dd6b3SBen Gardon 
15383039bcc7SSean Christopherson 	return flush;
1539c50d8ae3SPaolo Bonzini }
1540c50d8ae3SPaolo Bonzini 
15413039bcc7SSean Christopherson static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1542c50d8ae3SPaolo Bonzini 			  struct kvm_memory_slot *slot, gfn_t gfn, int level,
15433039bcc7SSean Christopherson 			  pte_t unused)
1544c50d8ae3SPaolo Bonzini {
1545c50d8ae3SPaolo Bonzini 	u64 *sptep;
15463f649ab7SKees Cook 	struct rmap_iterator iter;
1547c50d8ae3SPaolo Bonzini 	int young = 0;
1548c50d8ae3SPaolo Bonzini 
1549c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1550c50d8ae3SPaolo Bonzini 		young |= mmu_spte_age(sptep);
1551c50d8ae3SPaolo Bonzini 
1552c50d8ae3SPaolo Bonzini 	return young;
1553c50d8ae3SPaolo Bonzini }
1554c50d8ae3SPaolo Bonzini 
15553039bcc7SSean Christopherson static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1556c50d8ae3SPaolo Bonzini 			       struct kvm_memory_slot *slot, gfn_t gfn,
15573039bcc7SSean Christopherson 			       int level, pte_t unused)
1558c50d8ae3SPaolo Bonzini {
1559c50d8ae3SPaolo Bonzini 	u64 *sptep;
1560c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1561c50d8ae3SPaolo Bonzini 
1562c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1563c50d8ae3SPaolo Bonzini 		if (is_accessed_spte(*sptep))
1564c50d8ae3SPaolo Bonzini 			return 1;
1565c50d8ae3SPaolo Bonzini 	return 0;
1566c50d8ae3SPaolo Bonzini }
1567c50d8ae3SPaolo Bonzini 
1568c50d8ae3SPaolo Bonzini #define RMAP_RECYCLE_THRESHOLD 1000
1569c50d8ae3SPaolo Bonzini 
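/*
 * Zap every spte in @gfn's rmap and flush the TLBs for the range covered by
 * the containing shadow page's level.
 */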
1570c50d8ae3SPaolo Bonzini static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1571c50d8ae3SPaolo Bonzini {
1572c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1573c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1574c50d8ae3SPaolo Bonzini 
157557354682SSean Christopherson 	sp = sptep_to_sp(spte);
1576c50d8ae3SPaolo Bonzini 
1577c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1578c50d8ae3SPaolo Bonzini 
15793039bcc7SSean Christopherson 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
1580c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1581c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1582c50d8ae3SPaolo Bonzini }
1583c50d8ae3SPaolo Bonzini 
15843039bcc7SSean Christopherson bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1585c50d8ae3SPaolo Bonzini {
1586e2209710SBen Gardon 	bool young = false;
1587f8e14497SBen Gardon 
1588e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
15893039bcc7SSean Christopherson 		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
15903039bcc7SSean Christopherson 
1591897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
15923039bcc7SSean Christopherson 		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1593f8e14497SBen Gardon 
1594f8e14497SBen Gardon 	return young;
1595c50d8ae3SPaolo Bonzini }
1596c50d8ae3SPaolo Bonzini 
15973039bcc7SSean Christopherson bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1598c50d8ae3SPaolo Bonzini {
1599e2209710SBen Gardon 	bool young = false;
1600f8e14497SBen Gardon 
1601e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
16023039bcc7SSean Christopherson 		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
16033039bcc7SSean Christopherson 
1604897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
16053039bcc7SSean Christopherson 		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1606f8e14497SBen Gardon 
1607f8e14497SBen Gardon 	return young;
1608c50d8ae3SPaolo Bonzini }
1609c50d8ae3SPaolo Bonzini 
1610c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1611c50d8ae3SPaolo Bonzini static int is_empty_shadow_page(u64 *spt)
1612c50d8ae3SPaolo Bonzini {
1613c50d8ae3SPaolo Bonzini 	u64 *pos;
1614c50d8ae3SPaolo Bonzini 	u64 *end;
1615c50d8ae3SPaolo Bonzini 
1616c50d8ae3SPaolo Bonzini 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1617c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*pos)) {
1618c50d8ae3SPaolo Bonzini 			printk(KERN_ERR "%s: %p %llx\n", __func__,
1619c50d8ae3SPaolo Bonzini 			       pos, *pos);
1620c50d8ae3SPaolo Bonzini 			return 0;
1621c50d8ae3SPaolo Bonzini 		}
1622c50d8ae3SPaolo Bonzini 	return 1;
1623c50d8ae3SPaolo Bonzini }
1624c50d8ae3SPaolo Bonzini #endif
1625c50d8ae3SPaolo Bonzini 
1626c50d8ae3SPaolo Bonzini /*
1627c50d8ae3SPaolo Bonzini  * This value is the sum of all of the kvm instances'
1628c50d8ae3SPaolo Bonzini  * kvm->arch.n_used_mmu_pages values.  We need a global,
1629c50d8ae3SPaolo Bonzini  * aggregate version in order to make the slab shrinker
1630c50d8ae3SPaolo Bonzini  * faster.
1631c50d8ae3SPaolo Bonzini  */
1632c50d8ae3SPaolo Bonzini static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
1633c50d8ae3SPaolo Bonzini {
1634c50d8ae3SPaolo Bonzini 	kvm->arch.n_used_mmu_pages += nr;
1635c50d8ae3SPaolo Bonzini 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1636c50d8ae3SPaolo Bonzini }
1637c50d8ae3SPaolo Bonzini 
1638c50d8ae3SPaolo Bonzini static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1639c50d8ae3SPaolo Bonzini {
1640c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1641c50d8ae3SPaolo Bonzini 	hlist_del(&sp->hash_link);
1642c50d8ae3SPaolo Bonzini 	list_del(&sp->link);
1643c50d8ae3SPaolo Bonzini 	free_page((unsigned long)sp->spt);
1644c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
1645c50d8ae3SPaolo Bonzini 		free_page((unsigned long)sp->gfns);
1646c50d8ae3SPaolo Bonzini 	kmem_cache_free(mmu_page_header_cache, sp);
1647c50d8ae3SPaolo Bonzini }
1648c50d8ae3SPaolo Bonzini 
1649c50d8ae3SPaolo Bonzini static unsigned kvm_page_table_hashfn(gfn_t gfn)
1650c50d8ae3SPaolo Bonzini {
1651c50d8ae3SPaolo Bonzini 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1652c50d8ae3SPaolo Bonzini }
1653c50d8ae3SPaolo Bonzini 
1654c50d8ae3SPaolo Bonzini static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1655c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp, u64 *parent_pte)
1656c50d8ae3SPaolo Bonzini {
1657c50d8ae3SPaolo Bonzini 	if (!parent_pte)
1658c50d8ae3SPaolo Bonzini 		return;
1659c50d8ae3SPaolo Bonzini 
1660c50d8ae3SPaolo Bonzini 	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1661c50d8ae3SPaolo Bonzini }
1662c50d8ae3SPaolo Bonzini 
1663c50d8ae3SPaolo Bonzini static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1664c50d8ae3SPaolo Bonzini 				       u64 *parent_pte)
1665c50d8ae3SPaolo Bonzini {
1666c50d8ae3SPaolo Bonzini 	__pte_list_remove(parent_pte, &sp->parent_ptes);
1667c50d8ae3SPaolo Bonzini }
1668c50d8ae3SPaolo Bonzini 
1669c50d8ae3SPaolo Bonzini static void drop_parent_pte(struct kvm_mmu_page *sp,
1670c50d8ae3SPaolo Bonzini 			    u64 *parent_pte)
1671c50d8ae3SPaolo Bonzini {
1672c50d8ae3SPaolo Bonzini 	mmu_page_remove_parent_pte(sp, parent_pte);
1673c50d8ae3SPaolo Bonzini 	mmu_spte_clear_no_track(parent_pte);
1674c50d8ae3SPaolo Bonzini }
1675c50d8ae3SPaolo Bonzini 
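/*
 * Allocate a new shadow page from the per-vCPU memory caches (header,
 * page-table page and, for indirect pages, the gfns array) and add it to
 * the head of the VM's active_mmu_pages list.
 */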
1676c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1677c50d8ae3SPaolo Bonzini {
1678c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1679c50d8ae3SPaolo Bonzini 
168094ce87efSSean Christopherson 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
168194ce87efSSean Christopherson 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1682c50d8ae3SPaolo Bonzini 	if (!direct)
168394ce87efSSean Christopherson 		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1684c50d8ae3SPaolo Bonzini 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1685c50d8ae3SPaolo Bonzini 
1686c50d8ae3SPaolo Bonzini 	/*
1687c50d8ae3SPaolo Bonzini 	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
1688c50d8ae3SPaolo Bonzini 	 * depends on valid pages being added to the head of the list.  See
1689c50d8ae3SPaolo Bonzini 	 * comments in kvm_zap_obsolete_pages().
1690c50d8ae3SPaolo Bonzini 	 */
1691c50d8ae3SPaolo Bonzini 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1692c50d8ae3SPaolo Bonzini 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1693c50d8ae3SPaolo Bonzini 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1694c50d8ae3SPaolo Bonzini 	return sp;
1695c50d8ae3SPaolo Bonzini }
1696c50d8ae3SPaolo Bonzini 
1697c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte);
1698c50d8ae3SPaolo Bonzini static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1699c50d8ae3SPaolo Bonzini {
1700c50d8ae3SPaolo Bonzini 	u64 *sptep;
1701c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1702c50d8ae3SPaolo Bonzini 
1703c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1704c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
1705c50d8ae3SPaolo Bonzini 	}
1706c50d8ae3SPaolo Bonzini }
1707c50d8ae3SPaolo Bonzini 
1708c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte)
1709c50d8ae3SPaolo Bonzini {
1710c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1711c50d8ae3SPaolo Bonzini 	unsigned int index;
1712c50d8ae3SPaolo Bonzini 
171357354682SSean Christopherson 	sp = sptep_to_sp(spte);
1714c50d8ae3SPaolo Bonzini 	index = spte - sp->spt;
1715c50d8ae3SPaolo Bonzini 	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1716c50d8ae3SPaolo Bonzini 		return;
1717c50d8ae3SPaolo Bonzini 	if (sp->unsync_children++)
1718c50d8ae3SPaolo Bonzini 		return;
1719c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
1720c50d8ae3SPaolo Bonzini }
1721c50d8ae3SPaolo Bonzini 
1722c50d8ae3SPaolo Bonzini static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1723c50d8ae3SPaolo Bonzini 			       struct kvm_mmu_page *sp)
1724c50d8ae3SPaolo Bonzini {
1725c50d8ae3SPaolo Bonzini 	return 0;
1726c50d8ae3SPaolo Bonzini }
1727c50d8ae3SPaolo Bonzini 
1728c50d8ae3SPaolo Bonzini #define KVM_PAGE_ARRAY_NR 16
1729c50d8ae3SPaolo Bonzini 
1730c50d8ae3SPaolo Bonzini struct kvm_mmu_pages {
1731c50d8ae3SPaolo Bonzini 	struct mmu_page_and_offset {
1732c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
1733c50d8ae3SPaolo Bonzini 		unsigned int idx;
1734c50d8ae3SPaolo Bonzini 	} page[KVM_PAGE_ARRAY_NR];
1735c50d8ae3SPaolo Bonzini 	unsigned int nr;
1736c50d8ae3SPaolo Bonzini };
1737c50d8ae3SPaolo Bonzini 
1738c50d8ae3SPaolo Bonzini static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1739c50d8ae3SPaolo Bonzini 			 int idx)
1740c50d8ae3SPaolo Bonzini {
1741c50d8ae3SPaolo Bonzini 	int i;
1742c50d8ae3SPaolo Bonzini 
1743c50d8ae3SPaolo Bonzini 	if (sp->unsync)
1744c50d8ae3SPaolo Bonzini 		for (i=0; i < pvec->nr; i++)
1745c50d8ae3SPaolo Bonzini 			if (pvec->page[i].sp == sp)
1746c50d8ae3SPaolo Bonzini 				return 0;
1747c50d8ae3SPaolo Bonzini 
1748c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].sp = sp;
1749c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].idx = idx;
1750c50d8ae3SPaolo Bonzini 	pvec->nr++;
1751c50d8ae3SPaolo Bonzini 	return (pvec->nr == KVM_PAGE_ARRAY_NR);
1752c50d8ae3SPaolo Bonzini }
1753c50d8ae3SPaolo Bonzini 
1754c50d8ae3SPaolo Bonzini static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1755c50d8ae3SPaolo Bonzini {
1756c50d8ae3SPaolo Bonzini 	--sp->unsync_children;
1757c50d8ae3SPaolo Bonzini 	WARN_ON((int)sp->unsync_children < 0);
1758c50d8ae3SPaolo Bonzini 	__clear_bit(idx, sp->unsync_child_bitmap);
1759c50d8ae3SPaolo Bonzini }
1760c50d8ae3SPaolo Bonzini 
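/*
 * Recursively collect the unsync descendants of @sp into @pvec.  Returns the
 * number of unsync leaf pages found, or -ENOSPC if @pvec is full.
 */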
1761c50d8ae3SPaolo Bonzini static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1762c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
1763c50d8ae3SPaolo Bonzini {
1764c50d8ae3SPaolo Bonzini 	int i, ret, nr_unsync_leaf = 0;
1765c50d8ae3SPaolo Bonzini 
1766c50d8ae3SPaolo Bonzini 	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1767c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
1768c50d8ae3SPaolo Bonzini 		u64 ent = sp->spt[i];
1769c50d8ae3SPaolo Bonzini 
1770c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1771c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
1772c50d8ae3SPaolo Bonzini 			continue;
1773c50d8ae3SPaolo Bonzini 		}
1774c50d8ae3SPaolo Bonzini 
1775e47c4aeeSSean Christopherson 		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1776c50d8ae3SPaolo Bonzini 
1777c50d8ae3SPaolo Bonzini 		if (child->unsync_children) {
1778c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
1779c50d8ae3SPaolo Bonzini 				return -ENOSPC;
1780c50d8ae3SPaolo Bonzini 
1781c50d8ae3SPaolo Bonzini 			ret = __mmu_unsync_walk(child, pvec);
1782c50d8ae3SPaolo Bonzini 			if (!ret) {
1783c50d8ae3SPaolo Bonzini 				clear_unsync_child_bit(sp, i);
1784c50d8ae3SPaolo Bonzini 				continue;
1785c50d8ae3SPaolo Bonzini 			} else if (ret > 0) {
1786c50d8ae3SPaolo Bonzini 				nr_unsync_leaf += ret;
1787c50d8ae3SPaolo Bonzini 			} else
1788c50d8ae3SPaolo Bonzini 				return ret;
1789c50d8ae3SPaolo Bonzini 		} else if (child->unsync) {
1790c50d8ae3SPaolo Bonzini 			nr_unsync_leaf++;
1791c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
1792c50d8ae3SPaolo Bonzini 				return -ENOSPC;
1793c50d8ae3SPaolo Bonzini 		} else
1794c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
1795c50d8ae3SPaolo Bonzini 	}
1796c50d8ae3SPaolo Bonzini 
1797c50d8ae3SPaolo Bonzini 	return nr_unsync_leaf;
1798c50d8ae3SPaolo Bonzini }
1799c50d8ae3SPaolo Bonzini 
1800c50d8ae3SPaolo Bonzini #define INVALID_INDEX (-1)
1801c50d8ae3SPaolo Bonzini 
1802c50d8ae3SPaolo Bonzini static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1803c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
1804c50d8ae3SPaolo Bonzini {
1805c50d8ae3SPaolo Bonzini 	pvec->nr = 0;
1806c50d8ae3SPaolo Bonzini 	if (!sp->unsync_children)
1807c50d8ae3SPaolo Bonzini 		return 0;
1808c50d8ae3SPaolo Bonzini 
1809c50d8ae3SPaolo Bonzini 	mmu_pages_add(pvec, sp, INVALID_INDEX);
1810c50d8ae3SPaolo Bonzini 	return __mmu_unsync_walk(sp, pvec);
1811c50d8ae3SPaolo Bonzini }
1812c50d8ae3SPaolo Bonzini 
1813c50d8ae3SPaolo Bonzini static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1814c50d8ae3SPaolo Bonzini {
1815c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->unsync);
1816c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_sync_page(sp);
1817c50d8ae3SPaolo Bonzini 	sp->unsync = 0;
1818c50d8ae3SPaolo Bonzini 	--kvm->stat.mmu_unsync;
1819c50d8ae3SPaolo Bonzini }
1820c50d8ae3SPaolo Bonzini 
1821c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1822c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list);
1823c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1824c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list);
1825c50d8ae3SPaolo Bonzini 
1826ac101b7cSSean Christopherson #define for_each_valid_sp(_kvm, _sp, _list)				\
1827ac101b7cSSean Christopherson 	hlist_for_each_entry(_sp, _list, hash_link)			\
1828c50d8ae3SPaolo Bonzini 		if (is_obsolete_sp((_kvm), (_sp))) {			\
1829c50d8ae3SPaolo Bonzini 		} else
1830c50d8ae3SPaolo Bonzini 
1831c50d8ae3SPaolo Bonzini #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
1832ac101b7cSSean Christopherson 	for_each_valid_sp(_kvm, _sp,					\
1833ac101b7cSSean Christopherson 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
1834c50d8ae3SPaolo Bonzini 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
1835c50d8ae3SPaolo Bonzini 
1836479a1efcSSean Christopherson static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1837c50d8ae3SPaolo Bonzini 			 struct list_head *invalid_list)
1838c50d8ae3SPaolo Bonzini {
18392640b086SSean Christopherson 	if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
1840c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1841c50d8ae3SPaolo Bonzini 		return false;
1842c50d8ae3SPaolo Bonzini 	}
1843c50d8ae3SPaolo Bonzini 
1844c50d8ae3SPaolo Bonzini 	return true;
1845c50d8ae3SPaolo Bonzini }
1846c50d8ae3SPaolo Bonzini 
1847c50d8ae3SPaolo Bonzini static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1848c50d8ae3SPaolo Bonzini 					struct list_head *invalid_list,
1849c50d8ae3SPaolo Bonzini 					bool remote_flush)
1850c50d8ae3SPaolo Bonzini {
1851c50d8ae3SPaolo Bonzini 	if (!remote_flush && list_empty(invalid_list))
1852c50d8ae3SPaolo Bonzini 		return false;
1853c50d8ae3SPaolo Bonzini 
1854c50d8ae3SPaolo Bonzini 	if (!list_empty(invalid_list))
1855c50d8ae3SPaolo Bonzini 		kvm_mmu_commit_zap_page(kvm, invalid_list);
1856c50d8ae3SPaolo Bonzini 	else
1857c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
1858c50d8ae3SPaolo Bonzini 	return true;
1859c50d8ae3SPaolo Bonzini }
1860c50d8ae3SPaolo Bonzini 
1861c50d8ae3SPaolo Bonzini static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
1862c50d8ae3SPaolo Bonzini 				 struct list_head *invalid_list,
1863c50d8ae3SPaolo Bonzini 				 bool remote_flush, bool local_flush)
1864c50d8ae3SPaolo Bonzini {
1865c50d8ae3SPaolo Bonzini 	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
1866c50d8ae3SPaolo Bonzini 		return;
1867c50d8ae3SPaolo Bonzini 
1868c50d8ae3SPaolo Bonzini 	if (local_flush)
18698c8560b8SSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1870c50d8ae3SPaolo Bonzini }
1871c50d8ae3SPaolo Bonzini 
1872c50d8ae3SPaolo Bonzini #ifdef CONFIG_KVM_MMU_AUDIT
1873c50d8ae3SPaolo Bonzini #include "mmu_audit.c"
1874c50d8ae3SPaolo Bonzini #else
1875c50d8ae3SPaolo Bonzini static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
1876c50d8ae3SPaolo Bonzini static void mmu_audit_disable(void) { }
1877c50d8ae3SPaolo Bonzini #endif
1878c50d8ae3SPaolo Bonzini 
1879c50d8ae3SPaolo Bonzini static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1880c50d8ae3SPaolo Bonzini {
1881c50d8ae3SPaolo Bonzini 	return sp->role.invalid ||
1882c50d8ae3SPaolo Bonzini 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1883c50d8ae3SPaolo Bonzini }
1884c50d8ae3SPaolo Bonzini 
1885c50d8ae3SPaolo Bonzini struct mmu_page_path {
1886c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1887c50d8ae3SPaolo Bonzini 	unsigned int idx[PT64_ROOT_MAX_LEVEL];
1888c50d8ae3SPaolo Bonzini };
1889c50d8ae3SPaolo Bonzini 
1890c50d8ae3SPaolo Bonzini #define for_each_sp(pvec, sp, parents, i)			\
1891c50d8ae3SPaolo Bonzini 		for (i = mmu_pages_first(&pvec, &parents);	\
1892c50d8ae3SPaolo Bonzini 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
1893c50d8ae3SPaolo Bonzini 			i = mmu_pages_next(&pvec, &parents, i))
1894c50d8ae3SPaolo Bonzini 
1895c50d8ae3SPaolo Bonzini static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1896c50d8ae3SPaolo Bonzini 			  struct mmu_page_path *parents,
1897c50d8ae3SPaolo Bonzini 			  int i)
1898c50d8ae3SPaolo Bonzini {
1899c50d8ae3SPaolo Bonzini 	int n;
1900c50d8ae3SPaolo Bonzini 
1901c50d8ae3SPaolo Bonzini 	for (n = i+1; n < pvec->nr; n++) {
1902c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp = pvec->page[n].sp;
1903c50d8ae3SPaolo Bonzini 		unsigned idx = pvec->page[n].idx;
1904c50d8ae3SPaolo Bonzini 		int level = sp->role.level;
1905c50d8ae3SPaolo Bonzini 
1906c50d8ae3SPaolo Bonzini 		parents->idx[level-1] = idx;
19073bae0459SSean Christopherson 		if (level == PG_LEVEL_4K)
1908c50d8ae3SPaolo Bonzini 			break;
1909c50d8ae3SPaolo Bonzini 
1910c50d8ae3SPaolo Bonzini 		parents->parent[level-2] = sp;
1911c50d8ae3SPaolo Bonzini 	}
1912c50d8ae3SPaolo Bonzini 
1913c50d8ae3SPaolo Bonzini 	return n;
1914c50d8ae3SPaolo Bonzini }
1915c50d8ae3SPaolo Bonzini 
1916c50d8ae3SPaolo Bonzini static int mmu_pages_first(struct kvm_mmu_pages *pvec,
1917c50d8ae3SPaolo Bonzini 			   struct mmu_page_path *parents)
1918c50d8ae3SPaolo Bonzini {
1919c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1920c50d8ae3SPaolo Bonzini 	int level;
1921c50d8ae3SPaolo Bonzini 
1922c50d8ae3SPaolo Bonzini 	if (pvec->nr == 0)
1923c50d8ae3SPaolo Bonzini 		return 0;
1924c50d8ae3SPaolo Bonzini 
1925c50d8ae3SPaolo Bonzini 	WARN_ON(pvec->page[0].idx != INVALID_INDEX);
1926c50d8ae3SPaolo Bonzini 
1927c50d8ae3SPaolo Bonzini 	sp = pvec->page[0].sp;
1928c50d8ae3SPaolo Bonzini 	level = sp->role.level;
19293bae0459SSean Christopherson 	WARN_ON(level == PG_LEVEL_4K);
1930c50d8ae3SPaolo Bonzini 
1931c50d8ae3SPaolo Bonzini 	parents->parent[level-2] = sp;
1932c50d8ae3SPaolo Bonzini 
1933c50d8ae3SPaolo Bonzini 	/* Also set up a sentinel.  Further entries in pvec are all
1934c50d8ae3SPaolo Bonzini 	 * children of sp, so this element is never overwritten.
1935c50d8ae3SPaolo Bonzini 	 */
1936c50d8ae3SPaolo Bonzini 	parents->parent[level-1] = NULL;
1937c50d8ae3SPaolo Bonzini 	return mmu_pages_next(pvec, parents, 0);
1938c50d8ae3SPaolo Bonzini }
1939c50d8ae3SPaolo Bonzini 
1940c50d8ae3SPaolo Bonzini static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1941c50d8ae3SPaolo Bonzini {
1942c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1943c50d8ae3SPaolo Bonzini 	unsigned int level = 0;
1944c50d8ae3SPaolo Bonzini 
1945c50d8ae3SPaolo Bonzini 	do {
1946c50d8ae3SPaolo Bonzini 		unsigned int idx = parents->idx[level];
1947c50d8ae3SPaolo Bonzini 		sp = parents->parent[level];
1948c50d8ae3SPaolo Bonzini 		if (!sp)
1949c50d8ae3SPaolo Bonzini 			return;
1950c50d8ae3SPaolo Bonzini 
1951c50d8ae3SPaolo Bonzini 		WARN_ON(idx == INVALID_INDEX);
1952c50d8ae3SPaolo Bonzini 		clear_unsync_child_bit(sp, idx);
1953c50d8ae3SPaolo Bonzini 		level++;
1954c50d8ae3SPaolo Bonzini 	} while (!sp->unsync_children);
1955c50d8ae3SPaolo Bonzini }
1956c50d8ae3SPaolo Bonzini 
1957c50d8ae3SPaolo Bonzini static void mmu_sync_children(struct kvm_vcpu *vcpu,
1958c50d8ae3SPaolo Bonzini 			      struct kvm_mmu_page *parent)
1959c50d8ae3SPaolo Bonzini {
1960c50d8ae3SPaolo Bonzini 	int i;
1961c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1962c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
1963c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
1964c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
1965c50d8ae3SPaolo Bonzini 	bool flush = false;
1966c50d8ae3SPaolo Bonzini 
1967c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
1968c50d8ae3SPaolo Bonzini 		bool protected = false;
1969c50d8ae3SPaolo Bonzini 
1970c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i)
1971c50d8ae3SPaolo Bonzini 			protected |= rmap_write_protect(vcpu, sp->gfn);
1972c50d8ae3SPaolo Bonzini 
1973c50d8ae3SPaolo Bonzini 		if (protected) {
1974c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs(vcpu->kvm);
1975c50d8ae3SPaolo Bonzini 			flush = false;
1976c50d8ae3SPaolo Bonzini 		}
1977c50d8ae3SPaolo Bonzini 
1978c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
1979479a1efcSSean Christopherson 			kvm_unlink_unsync_page(vcpu->kvm, sp);
1980c50d8ae3SPaolo Bonzini 			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
1981c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
1982c50d8ae3SPaolo Bonzini 		}
1983531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
1984c50d8ae3SPaolo Bonzini 			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
1985531810caSBen Gardon 			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
1986c50d8ae3SPaolo Bonzini 			flush = false;
1987c50d8ae3SPaolo Bonzini 		}
1988c50d8ae3SPaolo Bonzini 	}
1989c50d8ae3SPaolo Bonzini 
1990c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
1991c50d8ae3SPaolo Bonzini }
1992c50d8ae3SPaolo Bonzini 
1993c50d8ae3SPaolo Bonzini static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
1994c50d8ae3SPaolo Bonzini {
1995c50d8ae3SPaolo Bonzini 	atomic_set(&sp->write_flooding_count,  0);
1996c50d8ae3SPaolo Bonzini }
1997c50d8ae3SPaolo Bonzini 
1998c50d8ae3SPaolo Bonzini static void clear_sp_write_flooding_count(u64 *spte)
1999c50d8ae3SPaolo Bonzini {
200057354682SSean Christopherson 	__clear_sp_write_flooding_count(sptep_to_sp(spte));
2001c50d8ae3SPaolo Bonzini }
2002c50d8ae3SPaolo Bonzini 
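/*
 * Get the shadow page for @gfn with the given level/access/quadrant: reuse an
 * existing page from the hash if possible (syncing or zapping stale and
 * unsync pages along the way), otherwise allocate and hash a new one.
 */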
2003c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2004c50d8ae3SPaolo Bonzini 					     gfn_t gfn,
2005c50d8ae3SPaolo Bonzini 					     gva_t gaddr,
2006c50d8ae3SPaolo Bonzini 					     unsigned level,
2007c50d8ae3SPaolo Bonzini 					     int direct,
20080a2b64c5SBen Gardon 					     unsigned int access)
2009c50d8ae3SPaolo Bonzini {
2010fb58a9c3SSean Christopherson 	bool direct_mmu = vcpu->arch.mmu->direct_map;
2011c50d8ae3SPaolo Bonzini 	union kvm_mmu_page_role role;
2012ac101b7cSSean Christopherson 	struct hlist_head *sp_list;
2013c50d8ae3SPaolo Bonzini 	unsigned quadrant;
2014c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2015c50d8ae3SPaolo Bonzini 	int collisions = 0;
2016c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2017c50d8ae3SPaolo Bonzini 
2018c50d8ae3SPaolo Bonzini 	role = vcpu->arch.mmu->mmu_role.base;
2019c50d8ae3SPaolo Bonzini 	role.level = level;
2020c50d8ae3SPaolo Bonzini 	role.direct = direct;
2021c50d8ae3SPaolo Bonzini 	if (role.direct)
2022c50d8ae3SPaolo Bonzini 		role.gpte_is_8_bytes = true;
2023c50d8ae3SPaolo Bonzini 	role.access = access;
2024fb58a9c3SSean Christopherson 	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2025c50d8ae3SPaolo Bonzini 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2026c50d8ae3SPaolo Bonzini 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2027c50d8ae3SPaolo Bonzini 		role.quadrant = quadrant;
2028c50d8ae3SPaolo Bonzini 	}
2029ac101b7cSSean Christopherson 
2030ac101b7cSSean Christopherson 	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2031ac101b7cSSean Christopherson 	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
2032c50d8ae3SPaolo Bonzini 		if (sp->gfn != gfn) {
2033c50d8ae3SPaolo Bonzini 			collisions++;
2034c50d8ae3SPaolo Bonzini 			continue;
2035c50d8ae3SPaolo Bonzini 		}
2036c50d8ae3SPaolo Bonzini 
2037ddc16abbSSean Christopherson 		if (sp->role.word != role.word) {
2038ddc16abbSSean Christopherson 			/*
2039ddc16abbSSean Christopherson 			 * If the guest is creating an upper-level page, zap
2040ddc16abbSSean Christopherson 			 * unsync pages for the same gfn.  While it's possible
2041ddc16abbSSean Christopherson 			 * the guest is using recursive page tables, in all
2042ddc16abbSSean Christopherson 			 * likelihood the guest has stopped using the unsync
2043ddc16abbSSean Christopherson 			 * page and is installing a completely unrelated page.
2044ddc16abbSSean Christopherson 			 * Unsync pages must not be left as is, because the new
2045ddc16abbSSean Christopherson 			 * upper-level page will be write-protected.
2046ddc16abbSSean Christopherson 			 */
2047ddc16abbSSean Christopherson 			if (level > PG_LEVEL_4K && sp->unsync)
2048ddc16abbSSean Christopherson 				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2049ddc16abbSSean Christopherson 							 &invalid_list);
2050c50d8ae3SPaolo Bonzini 			continue;
2051ddc16abbSSean Christopherson 		}
2052c50d8ae3SPaolo Bonzini 
2053fb58a9c3SSean Christopherson 		if (direct_mmu)
2054fb58a9c3SSean Christopherson 			goto trace_get_page;
2055fb58a9c3SSean Christopherson 
2056c50d8ae3SPaolo Bonzini 		if (sp->unsync) {
205707dc4f35SSean Christopherson 			/*
2058479a1efcSSean Christopherson 			 * The page is good, but is stale.  kvm_sync_page does
205907dc4f35SSean Christopherson 			 * get the latest guest state, but (unlike mmu_unsync_children)
206007dc4f35SSean Christopherson 			 * it doesn't write-protect the page or mark it synchronized!
206107dc4f35SSean Christopherson 			 * This way the validity of the mapping is ensured, but the
206207dc4f35SSean Christopherson 			 * overhead of write protection is not incurred until the
206307dc4f35SSean Christopherson 			 * guest invalidates the TLB mapping.  This allows multiple
206407dc4f35SSean Christopherson 			 * SPs for a single gfn to be unsync.
206507dc4f35SSean Christopherson 			 *
206607dc4f35SSean Christopherson 			 * If the sync fails, the page is zapped.  If so, break
206707dc4f35SSean Christopherson 			 * in order to rebuild it.
2068c50d8ae3SPaolo Bonzini 			 */
2069479a1efcSSean Christopherson 			if (!kvm_sync_page(vcpu, sp, &invalid_list))
2070c50d8ae3SPaolo Bonzini 				break;
2071c50d8ae3SPaolo Bonzini 
2072c50d8ae3SPaolo Bonzini 			WARN_ON(!list_empty(&invalid_list));
20738c8560b8SSean Christopherson 			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2074c50d8ae3SPaolo Bonzini 		}
2075c50d8ae3SPaolo Bonzini 
2076c50d8ae3SPaolo Bonzini 		if (sp->unsync_children)
2077f6f6195bSLai Jiangshan 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2078c50d8ae3SPaolo Bonzini 
2079c50d8ae3SPaolo Bonzini 		__clear_sp_write_flooding_count(sp);
2080fb58a9c3SSean Christopherson 
2081fb58a9c3SSean Christopherson trace_get_page:
2082c50d8ae3SPaolo Bonzini 		trace_kvm_mmu_get_page(sp, false);
2083c50d8ae3SPaolo Bonzini 		goto out;
2084c50d8ae3SPaolo Bonzini 	}
2085c50d8ae3SPaolo Bonzini 
2086c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_cache_miss;
2087c50d8ae3SPaolo Bonzini 
2088c50d8ae3SPaolo Bonzini 	sp = kvm_mmu_alloc_page(vcpu, direct);
2089c50d8ae3SPaolo Bonzini 
2090c50d8ae3SPaolo Bonzini 	sp->gfn = gfn;
2091c50d8ae3SPaolo Bonzini 	sp->role = role;
2092ac101b7cSSean Christopherson 	hlist_add_head(&sp->hash_link, sp_list);
2093c50d8ae3SPaolo Bonzini 	if (!direct) {
2094c50d8ae3SPaolo Bonzini 		account_shadowed(vcpu->kvm, sp);
20953bae0459SSean Christopherson 		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
2096c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2097c50d8ae3SPaolo Bonzini 	}
2098c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_get_page(sp, true);
2099c50d8ae3SPaolo Bonzini out:
2100ddc16abbSSean Christopherson 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2101ddc16abbSSean Christopherson 
2102c50d8ae3SPaolo Bonzini 	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2103c50d8ae3SPaolo Bonzini 		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2104c50d8ae3SPaolo Bonzini 	return sp;
2105c50d8ae3SPaolo Bonzini }
2106c50d8ae3SPaolo Bonzini 
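/*
 * Set up an iterator for walking the shadow page tables for @addr, starting
 * at @root and the current shadow root level; for PT32E roots the walk
 * starts at the pae_root entry selected by @addr.
 */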
2107c50d8ae3SPaolo Bonzini static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2108c50d8ae3SPaolo Bonzini 					struct kvm_vcpu *vcpu, hpa_t root,
2109c50d8ae3SPaolo Bonzini 					u64 addr)
2110c50d8ae3SPaolo Bonzini {
2111c50d8ae3SPaolo Bonzini 	iterator->addr = addr;
2112c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = root;
2113c50d8ae3SPaolo Bonzini 	iterator->level = vcpu->arch.mmu->shadow_root_level;
2114c50d8ae3SPaolo Bonzini 
2115c50d8ae3SPaolo Bonzini 	if (iterator->level == PT64_ROOT_4LEVEL &&
2116c50d8ae3SPaolo Bonzini 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2117c50d8ae3SPaolo Bonzini 	    !vcpu->arch.mmu->direct_map)
2118c50d8ae3SPaolo Bonzini 		--iterator->level;
2119c50d8ae3SPaolo Bonzini 
2120c50d8ae3SPaolo Bonzini 	if (iterator->level == PT32E_ROOT_LEVEL) {
2121c50d8ae3SPaolo Bonzini 		/*
2122c50d8ae3SPaolo Bonzini 		 * prev_root is currently only used for 64-bit hosts. So only
2123c50d8ae3SPaolo Bonzini 		 * the active root_hpa is valid here.
2124c50d8ae3SPaolo Bonzini 		 */
2125c50d8ae3SPaolo Bonzini 		BUG_ON(root != vcpu->arch.mmu->root_hpa);
2126c50d8ae3SPaolo Bonzini 
2127c50d8ae3SPaolo Bonzini 		iterator->shadow_addr
2128c50d8ae3SPaolo Bonzini 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2129c50d8ae3SPaolo Bonzini 		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2130c50d8ae3SPaolo Bonzini 		--iterator->level;
2131c50d8ae3SPaolo Bonzini 		if (!iterator->shadow_addr)
2132c50d8ae3SPaolo Bonzini 			iterator->level = 0;
2133c50d8ae3SPaolo Bonzini 	}
2134c50d8ae3SPaolo Bonzini }
2135c50d8ae3SPaolo Bonzini 
2136c50d8ae3SPaolo Bonzini static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2137c50d8ae3SPaolo Bonzini 			     struct kvm_vcpu *vcpu, u64 addr)
2138c50d8ae3SPaolo Bonzini {
2139c50d8ae3SPaolo Bonzini 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2140c50d8ae3SPaolo Bonzini 				    addr);
2141c50d8ae3SPaolo Bonzini }
2142c50d8ae3SPaolo Bonzini 
2143c50d8ae3SPaolo Bonzini static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2144c50d8ae3SPaolo Bonzini {
21453bae0459SSean Christopherson 	if (iterator->level < PG_LEVEL_4K)
2146c50d8ae3SPaolo Bonzini 		return false;
2147c50d8ae3SPaolo Bonzini 
2148c50d8ae3SPaolo Bonzini 	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2149c50d8ae3SPaolo Bonzini 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2150c50d8ae3SPaolo Bonzini 	return true;
2151c50d8ae3SPaolo Bonzini }
2152c50d8ae3SPaolo Bonzini 
2153c50d8ae3SPaolo Bonzini static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2154c50d8ae3SPaolo Bonzini 			       u64 spte)
2155c50d8ae3SPaolo Bonzini {
2156c50d8ae3SPaolo Bonzini 	if (is_last_spte(spte, iterator->level)) {
2157c50d8ae3SPaolo Bonzini 		iterator->level = 0;
2158c50d8ae3SPaolo Bonzini 		return;
2159c50d8ae3SPaolo Bonzini 	}
2160c50d8ae3SPaolo Bonzini 
2161c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2162c50d8ae3SPaolo Bonzini 	--iterator->level;
2163c50d8ae3SPaolo Bonzini }
2164c50d8ae3SPaolo Bonzini 
2165c50d8ae3SPaolo Bonzini static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2166c50d8ae3SPaolo Bonzini {
2167c50d8ae3SPaolo Bonzini 	__shadow_walk_next(iterator, *iterator->sptep);
2168c50d8ae3SPaolo Bonzini }
2169c50d8ae3SPaolo Bonzini 
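/*
 * Point a non-leaf SPTE at the child shadow page and record the parent SPTE
 * in the child's parent_ptes rmap.  Unsync state is propagated up the tree
 * if the child is unsync or has unsync children.
 */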
2170c50d8ae3SPaolo Bonzini static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2171c50d8ae3SPaolo Bonzini 			     struct kvm_mmu_page *sp)
2172c50d8ae3SPaolo Bonzini {
2173c50d8ae3SPaolo Bonzini 	u64 spte;
2174c50d8ae3SPaolo Bonzini 
2175c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2176c50d8ae3SPaolo Bonzini 
2177cc4674d0SBen Gardon 	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2178c50d8ae3SPaolo Bonzini 
2179c50d8ae3SPaolo Bonzini 	mmu_spte_set(sptep, spte);
2180c50d8ae3SPaolo Bonzini 
2181c50d8ae3SPaolo Bonzini 	mmu_page_add_parent_pte(vcpu, sp, sptep);
2182c50d8ae3SPaolo Bonzini 
2183c50d8ae3SPaolo Bonzini 	if (sp->unsync_children || sp->unsync)
2184c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
2185c50d8ae3SPaolo Bonzini }
2186c50d8ae3SPaolo Bonzini 
2187c50d8ae3SPaolo Bonzini static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2188c50d8ae3SPaolo Bonzini 				   unsigned direct_access)
2189c50d8ae3SPaolo Bonzini {
2190c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2191c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
2192c50d8ae3SPaolo Bonzini 
2193c50d8ae3SPaolo Bonzini 		/*
2194c50d8ae3SPaolo Bonzini 		 * For the direct sp, if the guest pte's dirty bit
2195c50d8ae3SPaolo Bonzini 		 * changed from clean to dirty, it will corrupt the
2196c50d8ae3SPaolo Bonzini 		 * sp's access: i.e. allow writes through a read-only sp,
2197c50d8ae3SPaolo Bonzini 		 * so we should update the spte at this point to get
2198c50d8ae3SPaolo Bonzini 		 * a new sp with the correct access.
2199c50d8ae3SPaolo Bonzini 		 */
2200e47c4aeeSSean Christopherson 		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2201c50d8ae3SPaolo Bonzini 		if (child->role.access == direct_access)
2202c50d8ae3SPaolo Bonzini 			return;
2203c50d8ae3SPaolo Bonzini 
2204c50d8ae3SPaolo Bonzini 		drop_parent_pte(child, sptep);
2205c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2206c50d8ae3SPaolo Bonzini 	}
2207c50d8ae3SPaolo Bonzini }
2208c50d8ae3SPaolo Bonzini 
22092de4085cSBen Gardon /* Returns the number of zapped non-leaf child shadow pages. */
22102de4085cSBen Gardon static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
22112de4085cSBen Gardon 			    u64 *spte, struct list_head *invalid_list)
2212c50d8ae3SPaolo Bonzini {
2213c50d8ae3SPaolo Bonzini 	u64 pte;
2214c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *child;
2215c50d8ae3SPaolo Bonzini 
2216c50d8ae3SPaolo Bonzini 	pte = *spte;
2217c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(pte)) {
2218c50d8ae3SPaolo Bonzini 		if (is_last_spte(pte, sp->role.level)) {
2219c50d8ae3SPaolo Bonzini 			drop_spte(kvm, spte);
2220c50d8ae3SPaolo Bonzini 			if (is_large_pte(pte))
2221c50d8ae3SPaolo Bonzini 				--kvm->stat.lpages;
2222c50d8ae3SPaolo Bonzini 		} else {
2223e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2224c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, spte);
22252de4085cSBen Gardon 
22262de4085cSBen Gardon 			/*
22272de4085cSBen Gardon 			 * Recursively zap nested TDP SPs; parentless SPs are
22282de4085cSBen Gardon 			 * unlikely to be used again in the near future.  This
22292de4085cSBen Gardon 			 * avoids retaining a large number of stale nested SPs.
22302de4085cSBen Gardon 			 */
22312de4085cSBen Gardon 			if (tdp_enabled && invalid_list &&
22322de4085cSBen Gardon 			    child->role.guest_mode && !child->parent_ptes.val)
22332de4085cSBen Gardon 				return kvm_mmu_prepare_zap_page(kvm, child,
22342de4085cSBen Gardon 								invalid_list);
2235c50d8ae3SPaolo Bonzini 		}
2236ace569e0SSean Christopherson 	} else if (is_mmio_spte(pte)) {
2237c50d8ae3SPaolo Bonzini 		mmu_spte_clear_no_track(spte);
2238ace569e0SSean Christopherson 	}
22392de4085cSBen Gardon 	return 0;
2240c50d8ae3SPaolo Bonzini }
2241c50d8ae3SPaolo Bonzini 
22422de4085cSBen Gardon static int kvm_mmu_page_unlink_children(struct kvm *kvm,
22432de4085cSBen Gardon 					struct kvm_mmu_page *sp,
22442de4085cSBen Gardon 					struct list_head *invalid_list)
2245c50d8ae3SPaolo Bonzini {
22462de4085cSBen Gardon 	int zapped = 0;
2247c50d8ae3SPaolo Bonzini 	unsigned i;
2248c50d8ae3SPaolo Bonzini 
2249c50d8ae3SPaolo Bonzini 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
22502de4085cSBen Gardon 		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
22512de4085cSBen Gardon 
22522de4085cSBen Gardon 	return zapped;
2253c50d8ae3SPaolo Bonzini }
2254c50d8ae3SPaolo Bonzini 
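/* Drop every parent SPTE that points at the shadow page being zapped. */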
2255c50d8ae3SPaolo Bonzini static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2256c50d8ae3SPaolo Bonzini {
2257c50d8ae3SPaolo Bonzini 	u64 *sptep;
2258c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
2259c50d8ae3SPaolo Bonzini 
2260c50d8ae3SPaolo Bonzini 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2261c50d8ae3SPaolo Bonzini 		drop_parent_pte(sp, sptep);
2262c50d8ae3SPaolo Bonzini }
2263c50d8ae3SPaolo Bonzini 
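/*
 * Zap all unsync shadow pages reachable from @parent, returning the number
 * of shadow pages zapped.
 */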
2264c50d8ae3SPaolo Bonzini static int mmu_zap_unsync_children(struct kvm *kvm,
2265c50d8ae3SPaolo Bonzini 				   struct kvm_mmu_page *parent,
2266c50d8ae3SPaolo Bonzini 				   struct list_head *invalid_list)
2267c50d8ae3SPaolo Bonzini {
2268c50d8ae3SPaolo Bonzini 	int i, zapped = 0;
2269c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
2270c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
2271c50d8ae3SPaolo Bonzini 
22723bae0459SSean Christopherson 	if (parent->role.level == PG_LEVEL_4K)
2273c50d8ae3SPaolo Bonzini 		return 0;
2274c50d8ae3SPaolo Bonzini 
2275c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
2276c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
2277c50d8ae3SPaolo Bonzini 
2278c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
2279c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2280c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
2281c50d8ae3SPaolo Bonzini 			zapped++;
2282c50d8ae3SPaolo Bonzini 		}
2283c50d8ae3SPaolo Bonzini 	}
2284c50d8ae3SPaolo Bonzini 
2285c50d8ae3SPaolo Bonzini 	return zapped;
2286c50d8ae3SPaolo Bonzini }
2287c50d8ae3SPaolo Bonzini 
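/*
 * Prepare @sp for zapping: zap its children, unlink it from its parents and
 * put it on @invalid_list (in-use roots stay off the list and are freed when
 * their root_count drops to zero).  *nr_zapped is set to the number of pages
 * zapped; the return value indicates whether zapping children made the
 * active_mmu_pages list unstable.
 */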
2288c50d8ae3SPaolo Bonzini static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2289c50d8ae3SPaolo Bonzini 				       struct kvm_mmu_page *sp,
2290c50d8ae3SPaolo Bonzini 				       struct list_head *invalid_list,
2291c50d8ae3SPaolo Bonzini 				       int *nr_zapped)
2292c50d8ae3SPaolo Bonzini {
2293c50d8ae3SPaolo Bonzini 	bool list_unstable;
2294c50d8ae3SPaolo Bonzini 
2295c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_prepare_zap_page(sp);
2296c50d8ae3SPaolo Bonzini 	++kvm->stat.mmu_shadow_zapped;
2297c50d8ae3SPaolo Bonzini 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
22982de4085cSBen Gardon 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2299c50d8ae3SPaolo Bonzini 	kvm_mmu_unlink_parents(kvm, sp);
2300c50d8ae3SPaolo Bonzini 
2301c50d8ae3SPaolo Bonzini 	/* Zapping children means active_mmu_pages has become unstable. */
2302c50d8ae3SPaolo Bonzini 	list_unstable = *nr_zapped;
2303c50d8ae3SPaolo Bonzini 
2304c50d8ae3SPaolo Bonzini 	if (!sp->role.invalid && !sp->role.direct)
2305c50d8ae3SPaolo Bonzini 		unaccount_shadowed(kvm, sp);
2306c50d8ae3SPaolo Bonzini 
2307c50d8ae3SPaolo Bonzini 	if (sp->unsync)
2308c50d8ae3SPaolo Bonzini 		kvm_unlink_unsync_page(kvm, sp);
2309c50d8ae3SPaolo Bonzini 	if (!sp->root_count) {
2310c50d8ae3SPaolo Bonzini 		/* Count self */
2311c50d8ae3SPaolo Bonzini 		(*nr_zapped)++;
2312f95eec9bSSean Christopherson 
2313f95eec9bSSean Christopherson 		/*
2314f95eec9bSSean Christopherson 		 * Already invalid pages (previously active roots) are not on
2315f95eec9bSSean Christopherson 		 * the active page list.  See list_del() in the "else" case of
2316f95eec9bSSean Christopherson 		 * !sp->root_count.
2317f95eec9bSSean Christopherson 		 */
2318f95eec9bSSean Christopherson 		if (sp->role.invalid)
2319f95eec9bSSean Christopherson 			list_add(&sp->link, invalid_list);
2320f95eec9bSSean Christopherson 		else
2321c50d8ae3SPaolo Bonzini 			list_move(&sp->link, invalid_list);
2322c50d8ae3SPaolo Bonzini 		kvm_mod_used_mmu_pages(kvm, -1);
2323c50d8ae3SPaolo Bonzini 	} else {
2324f95eec9bSSean Christopherson 		/*
2325f95eec9bSSean Christopherson 		 * Remove the active root from the active page list, the root
2326f95eec9bSSean Christopherson 		 * will be explicitly freed when the root_count hits zero.
2327f95eec9bSSean Christopherson 		 */
2328f95eec9bSSean Christopherson 		list_del(&sp->link);
2329c50d8ae3SPaolo Bonzini 
2330c50d8ae3SPaolo Bonzini 		/*
2331c50d8ae3SPaolo Bonzini 		 * Obsolete pages cannot be used on any vCPUs, see the comment
2332c50d8ae3SPaolo Bonzini 		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2333c50d8ae3SPaolo Bonzini 		 * treats invalid shadow pages as being obsolete.
2334c50d8ae3SPaolo Bonzini 		 */
2335c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
2336c50d8ae3SPaolo Bonzini 			kvm_reload_remote_mmus(kvm);
2337c50d8ae3SPaolo Bonzini 	}
2338c50d8ae3SPaolo Bonzini 
2339c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
2340c50d8ae3SPaolo Bonzini 		unaccount_huge_nx_page(kvm, sp);
2341c50d8ae3SPaolo Bonzini 
2342c50d8ae3SPaolo Bonzini 	sp->role.invalid = 1;
2343c50d8ae3SPaolo Bonzini 	return list_unstable;
2344c50d8ae3SPaolo Bonzini }
2345c50d8ae3SPaolo Bonzini 
2346c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2347c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list)
2348c50d8ae3SPaolo Bonzini {
2349c50d8ae3SPaolo Bonzini 	int nr_zapped;
2350c50d8ae3SPaolo Bonzini 
2351c50d8ae3SPaolo Bonzini 	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2352c50d8ae3SPaolo Bonzini 	return nr_zapped;
2353c50d8ae3SPaolo Bonzini }
2354c50d8ae3SPaolo Bonzini 
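/*
 * Finish zapping the pages on @invalid_list: flush remote TLBs so no vCPU
 * can still be using them, then free the shadow pages.
 */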
2355c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2356c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list)
2357c50d8ae3SPaolo Bonzini {
2358c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *nsp;
2359c50d8ae3SPaolo Bonzini 
2360c50d8ae3SPaolo Bonzini 	if (list_empty(invalid_list))
2361c50d8ae3SPaolo Bonzini 		return;
2362c50d8ae3SPaolo Bonzini 
2363c50d8ae3SPaolo Bonzini 	/*
2364c50d8ae3SPaolo Bonzini 	 * We need to make sure everyone sees our modifications to
2365c50d8ae3SPaolo Bonzini 	 * the page tables and see changes to vcpu->mode here. The barrier
2366c50d8ae3SPaolo Bonzini 	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
2367c50d8ae3SPaolo Bonzini 	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2368c50d8ae3SPaolo Bonzini 	 *
2369c50d8ae3SPaolo Bonzini 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2370c50d8ae3SPaolo Bonzini 	 * guest mode and/or lockless shadow page table walks.
2371c50d8ae3SPaolo Bonzini 	 */
2372c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs(kvm);
2373c50d8ae3SPaolo Bonzini 
2374c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2375c50d8ae3SPaolo Bonzini 		WARN_ON(!sp->role.invalid || sp->root_count);
2376c50d8ae3SPaolo Bonzini 		kvm_mmu_free_page(sp);
2377c50d8ae3SPaolo Bonzini 	}
2378c50d8ae3SPaolo Bonzini }
2379c50d8ae3SPaolo Bonzini 
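/*
 * Zap up to @nr_to_zap shadow pages, starting with the oldest entries on
 * active_mmu_pages.  Active roots are skipped.  Returns the number of pages
 * zapped.
 */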
23806b82ef2cSSean Christopherson static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
23816b82ef2cSSean Christopherson 						  unsigned long nr_to_zap)
2382c50d8ae3SPaolo Bonzini {
23836b82ef2cSSean Christopherson 	unsigned long total_zapped = 0;
23846b82ef2cSSean Christopherson 	struct kvm_mmu_page *sp, *tmp;
2385ba7888ddSSean Christopherson 	LIST_HEAD(invalid_list);
23866b82ef2cSSean Christopherson 	bool unstable;
23876b82ef2cSSean Christopherson 	int nr_zapped;
2388c50d8ae3SPaolo Bonzini 
2389c50d8ae3SPaolo Bonzini 	if (list_empty(&kvm->arch.active_mmu_pages))
2390ba7888ddSSean Christopherson 		return 0;
2391c50d8ae3SPaolo Bonzini 
23926b82ef2cSSean Christopherson restart:
23938fc51726SSean Christopherson 	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
23946b82ef2cSSean Christopherson 		/*
23956b82ef2cSSean Christopherson 		 * Don't zap active root pages, the page itself can't be freed
23966b82ef2cSSean Christopherson 		 * and zapping it will just force vCPUs to realloc and reload.
23976b82ef2cSSean Christopherson 		 */
23986b82ef2cSSean Christopherson 		if (sp->root_count)
23996b82ef2cSSean Christopherson 			continue;
24006b82ef2cSSean Christopherson 
24016b82ef2cSSean Christopherson 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
24026b82ef2cSSean Christopherson 						      &nr_zapped);
24036b82ef2cSSean Christopherson 		total_zapped += nr_zapped;
24046b82ef2cSSean Christopherson 		if (total_zapped >= nr_to_zap)
2405ba7888ddSSean Christopherson 			break;
2406ba7888ddSSean Christopherson 
24076b82ef2cSSean Christopherson 		if (unstable)
24086b82ef2cSSean Christopherson 			goto restart;
2409ba7888ddSSean Christopherson 	}
24106b82ef2cSSean Christopherson 
24116b82ef2cSSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
24126b82ef2cSSean Christopherson 
24136b82ef2cSSean Christopherson 	kvm->stat.mmu_recycled += total_zapped;
24146b82ef2cSSean Christopherson 	return total_zapped;
24156b82ef2cSSean Christopherson }
24166b82ef2cSSean Christopherson 
2417afe8d7e6SSean Christopherson static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2418afe8d7e6SSean Christopherson {
2419afe8d7e6SSean Christopherson 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2420afe8d7e6SSean Christopherson 		return kvm->arch.n_max_mmu_pages -
2421afe8d7e6SSean Christopherson 			kvm->arch.n_used_mmu_pages;
2422afe8d7e6SSean Christopherson 
2423afe8d7e6SSean Christopherson 	return 0;
2424c50d8ae3SPaolo Bonzini }
2425c50d8ae3SPaolo Bonzini 
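/*
 * Ensure shadow pages are available before handling a page fault, zapping
 * the oldest pages if fewer than KVM_MIN_FREE_MMU_PAGES are free.  Returns
 * -ENOSPC if no page could be made available.
 */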
2426ba7888ddSSean Christopherson static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2427ba7888ddSSean Christopherson {
24286b82ef2cSSean Christopherson 	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2429ba7888ddSSean Christopherson 
24306b82ef2cSSean Christopherson 	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2431ba7888ddSSean Christopherson 		return 0;
2432ba7888ddSSean Christopherson 
24336b82ef2cSSean Christopherson 	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2434ba7888ddSSean Christopherson 
24356e6ec584SSean Christopherson 	/*
24366e6ec584SSean Christopherson 	 * Note, this check is intentionally soft, it only guarantees that one
24376e6ec584SSean Christopherson 	 * page is available, while the caller may end up allocating as many as
24386e6ec584SSean Christopherson 	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
24396e6ec584SSean Christopherson 	 * exceeding the (arbitrary by default) limit will not harm the host,
24406e6ec584SSean Christopherson 	 * being too aggressive may unnecessarily kill the guest, and getting an
24416e6ec584SSean Christopherson 	 * exact count is far more trouble than it's worth, especially in the
24426e6ec584SSean Christopherson 	 * page fault paths.
24436e6ec584SSean Christopherson 	 */
2444ba7888ddSSean Christopherson 	if (!kvm_mmu_available_pages(vcpu->kvm))
2445ba7888ddSSean Christopherson 		return -ENOSPC;
2446ba7888ddSSean Christopherson 	return 0;
2447ba7888ddSSean Christopherson }
2448ba7888ddSSean Christopherson 
2449c50d8ae3SPaolo Bonzini /*
2450c50d8ae3SPaolo Bonzini  * Changing the number of mmu pages allocated to the vm.
2451c50d8ae3SPaolo Bonzini  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2452c50d8ae3SPaolo Bonzini  */
2453c50d8ae3SPaolo Bonzini void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2454c50d8ae3SPaolo Bonzini {
2455531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
2456c50d8ae3SPaolo Bonzini 
2457c50d8ae3SPaolo Bonzini 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
24586b82ef2cSSean Christopherson 		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
24596b82ef2cSSean Christopherson 						  goal_nr_mmu_pages);
2460c50d8ae3SPaolo Bonzini 
2461c50d8ae3SPaolo Bonzini 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2462c50d8ae3SPaolo Bonzini 	}
2463c50d8ae3SPaolo Bonzini 
2464c50d8ae3SPaolo Bonzini 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2465c50d8ae3SPaolo Bonzini 
2466531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
2467c50d8ae3SPaolo Bonzini }
2468c50d8ae3SPaolo Bonzini 
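/*
 * Zap all indirect shadow pages that map @gfn, so that the gfn no longer
 * needs to be write-protected for shadow paging.  Returns 1 if any shadow
 * page was zapped, 0 otherwise.
 */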
2469c50d8ae3SPaolo Bonzini int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2470c50d8ae3SPaolo Bonzini {
2471c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2472c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2473c50d8ae3SPaolo Bonzini 	int r;
2474c50d8ae3SPaolo Bonzini 
2475c50d8ae3SPaolo Bonzini 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2476c50d8ae3SPaolo Bonzini 	r = 0;
2477531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
2478c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2479c50d8ae3SPaolo Bonzini 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2480c50d8ae3SPaolo Bonzini 			 sp->role.word);
2481c50d8ae3SPaolo Bonzini 		r = 1;
2482c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2483c50d8ae3SPaolo Bonzini 	}
2484c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2485531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
2486c50d8ae3SPaolo Bonzini 
2487c50d8ae3SPaolo Bonzini 	return r;
2488c50d8ae3SPaolo Bonzini }
248996ad91aeSSean Christopherson 
249096ad91aeSSean Christopherson static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
249196ad91aeSSean Christopherson {
249296ad91aeSSean Christopherson 	gpa_t gpa;
249396ad91aeSSean Christopherson 	int r;
249496ad91aeSSean Christopherson 
249596ad91aeSSean Christopherson 	if (vcpu->arch.mmu->direct_map)
249696ad91aeSSean Christopherson 		return 0;
249796ad91aeSSean Christopherson 
249896ad91aeSSean Christopherson 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
249996ad91aeSSean Christopherson 
250096ad91aeSSean Christopherson 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
250196ad91aeSSean Christopherson 
250296ad91aeSSean Christopherson 	return r;
250396ad91aeSSean Christopherson }
2504c50d8ae3SPaolo Bonzini 
2505c50d8ae3SPaolo Bonzini static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2506c50d8ae3SPaolo Bonzini {
2507c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_unsync_page(sp);
2508c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_unsync;
2509c50d8ae3SPaolo Bonzini 	sp->unsync = 1;
2510c50d8ae3SPaolo Bonzini 
2511c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
2512c50d8ae3SPaolo Bonzini }
2513c50d8ae3SPaolo Bonzini 
25140337f585SSean Christopherson /*
25150337f585SSean Christopherson  * Attempt to unsync any shadow pages that can be reached by the specified gfn,
25160337f585SSean Christopherson  * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
25170337f585SSean Christopherson  * for which KVM is creating a writable mapping.  Returns 0 if all pages
25180337f585SSean Christopherson  * be write-protected.
25190337f585SSean Christopherson  */
25200337f585SSean Christopherson int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
2521c50d8ae3SPaolo Bonzini {
2522c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2523c50d8ae3SPaolo Bonzini 
25240337f585SSean Christopherson 	/*
25250337f585SSean Christopherson 	 * Force write-protection if the page is being tracked.  Note, the page
25260337f585SSean Christopherson 	 * track machinery is used to write-protect upper-level shadow pages,
25270337f585SSean Christopherson 	 * i.e. this guards the role.level == 4K assertion below!
25280337f585SSean Christopherson 	 */
2529c50d8ae3SPaolo Bonzini 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
25300337f585SSean Christopherson 		return -EPERM;
2531c50d8ae3SPaolo Bonzini 
25320337f585SSean Christopherson 	/*
25330337f585SSean Christopherson 	 * The page is not write-tracked, mark existing shadow pages unsync
25340337f585SSean Christopherson 	 * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
25350337f585SSean Christopherson 	 * that case, KVM must complete emulation of the guest TLB flush before
25360337f585SSean Christopherson 	 * allowing shadow pages to become unsync (writable by the guest).
25370337f585SSean Christopherson 	 */
2538c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2539c50d8ae3SPaolo Bonzini 		if (!can_unsync)
25400337f585SSean Christopherson 			return -EPERM;
2541c50d8ae3SPaolo Bonzini 
2542c50d8ae3SPaolo Bonzini 		if (sp->unsync)
2543c50d8ae3SPaolo Bonzini 			continue;
2544c50d8ae3SPaolo Bonzini 
25453bae0459SSean Christopherson 		WARN_ON(sp->role.level != PG_LEVEL_4K);
2546c50d8ae3SPaolo Bonzini 		kvm_unsync_page(vcpu, sp);
2547c50d8ae3SPaolo Bonzini 	}
2548c50d8ae3SPaolo Bonzini 
2549c50d8ae3SPaolo Bonzini 	/*
2550c50d8ae3SPaolo Bonzini 	 * We need to ensure that the marking of unsync pages is visible
2551c50d8ae3SPaolo Bonzini 	 * before the SPTE is updated to allow writes because
2552c50d8ae3SPaolo Bonzini 	 * kvm_mmu_sync_roots() checks the unsync flags without holding
2553c50d8ae3SPaolo Bonzini 	 * the MMU lock and so can race with this. If the SPTE was updated
2554c50d8ae3SPaolo Bonzini 	 * before the page had been marked as unsync-ed, something like the
2555c50d8ae3SPaolo Bonzini 	 * following could happen:
2556c50d8ae3SPaolo Bonzini 	 *
2557c50d8ae3SPaolo Bonzini 	 * CPU 1                    CPU 2
2558c50d8ae3SPaolo Bonzini 	 * ---------------------------------------------------------------------
2559c50d8ae3SPaolo Bonzini 	 * 1.2 Host updates SPTE
2560c50d8ae3SPaolo Bonzini 	 *     to be writable
2561c50d8ae3SPaolo Bonzini 	 *                      2.1 Guest writes a GPTE for GVA X.
2562c50d8ae3SPaolo Bonzini 	 *                          (GPTE being in the guest page table shadowed
2563c50d8ae3SPaolo Bonzini 	 *                           by the SP from CPU 1.)
2564c50d8ae3SPaolo Bonzini 	 *                          This reads SPTE during the page table walk.
2565c50d8ae3SPaolo Bonzini 	 *                          Since SPTE.W is read as 1, there is no
2566c50d8ae3SPaolo Bonzini 	 *                          fault.
2567c50d8ae3SPaolo Bonzini 	 *
2568c50d8ae3SPaolo Bonzini 	 *                      2.2 Guest issues TLB flush.
2569c50d8ae3SPaolo Bonzini 	 *                          That causes a VM Exit.
2570c50d8ae3SPaolo Bonzini 	 *
25710337f585SSean Christopherson 	 *                      2.3 Walking of unsync pages sees sp->unsync is
25720337f585SSean Christopherson 	 *                          false and skips the page.
2573c50d8ae3SPaolo Bonzini 	 *
2574c50d8ae3SPaolo Bonzini 	 *                      2.4 Guest accesses GVA X.
2575c50d8ae3SPaolo Bonzini 	 *                          Since the mapping in the SP was not updated,
2576c50d8ae3SPaolo Bonzini 	 *                          the old mapping for GVA X incorrectly
2577c50d8ae3SPaolo Bonzini 	 *                          gets used.
2578c50d8ae3SPaolo Bonzini 	 * 1.1 Host marks SP
2579c50d8ae3SPaolo Bonzini 	 *     as unsync
2580c50d8ae3SPaolo Bonzini 	 *     (sp->unsync = true)
2581c50d8ae3SPaolo Bonzini 	 *
2582c50d8ae3SPaolo Bonzini 	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
2583c50d8ae3SPaolo Bonzini 	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
2584c50d8ae3SPaolo Bonzini 	 * pairs with this write barrier.
2585c50d8ae3SPaolo Bonzini 	 */
2586c50d8ae3SPaolo Bonzini 	smp_wmb();
2587c50d8ae3SPaolo Bonzini 
25880337f585SSean Christopherson 	return 0;
2589c50d8ae3SPaolo Bonzini }
2590c50d8ae3SPaolo Bonzini 
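/*
 * Compute a new SPTE via make_spte() and install it.  Returns a mask of
 * SET_SPTE_* flags describing any follow-up work required by the caller.
 */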
2591799a4190SBen Gardon static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2592799a4190SBen Gardon 		    unsigned int pte_access, int level,
2593799a4190SBen Gardon 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2594799a4190SBen Gardon 		    bool can_unsync, bool host_writable)
2595799a4190SBen Gardon {
2596799a4190SBen Gardon 	u64 spte;
2597799a4190SBen Gardon 	struct kvm_mmu_page *sp;
2598799a4190SBen Gardon 	int ret;
2599799a4190SBen Gardon 
2600799a4190SBen Gardon 	sp = sptep_to_sp(sptep);
2601799a4190SBen Gardon 
2602799a4190SBen Gardon 	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
2603799a4190SBen Gardon 			can_unsync, host_writable, sp_ad_disabled(sp), &spte);
2604799a4190SBen Gardon 
2605799a4190SBen Gardon 	if (spte & PT_WRITABLE_MASK)
2606799a4190SBen Gardon 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
2607799a4190SBen Gardon 
260812703759SSean Christopherson 	if (*sptep == spte)
260912703759SSean Christopherson 		ret |= SET_SPTE_SPURIOUS;
261012703759SSean Christopherson 	else if (mmu_spte_update(sptep, spte))
2611c50d8ae3SPaolo Bonzini 		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
2612c50d8ae3SPaolo Bonzini 	return ret;
2613c50d8ae3SPaolo Bonzini }
2614c50d8ae3SPaolo Bonzini 
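/*
 * Create or update the SPTE for @gfn, dropping any existing SPTE that points
 * at a different pfn or at a page table that is being replaced by a huge
 * page, and keeping rmaps and statistics up to date.  Returns a RET_PF_*
 * value.
 */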
26150a2b64c5SBen Gardon static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2616e88b8093SSean Christopherson 			unsigned int pte_access, bool write_fault, int level,
26170a2b64c5SBen Gardon 			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
26180a2b64c5SBen Gardon 			bool host_writable)
2619c50d8ae3SPaolo Bonzini {
2620c50d8ae3SPaolo Bonzini 	int was_rmapped = 0;
2621c50d8ae3SPaolo Bonzini 	int rmap_count;
2622c50d8ae3SPaolo Bonzini 	int set_spte_ret;
2623c4371c2aSSean Christopherson 	int ret = RET_PF_FIXED;
2624c50d8ae3SPaolo Bonzini 	bool flush = false;
2625c50d8ae3SPaolo Bonzini 
2626c50d8ae3SPaolo Bonzini 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2627c50d8ae3SPaolo Bonzini 		 *sptep, write_fault, gfn);
2628c50d8ae3SPaolo Bonzini 
2629a54aa15cSSean Christopherson 	if (unlikely(is_noslot_pfn(pfn))) {
2630a54aa15cSSean Christopherson 		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2631a54aa15cSSean Christopherson 		return RET_PF_EMULATE;
2632a54aa15cSSean Christopherson 	}
2633a54aa15cSSean Christopherson 
2634c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep)) {
2635c50d8ae3SPaolo Bonzini 		/*
2636c50d8ae3SPaolo Bonzini 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2637c50d8ae3SPaolo Bonzini 		 * the parent of the now unreachable PTE.
2638c50d8ae3SPaolo Bonzini 		 */
26393bae0459SSean Christopherson 		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2640c50d8ae3SPaolo Bonzini 			struct kvm_mmu_page *child;
2641c50d8ae3SPaolo Bonzini 			u64 pte = *sptep;
2642c50d8ae3SPaolo Bonzini 
2643e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2644c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, sptep);
2645c50d8ae3SPaolo Bonzini 			flush = true;
2646c50d8ae3SPaolo Bonzini 		} else if (pfn != spte_to_pfn(*sptep)) {
2647c50d8ae3SPaolo Bonzini 			pgprintk("hfn old %llx new %llx\n",
2648c50d8ae3SPaolo Bonzini 				 spte_to_pfn(*sptep), pfn);
2649c50d8ae3SPaolo Bonzini 			drop_spte(vcpu->kvm, sptep);
2650c50d8ae3SPaolo Bonzini 			flush = true;
2651c50d8ae3SPaolo Bonzini 		} else
2652c50d8ae3SPaolo Bonzini 			was_rmapped = 1;
2653c50d8ae3SPaolo Bonzini 	}
2654c50d8ae3SPaolo Bonzini 
2655c50d8ae3SPaolo Bonzini 	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
2656c50d8ae3SPaolo Bonzini 				speculative, true, host_writable);
2657c50d8ae3SPaolo Bonzini 	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
2658c50d8ae3SPaolo Bonzini 		if (write_fault)
2659c50d8ae3SPaolo Bonzini 			ret = RET_PF_EMULATE;
26608c8560b8SSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2661c50d8ae3SPaolo Bonzini 	}
2662c50d8ae3SPaolo Bonzini 
2663c50d8ae3SPaolo Bonzini 	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
2664c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
2665c50d8ae3SPaolo Bonzini 				KVM_PAGES_PER_HPAGE(level));
2666c50d8ae3SPaolo Bonzini 
266712703759SSean Christopherson 	/*
266812703759SSean Christopherson 	 * The fault is fully spurious if and only if the new SPTE and old SPTE
266912703759SSean Christopherson 	 * are identical, and emulation is not required.
267012703759SSean Christopherson 	 */
267112703759SSean Christopherson 	if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
267212703759SSean Christopherson 		WARN_ON_ONCE(!was_rmapped);
267312703759SSean Christopherson 		return RET_PF_SPURIOUS;
267412703759SSean Christopherson 	}
267512703759SSean Christopherson 
2676c50d8ae3SPaolo Bonzini 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2677c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_set_spte(level, gfn, sptep);
2678c50d8ae3SPaolo Bonzini 	if (!was_rmapped && is_large_pte(*sptep))
2679c50d8ae3SPaolo Bonzini 		++vcpu->kvm->stat.lpages;
2680c50d8ae3SPaolo Bonzini 
2681c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep)) {
2682c50d8ae3SPaolo Bonzini 		if (!was_rmapped) {
2683c50d8ae3SPaolo Bonzini 			rmap_count = rmap_add(vcpu, sptep, gfn);
2684c50d8ae3SPaolo Bonzini 			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2685c50d8ae3SPaolo Bonzini 				rmap_recycle(vcpu, sptep, gfn);
2686c50d8ae3SPaolo Bonzini 		}
2687c50d8ae3SPaolo Bonzini 	}
2688c50d8ae3SPaolo Bonzini 
2689c50d8ae3SPaolo Bonzini 	return ret;
2690c50d8ae3SPaolo Bonzini }
2691c50d8ae3SPaolo Bonzini 
2692c50d8ae3SPaolo Bonzini static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2693c50d8ae3SPaolo Bonzini 				     bool no_dirty_log)
2694c50d8ae3SPaolo Bonzini {
2695c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
2696c50d8ae3SPaolo Bonzini 
2697c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2698c50d8ae3SPaolo Bonzini 	if (!slot)
2699c50d8ae3SPaolo Bonzini 		return KVM_PFN_ERR_FAULT;
2700c50d8ae3SPaolo Bonzini 
2701c50d8ae3SPaolo Bonzini 	return gfn_to_pfn_memslot_atomic(slot, gfn);
2702c50d8ae3SPaolo Bonzini }
2703c50d8ae3SPaolo Bonzini 
2704c50d8ae3SPaolo Bonzini static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2705c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp,
2706c50d8ae3SPaolo Bonzini 				    u64 *start, u64 *end)
2707c50d8ae3SPaolo Bonzini {
2708c50d8ae3SPaolo Bonzini 	struct page *pages[PTE_PREFETCH_NUM];
2709c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
27100a2b64c5SBen Gardon 	unsigned int access = sp->role.access;
2711c50d8ae3SPaolo Bonzini 	int i, ret;
2712c50d8ae3SPaolo Bonzini 	gfn_t gfn;
2713c50d8ae3SPaolo Bonzini 
2714c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2715c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2716c50d8ae3SPaolo Bonzini 	if (!slot)
2717c50d8ae3SPaolo Bonzini 		return -1;
2718c50d8ae3SPaolo Bonzini 
2719c50d8ae3SPaolo Bonzini 	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2720c50d8ae3SPaolo Bonzini 	if (ret <= 0)
2721c50d8ae3SPaolo Bonzini 		return -1;
2722c50d8ae3SPaolo Bonzini 
2723c50d8ae3SPaolo Bonzini 	for (i = 0; i < ret; i++, gfn++, start++) {
2724e88b8093SSean Christopherson 		mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
2725c50d8ae3SPaolo Bonzini 			     page_to_pfn(pages[i]), true, true);
2726c50d8ae3SPaolo Bonzini 		put_page(pages[i]);
2727c50d8ae3SPaolo Bonzini 	}
2728c50d8ae3SPaolo Bonzini 
2729c50d8ae3SPaolo Bonzini 	return 0;
2730c50d8ae3SPaolo Bonzini }
2731c50d8ae3SPaolo Bonzini 
2732c50d8ae3SPaolo Bonzini static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2733c50d8ae3SPaolo Bonzini 				  struct kvm_mmu_page *sp, u64 *sptep)
2734c50d8ae3SPaolo Bonzini {
2735c50d8ae3SPaolo Bonzini 	u64 *spte, *start = NULL;
2736c50d8ae3SPaolo Bonzini 	int i;
2737c50d8ae3SPaolo Bonzini 
2738c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
2739c50d8ae3SPaolo Bonzini 
2740c50d8ae3SPaolo Bonzini 	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2741c50d8ae3SPaolo Bonzini 	spte = sp->spt + i;
2742c50d8ae3SPaolo Bonzini 
2743c50d8ae3SPaolo Bonzini 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2744c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*spte) || spte == sptep) {
2745c50d8ae3SPaolo Bonzini 			if (!start)
2746c50d8ae3SPaolo Bonzini 				continue;
2747c50d8ae3SPaolo Bonzini 			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2748c50d8ae3SPaolo Bonzini 				break;
2749c50d8ae3SPaolo Bonzini 			start = NULL;
2750c50d8ae3SPaolo Bonzini 		} else if (!start)
2751c50d8ae3SPaolo Bonzini 			start = spte;
2752c50d8ae3SPaolo Bonzini 	}
2753c50d8ae3SPaolo Bonzini }
2754c50d8ae3SPaolo Bonzini 
2755c50d8ae3SPaolo Bonzini static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2756c50d8ae3SPaolo Bonzini {
2757c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2758c50d8ae3SPaolo Bonzini 
275957354682SSean Christopherson 	sp = sptep_to_sp(sptep);
2760c50d8ae3SPaolo Bonzini 
2761c50d8ae3SPaolo Bonzini 	/*
2762c50d8ae3SPaolo Bonzini 	 * Without accessed bits, there's no way to distinguish between
2763c50d8ae3SPaolo Bonzini 	 * actually accessed translations and prefetched, so disable pte
2764c50d8ae3SPaolo Bonzini 	 * prefetch if accessed bits aren't available.
2765c50d8ae3SPaolo Bonzini 	 */
2766c50d8ae3SPaolo Bonzini 	if (sp_ad_disabled(sp))
2767c50d8ae3SPaolo Bonzini 		return;
2768c50d8ae3SPaolo Bonzini 
27693bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
2770c50d8ae3SPaolo Bonzini 		return;
2771c50d8ae3SPaolo Bonzini 
27724a42d848SDavid Stevens 	/*
27734a42d848SDavid Stevens 	 * If addresses are being invalidated, skip prefetching to avoid
27744a42d848SDavid Stevens 	 * accidentally prefetching those addresses.
27754a42d848SDavid Stevens 	 */
27764a42d848SDavid Stevens 	if (unlikely(vcpu->kvm->mmu_notifier_count))
27774a42d848SDavid Stevens 		return;
27784a42d848SDavid Stevens 
2779c50d8ae3SPaolo Bonzini 	__direct_pte_prefetch(vcpu, sp, sptep);
2780c50d8ae3SPaolo Bonzini }
2781c50d8ae3SPaolo Bonzini 
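/*
 * Look up the host page table entry for @pfn's userspace mapping to find the
 * level (page size) at which the host maps it.  Returns a PG_LEVEL_* value.
 */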
27821b6d9d9eSSean Christopherson static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
27838ca6f063SBen Gardon 				  const struct kvm_memory_slot *slot)
2784db543216SSean Christopherson {
2785db543216SSean Christopherson 	unsigned long hva;
2786db543216SSean Christopherson 	pte_t *pte;
2787db543216SSean Christopherson 	int level;
2788db543216SSean Christopherson 
2789e851265aSSean Christopherson 	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
27903bae0459SSean Christopherson 		return PG_LEVEL_4K;
2791db543216SSean Christopherson 
2792293e306eSSean Christopherson 	/*
2793293e306eSSean Christopherson 	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
2794293e306eSSean Christopherson 	 * is not solely for performance, it's also necessary to avoid the
2795293e306eSSean Christopherson 	 * "writable" check in __gfn_to_hva_many(), which will always fail on
2796293e306eSSean Christopherson 	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
2797293e306eSSean Christopherson 	 * page fault steps have already verified the guest isn't writing a
2798293e306eSSean Christopherson 	 * read-only memslot.
2799293e306eSSean Christopherson 	 */
2800db543216SSean Christopherson 	hva = __gfn_to_hva_memslot(slot, gfn);
2801db543216SSean Christopherson 
28021b6d9d9eSSean Christopherson 	pte = lookup_address_in_mm(kvm->mm, hva, &level);
2803db543216SSean Christopherson 	if (unlikely(!pte))
28043bae0459SSean Christopherson 		return PG_LEVEL_4K;
2805db543216SSean Christopherson 
2806db543216SSean Christopherson 	return level;
2807db543216SSean Christopherson }
2808db543216SSean Christopherson 
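/*
 * Return the maximum level at which @gfn can be mapped, bounded by
 * @max_level, the memslot's disallow_lpage tracking and the level of the
 * host mapping.
 */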
28098ca6f063SBen Gardon int kvm_mmu_max_mapping_level(struct kvm *kvm,
28108ca6f063SBen Gardon 			      const struct kvm_memory_slot *slot, gfn_t gfn,
28118ca6f063SBen Gardon 			      kvm_pfn_t pfn, int max_level)
28121b6d9d9eSSean Christopherson {
28131b6d9d9eSSean Christopherson 	struct kvm_lpage_info *linfo;
28141b6d9d9eSSean Christopherson 
28151b6d9d9eSSean Christopherson 	max_level = min(max_level, max_huge_page_level);
28161b6d9d9eSSean Christopherson 	for ( ; max_level > PG_LEVEL_4K; max_level--) {
28171b6d9d9eSSean Christopherson 		linfo = lpage_info_slot(gfn, slot, max_level);
28181b6d9d9eSSean Christopherson 		if (!linfo->disallow_lpage)
28191b6d9d9eSSean Christopherson 			break;
28201b6d9d9eSSean Christopherson 	}
28211b6d9d9eSSean Christopherson 
28221b6d9d9eSSean Christopherson 	if (max_level == PG_LEVEL_4K)
28231b6d9d9eSSean Christopherson 		return PG_LEVEL_4K;
28241b6d9d9eSSean Christopherson 
28251b6d9d9eSSean Christopherson 	return host_pfn_mapping_level(kvm, gfn, pfn, slot);
28261b6d9d9eSSean Christopherson }
28271b6d9d9eSSean Christopherson 
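/*
 * Pick the level at which to map @gfn (at most @max_level) and adjust *pfnp
 * to the base of the resulting huge page.  *req_level returns the level
 * chosen before the iTLB multihit workaround forces a 4K mapping.
 */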
2828bb18842eSBen Gardon int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
28293cf06612SSean Christopherson 			    int max_level, kvm_pfn_t *pfnp,
28303cf06612SSean Christopherson 			    bool huge_page_disallowed, int *req_level)
28310885904dSSean Christopherson {
2832293e306eSSean Christopherson 	struct kvm_memory_slot *slot;
28330885904dSSean Christopherson 	kvm_pfn_t pfn = *pfnp;
283417eff019SSean Christopherson 	kvm_pfn_t mask;
283583f06fa7SSean Christopherson 	int level;
28360885904dSSean Christopherson 
28373cf06612SSean Christopherson 	*req_level = PG_LEVEL_4K;
28383cf06612SSean Christopherson 
28393bae0459SSean Christopherson 	if (unlikely(max_level == PG_LEVEL_4K))
28403bae0459SSean Christopherson 		return PG_LEVEL_4K;
284117eff019SSean Christopherson 
2842e851265aSSean Christopherson 	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
28433bae0459SSean Christopherson 		return PG_LEVEL_4K;
284417eff019SSean Christopherson 
2845293e306eSSean Christopherson 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
2846293e306eSSean Christopherson 	if (!slot)
28473bae0459SSean Christopherson 		return PG_LEVEL_4K;
2848293e306eSSean Christopherson 
28491b6d9d9eSSean Christopherson 	level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
28503bae0459SSean Christopherson 	if (level == PG_LEVEL_4K)
285183f06fa7SSean Christopherson 		return level;
285217eff019SSean Christopherson 
28533cf06612SSean Christopherson 	*req_level = level = min(level, max_level);
28543cf06612SSean Christopherson 
28553cf06612SSean Christopherson 	/*
28563cf06612SSean Christopherson 	 * Enforce the iTLB multihit workaround after capturing the requested
28573cf06612SSean Christopherson 	 * level, which will be used to do precise, accurate accounting.
28583cf06612SSean Christopherson 	 */
28593cf06612SSean Christopherson 	if (huge_page_disallowed)
28603cf06612SSean Christopherson 		return PG_LEVEL_4K;
28614cd071d1SSean Christopherson 
28620885904dSSean Christopherson 	/*
28634cd071d1SSean Christopherson 	 * mmu_notifier_retry() was successful and mmu_lock is held, so
28644cd071d1SSean Christopherson 	 * the pmd can't be split from under us.
28650885904dSSean Christopherson 	 */
28660885904dSSean Christopherson 	mask = KVM_PAGES_PER_HPAGE(level) - 1;
28670885904dSSean Christopherson 	VM_BUG_ON((gfn & mask) != (pfn & mask));
28684cd071d1SSean Christopherson 	*pfnp = pfn & ~mask;
286983f06fa7SSean Christopherson 
287083f06fa7SSean Christopherson 	return level;
28710885904dSSean Christopherson }
28720885904dSSean Christopherson 
2873bb18842eSBen Gardon void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
2874bb18842eSBen Gardon 				kvm_pfn_t *pfnp, int *goal_levelp)
2875c50d8ae3SPaolo Bonzini {
2876bb18842eSBen Gardon 	int level = *goal_levelp;
2877c50d8ae3SPaolo Bonzini 
28787d945312SBen Gardon 	if (cur_level == level && level > PG_LEVEL_4K &&
2879c50d8ae3SPaolo Bonzini 	    is_shadow_present_pte(spte) &&
2880c50d8ae3SPaolo Bonzini 	    !is_large_pte(spte)) {
2881c50d8ae3SPaolo Bonzini 		/*
2882c50d8ae3SPaolo Bonzini 		 * A small SPTE exists for this pfn, but FNAME(fetch)
2883c50d8ae3SPaolo Bonzini 		 * and __direct_map would like to create a large PTE
2884c50d8ae3SPaolo Bonzini 		 * instead: just force them to go down another level,
2885c50d8ae3SPaolo Bonzini 		 * patching the next 9 bits of the address back into
2886c50d8ae3SPaolo Bonzini 		 * the pfn.
2887c50d8ae3SPaolo Bonzini 		 */
28887d945312SBen Gardon 		u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
28897d945312SBen Gardon 				KVM_PAGES_PER_HPAGE(level - 1);
2890c50d8ae3SPaolo Bonzini 		*pfnp |= gfn & page_mask;
2891bb18842eSBen Gardon 		(*goal_levelp)--;
2892c50d8ae3SPaolo Bonzini 	}
2893c50d8ae3SPaolo Bonzini }
2894c50d8ae3SPaolo Bonzini 
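/*
 * Map @gpa in a direct MMU (TDP or non-paging): walk down to the target
 * level, allocating intermediate shadow pages as needed, then install the
 * final SPTE with mmu_set_spte().
 */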
28956c2fd34fSSean Christopherson static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
289683f06fa7SSean Christopherson 			int map_writable, int max_level, kvm_pfn_t pfn,
28976c2fd34fSSean Christopherson 			bool prefault, bool is_tdp)
2898c50d8ae3SPaolo Bonzini {
28996c2fd34fSSean Christopherson 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
29006c2fd34fSSean Christopherson 	bool write = error_code & PFERR_WRITE_MASK;
29016c2fd34fSSean Christopherson 	bool exec = error_code & PFERR_FETCH_MASK;
29026c2fd34fSSean Christopherson 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
2903c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator it;
2904c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
29053cf06612SSean Christopherson 	int level, req_level, ret;
2906c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
2907c50d8ae3SPaolo Bonzini 	gfn_t base_gfn = gfn;
2908c50d8ae3SPaolo Bonzini 
29093cf06612SSean Christopherson 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
29103cf06612SSean Christopherson 					huge_page_disallowed, &req_level);
29114cd071d1SSean Christopherson 
2912c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
2913c50d8ae3SPaolo Bonzini 	for_each_shadow_entry(vcpu, gpa, it) {
2914c50d8ae3SPaolo Bonzini 		/*
2915c50d8ae3SPaolo Bonzini 		 * We cannot overwrite existing page tables with an NX
2916c50d8ae3SPaolo Bonzini 		 * large page, as the leaf could be executable.
2917c50d8ae3SPaolo Bonzini 		 */
2918dcc70651SSean Christopherson 		if (nx_huge_page_workaround_enabled)
29197d945312SBen Gardon 			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
29207d945312SBen Gardon 						   &pfn, &level);
2921c50d8ae3SPaolo Bonzini 
2922c50d8ae3SPaolo Bonzini 		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
2923c50d8ae3SPaolo Bonzini 		if (it.level == level)
2924c50d8ae3SPaolo Bonzini 			break;
2925c50d8ae3SPaolo Bonzini 
2926c50d8ae3SPaolo Bonzini 		drop_large_spte(vcpu, it.sptep);
2927c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(*it.sptep)) {
2928c50d8ae3SPaolo Bonzini 			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
2929c50d8ae3SPaolo Bonzini 					      it.level - 1, true, ACC_ALL);
2930c50d8ae3SPaolo Bonzini 
2931c50d8ae3SPaolo Bonzini 			link_shadow_page(vcpu, it.sptep, sp);
29325bcaf3e1SSean Christopherson 			if (is_tdp && huge_page_disallowed &&
29335bcaf3e1SSean Christopherson 			    req_level >= it.level)
2934c50d8ae3SPaolo Bonzini 				account_huge_nx_page(vcpu->kvm, sp);
2935c50d8ae3SPaolo Bonzini 		}
2936c50d8ae3SPaolo Bonzini 	}
2937c50d8ae3SPaolo Bonzini 
2938c50d8ae3SPaolo Bonzini 	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
2939c50d8ae3SPaolo Bonzini 			   write, level, base_gfn, pfn, prefault,
2940c50d8ae3SPaolo Bonzini 			   map_writable);
294112703759SSean Christopherson 	if (ret == RET_PF_SPURIOUS)
294212703759SSean Christopherson 		return ret;
294312703759SSean Christopherson 
2944c50d8ae3SPaolo Bonzini 	direct_pte_prefetch(vcpu, it.sptep);
2945c50d8ae3SPaolo Bonzini 	++vcpu->stat.pf_fixed;
2946c50d8ae3SPaolo Bonzini 	return ret;
2947c50d8ae3SPaolo Bonzini }
2948c50d8ae3SPaolo Bonzini 
2949c50d8ae3SPaolo Bonzini static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2950c50d8ae3SPaolo Bonzini {
2951c50d8ae3SPaolo Bonzini 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
2952c50d8ae3SPaolo Bonzini }
2953c50d8ae3SPaolo Bonzini 
2954c50d8ae3SPaolo Bonzini static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2955c50d8ae3SPaolo Bonzini {
2956c50d8ae3SPaolo Bonzini 	/*
2957c50d8ae3SPaolo Bonzini 	 * Do not cache the mmio info caused by writing the readonly gfn
2958c50d8ae3SPaolo Bonzini 	 * into the spte, otherwise a read access on the readonly gfn would
2959c50d8ae3SPaolo Bonzini 	 * also cause an mmio page fault and be treated as mmio access.
2960c50d8ae3SPaolo Bonzini 	 */
2961c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_RO_FAULT)
2962c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
2963c50d8ae3SPaolo Bonzini 
2964c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_HWPOISON) {
2965c50d8ae3SPaolo Bonzini 		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
2966c50d8ae3SPaolo Bonzini 		return RET_PF_RETRY;
2967c50d8ae3SPaolo Bonzini 	}
2968c50d8ae3SPaolo Bonzini 
2969c50d8ae3SPaolo Bonzini 	return -EFAULT;
2970c50d8ae3SPaolo Bonzini }
2971c50d8ae3SPaolo Bonzini 
2972c50d8ae3SPaolo Bonzini static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
29730a2b64c5SBen Gardon 				kvm_pfn_t pfn, unsigned int access,
29740a2b64c5SBen Gardon 				int *ret_val)
2975c50d8ae3SPaolo Bonzini {
2976c50d8ae3SPaolo Bonzini 	/* The pfn is invalid, report the error! */
2977c50d8ae3SPaolo Bonzini 	if (unlikely(is_error_pfn(pfn))) {
2978c50d8ae3SPaolo Bonzini 		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
2979c50d8ae3SPaolo Bonzini 		return true;
2980c50d8ae3SPaolo Bonzini 	}
2981c50d8ae3SPaolo Bonzini 
298230ab5901SSean Christopherson 	if (unlikely(is_noslot_pfn(pfn))) {
2983c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, gva, gfn,
2984c50d8ae3SPaolo Bonzini 				     access & shadow_mmio_access_mask);
298530ab5901SSean Christopherson 		/*
298630ab5901SSean Christopherson 		 * If MMIO caching is disabled, emulate immediately without
298730ab5901SSean Christopherson 		 * touching the shadow page tables as attempting to install an
298830ab5901SSean Christopherson 		 * MMIO SPTE will just be an expensive nop.
298930ab5901SSean Christopherson 		 */
299030ab5901SSean Christopherson 		if (unlikely(!shadow_mmio_value)) {
299130ab5901SSean Christopherson 			*ret_val = RET_PF_EMULATE;
299230ab5901SSean Christopherson 			return true;
299330ab5901SSean Christopherson 		}
299430ab5901SSean Christopherson 	}
2995c50d8ae3SPaolo Bonzini 
2996c50d8ae3SPaolo Bonzini 	return false;
2997c50d8ae3SPaolo Bonzini }
2998c50d8ae3SPaolo Bonzini 
2999c50d8ae3SPaolo Bonzini static bool page_fault_can_be_fast(u32 error_code)
3000c50d8ae3SPaolo Bonzini {
3001c50d8ae3SPaolo Bonzini 	/*
3002c50d8ae3SPaolo Bonzini 	 * Do not fix the mmio spte with invalid generation number which
3003c50d8ae3SPaolo Bonzini 	 * need to be updated by slow page fault path.
3004c50d8ae3SPaolo Bonzini 	 */
3005c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK))
3006c50d8ae3SPaolo Bonzini 		return false;
3007c50d8ae3SPaolo Bonzini 
3008c50d8ae3SPaolo Bonzini 	/* See if the page fault is due to an NX violation */
3009c50d8ae3SPaolo Bonzini 	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
3010c50d8ae3SPaolo Bonzini 		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
3011c50d8ae3SPaolo Bonzini 		return false;
3012c50d8ae3SPaolo Bonzini 
3013c50d8ae3SPaolo Bonzini 	/*
3014c50d8ae3SPaolo Bonzini 	 * #PF can be fast if:
3015c50d8ae3SPaolo Bonzini 	 * 1. The shadow page table entry is not present, which could mean that
3016c50d8ae3SPaolo Bonzini 	 *    the fault is potentially caused by access tracking (if enabled).
3017c50d8ae3SPaolo Bonzini 	 * 2. The shadow page table entry is present and the fault
3018c50d8ae3SPaolo Bonzini 	 *    is caused by write-protect, which means we just need to change the W
3019c50d8ae3SPaolo Bonzini 	 *    bit of the spte which can be done out of mmu-lock.
3020c50d8ae3SPaolo Bonzini 	 *
3021c50d8ae3SPaolo Bonzini 	 * However, if access tracking is disabled we know that a non-present
3022c50d8ae3SPaolo Bonzini 	 * page must be a genuine page fault where we have to create a new SPTE.
3023c50d8ae3SPaolo Bonzini 	 * So, if access tracking is disabled, we return true only for write
3024c50d8ae3SPaolo Bonzini 	 * accesses to a present page.
3025c50d8ae3SPaolo Bonzini 	 */
3026c50d8ae3SPaolo Bonzini 
3027c50d8ae3SPaolo Bonzini 	return shadow_acc_track_mask != 0 ||
3028c50d8ae3SPaolo Bonzini 	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
3029c50d8ae3SPaolo Bonzini 		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3030c50d8ae3SPaolo Bonzini }
3031c50d8ae3SPaolo Bonzini 
3032c50d8ae3SPaolo Bonzini /*
3033c50d8ae3SPaolo Bonzini  * Returns true if the SPTE was fixed successfully. Otherwise,
3034c50d8ae3SPaolo Bonzini  * someone else modified the SPTE from its original value.
3035c50d8ae3SPaolo Bonzini  */
3036c50d8ae3SPaolo Bonzini static bool
3037c50d8ae3SPaolo Bonzini fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3038c50d8ae3SPaolo Bonzini 			u64 *sptep, u64 old_spte, u64 new_spte)
3039c50d8ae3SPaolo Bonzini {
3040c50d8ae3SPaolo Bonzini 	gfn_t gfn;
3041c50d8ae3SPaolo Bonzini 
3042c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
3043c50d8ae3SPaolo Bonzini 
3044c50d8ae3SPaolo Bonzini 	/*
3045c50d8ae3SPaolo Bonzini 	 * Theoretically we could also set dirty bit (and flush TLB) here in
3046c50d8ae3SPaolo Bonzini 	 * order to eliminate unnecessary PML logging. See comments in
3047c50d8ae3SPaolo Bonzini 	 * set_spte. But fast_page_fault is very unlikely to happen with PML
3048c50d8ae3SPaolo Bonzini 	 * enabled, so we do not do this. This might result in the same GPA
3049c50d8ae3SPaolo Bonzini 	 * being logged in the PML buffer again when the write really happens,
3050c50d8ae3SPaolo Bonzini 	 * and mark_page_dirty eventually being called twice. But it's also no
3051c50d8ae3SPaolo Bonzini 	 * harm. This also avoids the TLB flush needed after setting dirty bit
3052c50d8ae3SPaolo Bonzini 	 * so non-PML cases won't be impacted.
3053c50d8ae3SPaolo Bonzini 	 *
3054c50d8ae3SPaolo Bonzini 	 * Compare with set_spte where instead shadow_dirty_mask is set.
3055c50d8ae3SPaolo Bonzini 	 */
3056c50d8ae3SPaolo Bonzini 	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3057c50d8ae3SPaolo Bonzini 		return false;
3058c50d8ae3SPaolo Bonzini 
3059c50d8ae3SPaolo Bonzini 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3060c50d8ae3SPaolo Bonzini 		/*
3061c50d8ae3SPaolo Bonzini 		 * The gfn of direct spte is stable since it is
3062c50d8ae3SPaolo Bonzini 		 * calculated from sp->gfn.
3063c50d8ae3SPaolo Bonzini 		 */
3064c50d8ae3SPaolo Bonzini 		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
3065c50d8ae3SPaolo Bonzini 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
3066c50d8ae3SPaolo Bonzini 	}
3067c50d8ae3SPaolo Bonzini 
3068c50d8ae3SPaolo Bonzini 	return true;
3069c50d8ae3SPaolo Bonzini }
3070c50d8ae3SPaolo Bonzini 
3071c50d8ae3SPaolo Bonzini static bool is_access_allowed(u32 fault_err_code, u64 spte)
3072c50d8ae3SPaolo Bonzini {
3073c50d8ae3SPaolo Bonzini 	if (fault_err_code & PFERR_FETCH_MASK)
3074c50d8ae3SPaolo Bonzini 		return is_executable_pte(spte);
3075c50d8ae3SPaolo Bonzini 
3076c50d8ae3SPaolo Bonzini 	if (fault_err_code & PFERR_WRITE_MASK)
3077c50d8ae3SPaolo Bonzini 		return is_writable_pte(spte);
3078c50d8ae3SPaolo Bonzini 
3079c50d8ae3SPaolo Bonzini 	/* Fault was on Read access */
3080c50d8ae3SPaolo Bonzini 	return spte & PT_PRESENT_MASK;
3081c50d8ae3SPaolo Bonzini }
3082c50d8ae3SPaolo Bonzini 
3083c50d8ae3SPaolo Bonzini /*
3084c4371c2aSSean Christopherson  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3085c50d8ae3SPaolo Bonzini  */
3086c4371c2aSSean Christopherson static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3087c50d8ae3SPaolo Bonzini 			   u32 error_code)
3088c50d8ae3SPaolo Bonzini {
3089c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
3090c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3091c4371c2aSSean Christopherson 	int ret = RET_PF_INVALID;
3092c50d8ae3SPaolo Bonzini 	u64 spte = 0ull;
3093c50d8ae3SPaolo Bonzini 	uint retry_count = 0;
3094c50d8ae3SPaolo Bonzini 
3095c50d8ae3SPaolo Bonzini 	if (!page_fault_can_be_fast(error_code))
3096c4371c2aSSean Christopherson 		return ret;
3097c50d8ae3SPaolo Bonzini 
3098c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3099c50d8ae3SPaolo Bonzini 
3100c50d8ae3SPaolo Bonzini 	do {
3101c50d8ae3SPaolo Bonzini 		u64 new_spte;
3102c50d8ae3SPaolo Bonzini 
3103736c291cSSean Christopherson 		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
3104f9fa2509SSean Christopherson 			if (!is_shadow_present_pte(spte))
3105c50d8ae3SPaolo Bonzini 				break;
3106c50d8ae3SPaolo Bonzini 
3107ec89e643SSean Christopherson 		if (!is_shadow_present_pte(spte))
3108ec89e643SSean Christopherson 			break;
3109ec89e643SSean Christopherson 
311057354682SSean Christopherson 		sp = sptep_to_sp(iterator.sptep);
3111c50d8ae3SPaolo Bonzini 		if (!is_last_spte(spte, sp->role.level))
3112c50d8ae3SPaolo Bonzini 			break;
3113c50d8ae3SPaolo Bonzini 
3114c50d8ae3SPaolo Bonzini 		/*
3115c50d8ae3SPaolo Bonzini 		 * Check whether the memory access that caused the fault would
3116c50d8ae3SPaolo Bonzini 		 * still cause it if it were to be performed right now. If not,
3117c50d8ae3SPaolo Bonzini 		 * then this is a spurious fault caused by a lazily flushed TLB,
3118c50d8ae3SPaolo Bonzini 		 * or some other CPU has already fixed the PTE after the
3119c50d8ae3SPaolo Bonzini 		 * current CPU took the fault.
3120c50d8ae3SPaolo Bonzini 		 *
3121c50d8ae3SPaolo Bonzini 		 * Need not check the access of upper level table entries since
3122c50d8ae3SPaolo Bonzini 		 * they are always ACC_ALL.
3123c50d8ae3SPaolo Bonzini 		 */
3124c50d8ae3SPaolo Bonzini 		if (is_access_allowed(error_code, spte)) {
3125c4371c2aSSean Christopherson 			ret = RET_PF_SPURIOUS;
3126c50d8ae3SPaolo Bonzini 			break;
3127c50d8ae3SPaolo Bonzini 		}
3128c50d8ae3SPaolo Bonzini 
3129c50d8ae3SPaolo Bonzini 		new_spte = spte;
3130c50d8ae3SPaolo Bonzini 
3131c50d8ae3SPaolo Bonzini 		if (is_access_track_spte(spte))
3132c50d8ae3SPaolo Bonzini 			new_spte = restore_acc_track_spte(new_spte);
3133c50d8ae3SPaolo Bonzini 
3134c50d8ae3SPaolo Bonzini 		/*
3135c50d8ae3SPaolo Bonzini 		 * Currently, to simplify the code, write-protection can
3136c50d8ae3SPaolo Bonzini 		 * be removed in the fast path only if the SPTE was
3137c50d8ae3SPaolo Bonzini 		 * write-protected for dirty-logging or access tracking.
3138c50d8ae3SPaolo Bonzini 		 */
3139c50d8ae3SPaolo Bonzini 		if ((error_code & PFERR_WRITE_MASK) &&
3140e6302698SMiaohe Lin 		    spte_can_locklessly_be_made_writable(spte)) {
3141c50d8ae3SPaolo Bonzini 			new_spte |= PT_WRITABLE_MASK;
3142c50d8ae3SPaolo Bonzini 
3143c50d8ae3SPaolo Bonzini 			/*
3144c50d8ae3SPaolo Bonzini 			 * Do not fix write-permission on the large spte.  Since
3145c50d8ae3SPaolo Bonzini 			 * we only dirty the first page into the dirty-bitmap in
3146c50d8ae3SPaolo Bonzini 			 * fast_pf_fix_direct_spte(), other pages are missed
3147c50d8ae3SPaolo Bonzini 			 * if its slot has dirty logging enabled.
3148c50d8ae3SPaolo Bonzini 			 *
3149c50d8ae3SPaolo Bonzini 			 * Instead, we let the slow page fault path create a
3150c50d8ae3SPaolo Bonzini 			 * normal spte to fix the access.
3151c50d8ae3SPaolo Bonzini 			 *
3152c50d8ae3SPaolo Bonzini 			 * See the comments in kvm_arch_commit_memory_region().
3153c50d8ae3SPaolo Bonzini 			 */
31543bae0459SSean Christopherson 			if (sp->role.level > PG_LEVEL_4K)
3155c50d8ae3SPaolo Bonzini 				break;
3156c50d8ae3SPaolo Bonzini 		}
3157c50d8ae3SPaolo Bonzini 
3158c50d8ae3SPaolo Bonzini 		/* Verify that the fault can be handled in the fast path */
3159c50d8ae3SPaolo Bonzini 		if (new_spte == spte ||
3160c50d8ae3SPaolo Bonzini 		    !is_access_allowed(error_code, new_spte))
3161c50d8ae3SPaolo Bonzini 			break;
3162c50d8ae3SPaolo Bonzini 
3163c50d8ae3SPaolo Bonzini 		/*
3164c50d8ae3SPaolo Bonzini 		 * Currently, fast page fault only works for direct mapping
3165c50d8ae3SPaolo Bonzini 		 * since the gfn is not stable for indirect shadow page. See
31663ecad8c2SMauro Carvalho Chehab 		 * Documentation/virt/kvm/locking.rst to get more detail.
3167c50d8ae3SPaolo Bonzini 		 */
3168c4371c2aSSean Christopherson 		if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
3169c4371c2aSSean Christopherson 					    new_spte)) {
3170c4371c2aSSean Christopherson 			ret = RET_PF_FIXED;
3171c50d8ae3SPaolo Bonzini 			break;
3172c4371c2aSSean Christopherson 		}
3173c50d8ae3SPaolo Bonzini 
3174c50d8ae3SPaolo Bonzini 		if (++retry_count > 4) {
3175c50d8ae3SPaolo Bonzini 			printk_once(KERN_WARNING
3176c50d8ae3SPaolo Bonzini 				"kvm: Fast #PF retrying more than 4 times.\n");
3177c50d8ae3SPaolo Bonzini 			break;
3178c50d8ae3SPaolo Bonzini 		}
3179c50d8ae3SPaolo Bonzini 
3180c50d8ae3SPaolo Bonzini 	} while (true);
3181c50d8ae3SPaolo Bonzini 
3182736c291cSSean Christopherson 	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
3183c4371c2aSSean Christopherson 			      spte, ret);
3184c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3185c50d8ae3SPaolo Bonzini 
3186c4371c2aSSean Christopherson 	return ret;
3187c50d8ae3SPaolo Bonzini }
3188c50d8ae3SPaolo Bonzini 
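/*
 * Illustrative sketch: for a write fault, the is_access_allowed() check in
 * the fast path above reduces to asking whether the SPTE has become writable
 * since the fault was taken, roughly:
 *
 *	if ((error_code & PFERR_WRITE_MASK) && (spte & PT_WRITABLE_MASK))
 *		ret = RET_PF_SPURIOUS;
 *
 * The real helper also handles fetch and read/present faults; only the write
 * case is shown here for clarity.
 */
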
3189c50d8ae3SPaolo Bonzini static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3190c50d8ae3SPaolo Bonzini 			       struct list_head *invalid_list)
3191c50d8ae3SPaolo Bonzini {
3192c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3193c50d8ae3SPaolo Bonzini 
3194c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(*root_hpa))
3195c50d8ae3SPaolo Bonzini 		return;
3196c50d8ae3SPaolo Bonzini 
3197e47c4aeeSSean Christopherson 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
319802c00b3aSBen Gardon 
3199897218ffSPaolo Bonzini 	if (is_tdp_mmu_page(sp))
32006103bc07SBen Gardon 		kvm_tdp_mmu_put_root(kvm, sp, false);
320176eb54e7SBen Gardon 	else if (!--sp->root_count && sp->role.invalid)
3202c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3203c50d8ae3SPaolo Bonzini 
3204c50d8ae3SPaolo Bonzini 	*root_hpa = INVALID_PAGE;
3205c50d8ae3SPaolo Bonzini }
3206c50d8ae3SPaolo Bonzini 
3207c50d8ae3SPaolo Bonzini /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3208c50d8ae3SPaolo Bonzini void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3209c50d8ae3SPaolo Bonzini 			ulong roots_to_free)
3210c50d8ae3SPaolo Bonzini {
32114d710de9SSean Christopherson 	struct kvm *kvm = vcpu->kvm;
3212c50d8ae3SPaolo Bonzini 	int i;
3213c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
3214c50d8ae3SPaolo Bonzini 	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3215c50d8ae3SPaolo Bonzini 
3216c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3217c50d8ae3SPaolo Bonzini 
3218c50d8ae3SPaolo Bonzini 	/* Before acquiring the MMU lock, see if we need to do any real work. */
3219c50d8ae3SPaolo Bonzini 	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
3220c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3221c50d8ae3SPaolo Bonzini 			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3222c50d8ae3SPaolo Bonzini 			    VALID_PAGE(mmu->prev_roots[i].hpa))
3223c50d8ae3SPaolo Bonzini 				break;
3224c50d8ae3SPaolo Bonzini 
3225c50d8ae3SPaolo Bonzini 		if (i == KVM_MMU_NUM_PREV_ROOTS)
3226c50d8ae3SPaolo Bonzini 			return;
3227c50d8ae3SPaolo Bonzini 	}
3228c50d8ae3SPaolo Bonzini 
3229531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
3230c50d8ae3SPaolo Bonzini 
3231c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3232c50d8ae3SPaolo Bonzini 		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
32334d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3234c50d8ae3SPaolo Bonzini 					   &invalid_list);
3235c50d8ae3SPaolo Bonzini 
3236c50d8ae3SPaolo Bonzini 	if (free_active_root) {
3237c50d8ae3SPaolo Bonzini 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3238c50d8ae3SPaolo Bonzini 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
32394d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
324004d45551SSean Christopherson 		} else if (mmu->pae_root) {
3241c834e5e4SSean Christopherson 			for (i = 0; i < 4; ++i) {
3242c834e5e4SSean Christopherson 				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3243c834e5e4SSean Christopherson 					continue;
3244c834e5e4SSean Christopherson 
3245c834e5e4SSean Christopherson 				mmu_free_root_page(kvm, &mmu->pae_root[i],
3246c50d8ae3SPaolo Bonzini 						   &invalid_list);
3247c834e5e4SSean Christopherson 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3248c50d8ae3SPaolo Bonzini 			}
3249c50d8ae3SPaolo Bonzini 		}
325004d45551SSean Christopherson 		mmu->root_hpa = INVALID_PAGE;
3251be01e8e2SSean Christopherson 		mmu->root_pgd = 0;
3252c50d8ae3SPaolo Bonzini 	}
3253c50d8ae3SPaolo Bonzini 
32544d710de9SSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
3255531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
3256c50d8ae3SPaolo Bonzini }
3257c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3258c50d8ae3SPaolo Bonzini 
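/*
 * Usage sketch (illustrative only): roots_to_free is a bitmask, so a caller
 * can drop the active root and a specific cached previous root in one call,
 * e.g.:
 *
 *	kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
 *			   KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(0));
 *
 * Passing only KVM_MMU_ROOT_PREVIOUS(i) flags leaves the active root intact.
 */
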
325925b62c62SSean Christopherson void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
326025b62c62SSean Christopherson {
326125b62c62SSean Christopherson 	unsigned long roots_to_free = 0;
326225b62c62SSean Christopherson 	hpa_t root_hpa;
326325b62c62SSean Christopherson 	int i;
326425b62c62SSean Christopherson 
326525b62c62SSean Christopherson 	/*
326625b62c62SSean Christopherson 	 * This should not be called while L2 is active; L2 can't invalidate
326725b62c62SSean Christopherson 	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
326825b62c62SSean Christopherson 	 */
326925b62c62SSean Christopherson 	WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
327025b62c62SSean Christopherson 
327125b62c62SSean Christopherson 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
327225b62c62SSean Christopherson 		root_hpa = mmu->prev_roots[i].hpa;
327325b62c62SSean Christopherson 		if (!VALID_PAGE(root_hpa))
327425b62c62SSean Christopherson 			continue;
327525b62c62SSean Christopherson 
327625b62c62SSean Christopherson 		if (!to_shadow_page(root_hpa) ||
327725b62c62SSean Christopherson 			to_shadow_page(root_hpa)->role.guest_mode)
327825b62c62SSean Christopherson 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
327925b62c62SSean Christopherson 	}
328025b62c62SSean Christopherson 
328125b62c62SSean Christopherson 	kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
328225b62c62SSean Christopherson }
328325b62c62SSean Christopherson EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
328425b62c62SSean Christopherson 
328525b62c62SSean Christopherson 
3286c50d8ae3SPaolo Bonzini static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3287c50d8ae3SPaolo Bonzini {
3288c50d8ae3SPaolo Bonzini 	int ret = 0;
3289c50d8ae3SPaolo Bonzini 
3290995decb6SVitaly Kuznetsov 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3291c50d8ae3SPaolo Bonzini 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3292c50d8ae3SPaolo Bonzini 		ret = 1;
3293c50d8ae3SPaolo Bonzini 	}
3294c50d8ae3SPaolo Bonzini 
3295c50d8ae3SPaolo Bonzini 	return ret;
3296c50d8ae3SPaolo Bonzini }
3297c50d8ae3SPaolo Bonzini 
32988123f265SSean Christopherson static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
32998123f265SSean Christopherson 			    u8 level, bool direct)
3300c50d8ae3SPaolo Bonzini {
3301c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
33028123f265SSean Christopherson 
33038123f265SSean Christopherson 	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
33048123f265SSean Christopherson 	++sp->root_count;
33058123f265SSean Christopherson 
33068123f265SSean Christopherson 	return __pa(sp->spt);
33078123f265SSean Christopherson }
33088123f265SSean Christopherson 
33098123f265SSean Christopherson static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
33108123f265SSean Christopherson {
3311b37233c9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3312b37233c9SSean Christopherson 	u8 shadow_root_level = mmu->shadow_root_level;
33138123f265SSean Christopherson 	hpa_t root;
3314c50d8ae3SPaolo Bonzini 	unsigned i;
33154a38162eSPaolo Bonzini 	int r;
33164a38162eSPaolo Bonzini 
33174a38162eSPaolo Bonzini 	write_lock(&vcpu->kvm->mmu_lock);
33184a38162eSPaolo Bonzini 	r = make_mmu_pages_available(vcpu);
33194a38162eSPaolo Bonzini 	if (r < 0)
33204a38162eSPaolo Bonzini 		goto out_unlock;
3321c50d8ae3SPaolo Bonzini 
3322897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(vcpu->kvm)) {
332302c00b3aSBen Gardon 		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3324b37233c9SSean Christopherson 		mmu->root_hpa = root;
332502c00b3aSBen Gardon 	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
33266e6ec584SSean Christopherson 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3327b37233c9SSean Christopherson 		mmu->root_hpa = root;
33288123f265SSean Christopherson 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
33294a38162eSPaolo Bonzini 		if (WARN_ON_ONCE(!mmu->pae_root)) {
33304a38162eSPaolo Bonzini 			r = -EIO;
33314a38162eSPaolo Bonzini 			goto out_unlock;
33324a38162eSPaolo Bonzini 		}
333373ad1606SSean Christopherson 
3334c50d8ae3SPaolo Bonzini 		for (i = 0; i < 4; ++i) {
3335c834e5e4SSean Christopherson 			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3336c50d8ae3SPaolo Bonzini 
33378123f265SSean Christopherson 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
33388123f265SSean Christopherson 					      i << 30, PT32_ROOT_LEVEL, true);
333917e368d9SSean Christopherson 			mmu->pae_root[i] = root | PT_PRESENT_MASK |
334017e368d9SSean Christopherson 					   shadow_me_mask;
3341c50d8ae3SPaolo Bonzini 		}
3342b37233c9SSean Christopherson 		mmu->root_hpa = __pa(mmu->pae_root);
334373ad1606SSean Christopherson 	} else {
334473ad1606SSean Christopherson 		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
33454a38162eSPaolo Bonzini 		r = -EIO;
33464a38162eSPaolo Bonzini 		goto out_unlock;
334773ad1606SSean Christopherson 	}
33483651c7fcSSean Christopherson 
3349be01e8e2SSean Christopherson 	/* root_pgd is ignored for direct MMUs. */
3350b37233c9SSean Christopherson 	mmu->root_pgd = 0;
33514a38162eSPaolo Bonzini out_unlock:
33524a38162eSPaolo Bonzini 	write_unlock(&vcpu->kvm->mmu_lock);
33534a38162eSPaolo Bonzini 	return r;
3354c50d8ae3SPaolo Bonzini }
3355c50d8ae3SPaolo Bonzini 
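/*
 * Worked example (for illustration): with a PT32E shadow root and a direct
 * MMU, each of the four pae_root entries above maps 1GiB of guest physical
 * address space.  For i == 1, the root is built for
 * gfn = 1 << (30 - PAGE_SHIFT) = 0x40000, i.e. GPA 0x40000000, so
 * pae_root[1] covers GPAs [1GiB, 2GiB).
 */
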
3356c50d8ae3SPaolo Bonzini static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3357c50d8ae3SPaolo Bonzini {
3358b37233c9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
33596e0918aeSSean Christopherson 	u64 pdptrs[4], pm_mask;
3360be01e8e2SSean Christopherson 	gfn_t root_gfn, root_pgd;
33618123f265SSean Christopherson 	hpa_t root;
33624a38162eSPaolo Bonzini 	unsigned i;
33634a38162eSPaolo Bonzini 	int r;
3364c50d8ae3SPaolo Bonzini 
3365b37233c9SSean Christopherson 	root_pgd = mmu->get_guest_pgd(vcpu);
3366be01e8e2SSean Christopherson 	root_gfn = root_pgd >> PAGE_SHIFT;
3367c50d8ae3SPaolo Bonzini 
3368c50d8ae3SPaolo Bonzini 	if (mmu_check_root(vcpu, root_gfn))
3369c50d8ae3SPaolo Bonzini 		return 1;
3370c50d8ae3SPaolo Bonzini 
3371c50d8ae3SPaolo Bonzini 	/*
33724a38162eSPaolo Bonzini 	 * On SVM, reading PDPTRs might access guest memory, which might fault
33734a38162eSPaolo Bonzini 	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
33744a38162eSPaolo Bonzini 	 */
33756e0918aeSSean Christopherson 	if (mmu->root_level == PT32E_ROOT_LEVEL) {
33766e0918aeSSean Christopherson 		for (i = 0; i < 4; ++i) {
33776e0918aeSSean Christopherson 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
33786e0918aeSSean Christopherson 			if (!(pdptrs[i] & PT_PRESENT_MASK))
33796e0918aeSSean Christopherson 				continue;
33806e0918aeSSean Christopherson 
33816e0918aeSSean Christopherson 			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
33826e0918aeSSean Christopherson 				return 1;
33836e0918aeSSean Christopherson 		}
33846e0918aeSSean Christopherson 	}
33856e0918aeSSean Christopherson 
3386d501f747SBen Gardon 	r = alloc_all_memslots_rmaps(vcpu->kvm);
3387d501f747SBen Gardon 	if (r)
3388d501f747SBen Gardon 		return r;
3389d501f747SBen Gardon 
33904a38162eSPaolo Bonzini 	write_lock(&vcpu->kvm->mmu_lock);
33914a38162eSPaolo Bonzini 	r = make_mmu_pages_available(vcpu);
33924a38162eSPaolo Bonzini 	if (r < 0)
33934a38162eSPaolo Bonzini 		goto out_unlock;
33944a38162eSPaolo Bonzini 
3395c50d8ae3SPaolo Bonzini 	/*
3396c50d8ae3SPaolo Bonzini 	 * Do we shadow a long mode page table? If so we need to
3397c50d8ae3SPaolo Bonzini 	 * Do we shadow a long mode page table? If so, we need to
3398c50d8ae3SPaolo Bonzini 	 * write-protect the guest's page table root.
3399b37233c9SSean Christopherson 	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
34008123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, 0,
3401b37233c9SSean Christopherson 				      mmu->shadow_root_level, false);
3402b37233c9SSean Christopherson 		mmu->root_hpa = root;
3403be01e8e2SSean Christopherson 		goto set_root_pgd;
3404c50d8ae3SPaolo Bonzini 	}
3405c50d8ae3SPaolo Bonzini 
34064a38162eSPaolo Bonzini 	if (WARN_ON_ONCE(!mmu->pae_root)) {
34074a38162eSPaolo Bonzini 		r = -EIO;
34084a38162eSPaolo Bonzini 		goto out_unlock;
34094a38162eSPaolo Bonzini 	}
341073ad1606SSean Christopherson 
3411c50d8ae3SPaolo Bonzini 	/*
3412c50d8ae3SPaolo Bonzini 	 * We shadow a 32-bit page table. This may be a legacy 2-level
3413c50d8ae3SPaolo Bonzini 	 * or a PAE 3-level page table. In either case we need to be aware that
3414c50d8ae3SPaolo Bonzini 	 * the shadow page table may be a PAE or a long mode page table.
3415c50d8ae3SPaolo Bonzini 	 */
341617e368d9SSean Christopherson 	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3417748e52b9SSean Christopherson 	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
3418c50d8ae3SPaolo Bonzini 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3419c50d8ae3SPaolo Bonzini 
342003ca4589SSean Christopherson 		if (WARN_ON_ONCE(!mmu->pml4_root)) {
34214a38162eSPaolo Bonzini 			r = -EIO;
34224a38162eSPaolo Bonzini 			goto out_unlock;
34234a38162eSPaolo Bonzini 		}
342473ad1606SSean Christopherson 
342503ca4589SSean Christopherson 		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
342604d45551SSean Christopherson 	}
342704d45551SSean Christopherson 
3428c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3429c834e5e4SSean Christopherson 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
34306e6ec584SSean Christopherson 
3431b37233c9SSean Christopherson 		if (mmu->root_level == PT32E_ROOT_LEVEL) {
34326e0918aeSSean Christopherson 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3433c834e5e4SSean Christopherson 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3434c50d8ae3SPaolo Bonzini 				continue;
3435c50d8ae3SPaolo Bonzini 			}
34366e0918aeSSean Christopherson 			root_gfn = pdptrs[i] >> PAGE_SHIFT;
3437c50d8ae3SPaolo Bonzini 		}
3438c50d8ae3SPaolo Bonzini 
34398123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
34408123f265SSean Christopherson 				      PT32_ROOT_LEVEL, false);
3441b37233c9SSean Christopherson 		mmu->pae_root[i] = root | pm_mask;
3442c50d8ae3SPaolo Bonzini 	}
3443c50d8ae3SPaolo Bonzini 
3444ba0a194fSSean Christopherson 	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
344503ca4589SSean Christopherson 		mmu->root_hpa = __pa(mmu->pml4_root);
3446ba0a194fSSean Christopherson 	else
3447ba0a194fSSean Christopherson 		mmu->root_hpa = __pa(mmu->pae_root);
3448c50d8ae3SPaolo Bonzini 
3449be01e8e2SSean Christopherson set_root_pgd:
3450b37233c9SSean Christopherson 	mmu->root_pgd = root_pgd;
34514a38162eSPaolo Bonzini out_unlock:
34524a38162eSPaolo Bonzini 	write_unlock(&vcpu->kvm->mmu_lock);
3453c50d8ae3SPaolo Bonzini 
3454c50d8ae3SPaolo Bonzini 	return 0;
3455c50d8ae3SPaolo Bonzini }
3456c50d8ae3SPaolo Bonzini 
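/*
 * Illustrative layout: when a 32-bit or PAE guest is shadowed on hardware
 * that requires 4-level paging, the roots built above chain as follows:
 *
 *	mmu->root_hpa -> pml4_root (only entry 0 used, pm_mask applied)
 *	              -> pae_root[0..3], one per 1GiB of guest virtual space
 *	              -> shadow pages for the guest's 2- or 3-level tables
 *
 * With a PAE (3-level) shadow root level, root_hpa points at pae_root
 * directly and pml4_root is not used.
 */
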
3457748e52b9SSean Christopherson static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3458c50d8ae3SPaolo Bonzini {
3459748e52b9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
346003ca4589SSean Christopherson 	u64 *pml4_root, *pae_root;
3461748e52b9SSean Christopherson 
3462748e52b9SSean Christopherson 	/*
3463748e52b9SSean Christopherson 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3464748e52b9SSean Christopherson 	 * tables are allocated and initialized at root creation as there is no
3465748e52b9SSean Christopherson 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
3466748e52b9SSean Christopherson 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3467748e52b9SSean Christopherson 	 */
3468748e52b9SSean Christopherson 	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
3469748e52b9SSean Christopherson 	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
3470748e52b9SSean Christopherson 		return 0;
3471748e52b9SSean Christopherson 
3472748e52b9SSean Christopherson 	/*
3473748e52b9SSean Christopherson 	 * This mess only works with 4-level paging and needs to be updated to
3474748e52b9SSean Christopherson 	 * work with 5-level paging.
3475748e52b9SSean Christopherson 	 */
3476748e52b9SSean Christopherson 	if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
3477748e52b9SSean Christopherson 		return -EIO;
3478748e52b9SSean Christopherson 
347903ca4589SSean Christopherson 	if (mmu->pae_root && mmu->pml4_root)
3480748e52b9SSean Christopherson 		return 0;
3481748e52b9SSean Christopherson 
3482748e52b9SSean Christopherson 	/*
3483748e52b9SSean Christopherson 	 * The special roots should always be allocated in concert.  Yell and
3484748e52b9SSean Christopherson 	 * bail if KVM ends up in a state where only one of the roots is valid.
3485748e52b9SSean Christopherson 	 */
348603ca4589SSean Christopherson 	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root))
3487748e52b9SSean Christopherson 		return -EIO;
3488748e52b9SSean Christopherson 
34894a98623dSSean Christopherson 	/*
34904a98623dSSean Christopherson 	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
34914a98623dSSean Christopherson 	 * doesn't need to be decrypted.
34924a98623dSSean Christopherson 	 */
3493748e52b9SSean Christopherson 	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3494748e52b9SSean Christopherson 	if (!pae_root)
3495748e52b9SSean Christopherson 		return -ENOMEM;
3496748e52b9SSean Christopherson 
349703ca4589SSean Christopherson 	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
349803ca4589SSean Christopherson 	if (!pml4_root) {
3499748e52b9SSean Christopherson 		free_page((unsigned long)pae_root);
3500748e52b9SSean Christopherson 		return -ENOMEM;
3501748e52b9SSean Christopherson 	}
3502748e52b9SSean Christopherson 
3503748e52b9SSean Christopherson 	mmu->pae_root = pae_root;
350403ca4589SSean Christopherson 	mmu->pml4_root = pml4_root;
3505748e52b9SSean Christopherson 
3506748e52b9SSean Christopherson 	return 0;
3507c50d8ae3SPaolo Bonzini }
3508c50d8ae3SPaolo Bonzini 
3509c50d8ae3SPaolo Bonzini void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3510c50d8ae3SPaolo Bonzini {
3511c50d8ae3SPaolo Bonzini 	int i;
3512c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3513c50d8ae3SPaolo Bonzini 
3514c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
3515c50d8ae3SPaolo Bonzini 		return;
3516c50d8ae3SPaolo Bonzini 
3517c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3518c50d8ae3SPaolo Bonzini 		return;
3519c50d8ae3SPaolo Bonzini 
3520c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3521c50d8ae3SPaolo Bonzini 
3522c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3523c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->root_hpa;
3524e47c4aeeSSean Christopherson 		sp = to_shadow_page(root);
3525c50d8ae3SPaolo Bonzini 
3526c50d8ae3SPaolo Bonzini 		/*
3527c50d8ae3SPaolo Bonzini 		 * Even if another CPU was marking the SP as unsync-ed
3528c50d8ae3SPaolo Bonzini 		 * simultaneously, any guest page table changes are not
3529c50d8ae3SPaolo Bonzini 		 * guaranteed to be visible anyway until this VCPU issues a TLB
3530c50d8ae3SPaolo Bonzini 		 * flush strictly after those changes are made. We only need to
3531c50d8ae3SPaolo Bonzini 		 * ensure that the other CPU sets these flags before any actual
3532c50d8ae3SPaolo Bonzini 		 * changes to the page tables are made. The comments in
35330337f585SSean Christopherson 		 * mmu_try_to_unsync_pages() describe what could go wrong if
35340337f585SSean Christopherson 		 * this requirement isn't satisfied.
3535c50d8ae3SPaolo Bonzini 		 */
3536c50d8ae3SPaolo Bonzini 		if (!smp_load_acquire(&sp->unsync) &&
3537c50d8ae3SPaolo Bonzini 		    !smp_load_acquire(&sp->unsync_children))
3538c50d8ae3SPaolo Bonzini 			return;
3539c50d8ae3SPaolo Bonzini 
3540531810caSBen Gardon 		write_lock(&vcpu->kvm->mmu_lock);
3541c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3542c50d8ae3SPaolo Bonzini 
3543c50d8ae3SPaolo Bonzini 		mmu_sync_children(vcpu, sp);
3544c50d8ae3SPaolo Bonzini 
3545c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3546531810caSBen Gardon 		write_unlock(&vcpu->kvm->mmu_lock);
3547c50d8ae3SPaolo Bonzini 		return;
3548c50d8ae3SPaolo Bonzini 	}
3549c50d8ae3SPaolo Bonzini 
3550531810caSBen Gardon 	write_lock(&vcpu->kvm->mmu_lock);
3551c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3552c50d8ae3SPaolo Bonzini 
3553c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3554c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->pae_root[i];
3555c50d8ae3SPaolo Bonzini 
3556c834e5e4SSean Christopherson 		if (IS_VALID_PAE_ROOT(root)) {
3557c50d8ae3SPaolo Bonzini 			root &= PT64_BASE_ADDR_MASK;
3558e47c4aeeSSean Christopherson 			sp = to_shadow_page(root);
3559c50d8ae3SPaolo Bonzini 			mmu_sync_children(vcpu, sp);
3560c50d8ae3SPaolo Bonzini 		}
3561c50d8ae3SPaolo Bonzini 	}
3562c50d8ae3SPaolo Bonzini 
3563c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3564531810caSBen Gardon 	write_unlock(&vcpu->kvm->mmu_lock);
3565c50d8ae3SPaolo Bonzini }
3566c50d8ae3SPaolo Bonzini 
3567736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3568c50d8ae3SPaolo Bonzini 				  u32 access, struct x86_exception *exception)
3569c50d8ae3SPaolo Bonzini {
3570c50d8ae3SPaolo Bonzini 	if (exception)
3571c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3572c50d8ae3SPaolo Bonzini 	return vaddr;
3573c50d8ae3SPaolo Bonzini }
3574c50d8ae3SPaolo Bonzini 
3575736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3576c50d8ae3SPaolo Bonzini 					 u32 access,
3577c50d8ae3SPaolo Bonzini 					 struct x86_exception *exception)
3578c50d8ae3SPaolo Bonzini {
3579c50d8ae3SPaolo Bonzini 	if (exception)
3580c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3581c50d8ae3SPaolo Bonzini 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3582c50d8ae3SPaolo Bonzini }
3583c50d8ae3SPaolo Bonzini 
3584c50d8ae3SPaolo Bonzini static bool
3585c50d8ae3SPaolo Bonzini __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
3586c50d8ae3SPaolo Bonzini {
3587b5c3c1b3SSean Christopherson 	int bit7 = (pte >> 7) & 1;
3588c50d8ae3SPaolo Bonzini 
3589b5c3c1b3SSean Christopherson 	return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
3590c50d8ae3SPaolo Bonzini }
3591c50d8ae3SPaolo Bonzini 
3592b5c3c1b3SSean Christopherson static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
3593c50d8ae3SPaolo Bonzini {
3594b5c3c1b3SSean Christopherson 	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
3595c50d8ae3SPaolo Bonzini }
3596c50d8ae3SPaolo Bonzini 
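/*
 * Worked example (illustrative): rsvd_bits_mask is indexed by
 * [bit 7 of the PTE][level - 1].  Bit 7 is the PSE/large-page bit, so a
 * 2MiB PDE (level 2 with bit 7 set) is validated against
 * rsvd_check->rsvd_bits_mask[1][1], while a non-leaf PDE at the same level
 * uses rsvd_bits_mask[0][1].
 */
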
3597c50d8ae3SPaolo Bonzini static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3598c50d8ae3SPaolo Bonzini {
3599c50d8ae3SPaolo Bonzini 	/*
3600c50d8ae3SPaolo Bonzini 	 * A nested guest cannot use the MMIO cache if it is using nested
3601c50d8ae3SPaolo Bonzini 	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
3602c50d8ae3SPaolo Bonzini 	 */
3603c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
3604c50d8ae3SPaolo Bonzini 		return false;
3605c50d8ae3SPaolo Bonzini 
3606c50d8ae3SPaolo Bonzini 	if (direct)
3607c50d8ae3SPaolo Bonzini 		return vcpu_match_mmio_gpa(vcpu, addr);
3608c50d8ae3SPaolo Bonzini 
3609c50d8ae3SPaolo Bonzini 	return vcpu_match_mmio_gva(vcpu, addr);
3610c50d8ae3SPaolo Bonzini }
3611c50d8ae3SPaolo Bonzini 
361295fb5b02SBen Gardon /*
361395fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
361495fb5b02SBen Gardon  * That SPTE may be non-present.
361595fb5b02SBen Gardon  */
361639b4d43eSSean Christopherson static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3617c50d8ae3SPaolo Bonzini {
3618c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
36192aa07893SSean Christopherson 	int leaf = -1;
362095fb5b02SBen Gardon 	u64 spte;
3621c50d8ae3SPaolo Bonzini 
3622c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3623c50d8ae3SPaolo Bonzini 
362439b4d43eSSean Christopherson 	for (shadow_walk_init(&iterator, vcpu, addr),
362539b4d43eSSean Christopherson 	     *root_level = iterator.level;
3626c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&iterator);
3627c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&iterator, spte)) {
362895fb5b02SBen Gardon 		leaf = iterator.level;
3629c50d8ae3SPaolo Bonzini 		spte = mmu_spte_get_lockless(iterator.sptep);
3630c50d8ae3SPaolo Bonzini 
3631dde81f94SSean Christopherson 		sptes[leaf] = spte;
3632c50d8ae3SPaolo Bonzini 
3633c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(spte))
3634c50d8ae3SPaolo Bonzini 			break;
363595fb5b02SBen Gardon 	}
363695fb5b02SBen Gardon 
363795fb5b02SBen Gardon 	walk_shadow_page_lockless_end(vcpu);
363895fb5b02SBen Gardon 
363995fb5b02SBen Gardon 	return leaf;
364095fb5b02SBen Gardon }
364195fb5b02SBen Gardon 
36429aa41879SSean Christopherson /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
364395fb5b02SBen Gardon static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
364495fb5b02SBen Gardon {
3645dde81f94SSean Christopherson 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
364695fb5b02SBen Gardon 	struct rsvd_bits_validate *rsvd_check;
364739b4d43eSSean Christopherson 	int root, leaf, level;
364895fb5b02SBen Gardon 	bool reserved = false;
364995fb5b02SBen Gardon 
365063c0cac9SDavid Matlack 	if (is_tdp_mmu(vcpu->arch.mmu))
365139b4d43eSSean Christopherson 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
365295fb5b02SBen Gardon 	else
365339b4d43eSSean Christopherson 		leaf = get_walk(vcpu, addr, sptes, &root);
365495fb5b02SBen Gardon 
36552aa07893SSean Christopherson 	if (unlikely(leaf < 0)) {
36562aa07893SSean Christopherson 		*sptep = 0ull;
36572aa07893SSean Christopherson 		return reserved;
36582aa07893SSean Christopherson 	}
36592aa07893SSean Christopherson 
36609aa41879SSean Christopherson 	*sptep = sptes[leaf];
36619aa41879SSean Christopherson 
36629aa41879SSean Christopherson 	/*
36639aa41879SSean Christopherson 	 * Skip reserved bits checks on the terminal leaf if it's not a valid
36649aa41879SSean Christopherson 	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
36659aa41879SSean Christopherson 	 * design, always have reserved bits set.  The purpose of the checks is
36669aa41879SSean Christopherson 	 * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
36679aa41879SSean Christopherson 	 */
36689aa41879SSean Christopherson 	if (!is_shadow_present_pte(sptes[leaf]))
36699aa41879SSean Christopherson 		leaf++;
367095fb5b02SBen Gardon 
367195fb5b02SBen Gardon 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
367295fb5b02SBen Gardon 
36739aa41879SSean Christopherson 	for (level = root; level >= leaf; level--)
3674b5c3c1b3SSean Christopherson 		/*
3675b5c3c1b3SSean Christopherson 		 * Use a bitwise-OR instead of a logical-OR to aggregate the
3676b5c3c1b3SSean Christopherson 		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
3677b5c3c1b3SSean Christopherson 		 * adding a Jcc in the loop.
3678b5c3c1b3SSean Christopherson 		 */
3679dde81f94SSean Christopherson 		reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
3680dde81f94SSean Christopherson 			    __is_rsvd_bits_set(rsvd_check, sptes[level], level);
3681c50d8ae3SPaolo Bonzini 
3682c50d8ae3SPaolo Bonzini 	if (reserved) {
3683bb4cdf3aSSean Christopherson 		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3684c50d8ae3SPaolo Bonzini 		       __func__, addr);
368595fb5b02SBen Gardon 		for (level = root; level >= leaf; level--)
3686bb4cdf3aSSean Christopherson 			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
3687bb4cdf3aSSean Christopherson 			       sptes[level], level,
3688bb4cdf3aSSean Christopherson 			       rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]);
3689c50d8ae3SPaolo Bonzini 	}
3690ddce6208SSean Christopherson 
3691c50d8ae3SPaolo Bonzini 	return reserved;
3692c50d8ae3SPaolo Bonzini }
3693c50d8ae3SPaolo Bonzini 
3694c50d8ae3SPaolo Bonzini static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3695c50d8ae3SPaolo Bonzini {
3696c50d8ae3SPaolo Bonzini 	u64 spte;
3697c50d8ae3SPaolo Bonzini 	bool reserved;
3698c50d8ae3SPaolo Bonzini 
3699c50d8ae3SPaolo Bonzini 	if (mmio_info_in_cache(vcpu, addr, direct))
3700c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3701c50d8ae3SPaolo Bonzini 
370295fb5b02SBen Gardon 	reserved = get_mmio_spte(vcpu, addr, &spte);
3703c50d8ae3SPaolo Bonzini 	if (WARN_ON(reserved))
3704c50d8ae3SPaolo Bonzini 		return -EINVAL;
3705c50d8ae3SPaolo Bonzini 
3706c50d8ae3SPaolo Bonzini 	if (is_mmio_spte(spte)) {
3707c50d8ae3SPaolo Bonzini 		gfn_t gfn = get_mmio_spte_gfn(spte);
37080a2b64c5SBen Gardon 		unsigned int access = get_mmio_spte_access(spte);
3709c50d8ae3SPaolo Bonzini 
3710c50d8ae3SPaolo Bonzini 		if (!check_mmio_spte(vcpu, spte))
3711c50d8ae3SPaolo Bonzini 			return RET_PF_INVALID;
3712c50d8ae3SPaolo Bonzini 
3713c50d8ae3SPaolo Bonzini 		if (direct)
3714c50d8ae3SPaolo Bonzini 			addr = 0;
3715c50d8ae3SPaolo Bonzini 
3716c50d8ae3SPaolo Bonzini 		trace_handle_mmio_page_fault(addr, gfn, access);
3717c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3718c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3719c50d8ae3SPaolo Bonzini 	}
3720c50d8ae3SPaolo Bonzini 
3721c50d8ae3SPaolo Bonzini 	/*
3722c50d8ae3SPaolo Bonzini 	 * If the page table has been zapped by another CPU, let the CPU fault
3723c50d8ae3SPaolo Bonzini 	 * again on the address.
3724c50d8ae3SPaolo Bonzini 	 */
3725c50d8ae3SPaolo Bonzini 	return RET_PF_RETRY;
3726c50d8ae3SPaolo Bonzini }
3727c50d8ae3SPaolo Bonzini 
3728c50d8ae3SPaolo Bonzini static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3729c50d8ae3SPaolo Bonzini 					 u32 error_code, gfn_t gfn)
3730c50d8ae3SPaolo Bonzini {
3731c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK))
3732c50d8ae3SPaolo Bonzini 		return false;
3733c50d8ae3SPaolo Bonzini 
3734c50d8ae3SPaolo Bonzini 	if (!(error_code & PFERR_PRESENT_MASK) ||
3735c50d8ae3SPaolo Bonzini 	      !(error_code & PFERR_WRITE_MASK))
3736c50d8ae3SPaolo Bonzini 		return false;
3737c50d8ae3SPaolo Bonzini 
3738c50d8ae3SPaolo Bonzini 	/*
3739c50d8ae3SPaolo Bonzini 	 * The guest is writing a page that is write-tracked, which cannot
3740c50d8ae3SPaolo Bonzini 	 * be fixed by the page fault handler.
3741c50d8ae3SPaolo Bonzini 	 */
3742c50d8ae3SPaolo Bonzini 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
3743c50d8ae3SPaolo Bonzini 		return true;
3744c50d8ae3SPaolo Bonzini 
3745c50d8ae3SPaolo Bonzini 	return false;
3746c50d8ae3SPaolo Bonzini }
3747c50d8ae3SPaolo Bonzini 
3748c50d8ae3SPaolo Bonzini static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3749c50d8ae3SPaolo Bonzini {
3750c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
3751c50d8ae3SPaolo Bonzini 	u64 spte;
3752c50d8ae3SPaolo Bonzini 
3753c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3754c50d8ae3SPaolo Bonzini 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3755c50d8ae3SPaolo Bonzini 		clear_sp_write_flooding_count(iterator.sptep);
3756c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(spte))
3757c50d8ae3SPaolo Bonzini 			break;
3758c50d8ae3SPaolo Bonzini 	}
3759c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3760c50d8ae3SPaolo Bonzini }
3761c50d8ae3SPaolo Bonzini 
3762e8c22266SVitaly Kuznetsov static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
37639f1a8526SSean Christopherson 				    gfn_t gfn)
3764c50d8ae3SPaolo Bonzini {
3765c50d8ae3SPaolo Bonzini 	struct kvm_arch_async_pf arch;
3766c50d8ae3SPaolo Bonzini 
3767c50d8ae3SPaolo Bonzini 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3768c50d8ae3SPaolo Bonzini 	arch.gfn = gfn;
3769c50d8ae3SPaolo Bonzini 	arch.direct_map = vcpu->arch.mmu->direct_map;
3770d8dd54e0SSean Christopherson 	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3771c50d8ae3SPaolo Bonzini 
37729f1a8526SSean Christopherson 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
37739f1a8526SSean Christopherson 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3774c50d8ae3SPaolo Bonzini }
3775c50d8ae3SPaolo Bonzini 
3776c50d8ae3SPaolo Bonzini static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
37774a42d848SDavid Stevens 			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
37784a42d848SDavid Stevens 			 bool write, bool *writable)
3779c50d8ae3SPaolo Bonzini {
3780c36b7150SPaolo Bonzini 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3781c50d8ae3SPaolo Bonzini 	bool async;
3782c50d8ae3SPaolo Bonzini 
3783e0c37868SSean Christopherson 	/*
3784e0c37868SSean Christopherson 	 * Retry the page fault if the gfn hit a memslot that is being deleted
3785e0c37868SSean Christopherson 	 * or moved.  This ensures any existing SPTEs for the old memslot will
3786e0c37868SSean Christopherson 	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3787e0c37868SSean Christopherson 	 */
3788e0c37868SSean Christopherson 	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
3789e0c37868SSean Christopherson 		return true;
3790e0c37868SSean Christopherson 
3791c36b7150SPaolo Bonzini 	/* Don't expose private memslots to L2. */
3792c36b7150SPaolo Bonzini 	if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
3793c50d8ae3SPaolo Bonzini 		*pfn = KVM_PFN_NOSLOT;
3794c583eed6SSean Christopherson 		*writable = false;
3795c50d8ae3SPaolo Bonzini 		return false;
3796c50d8ae3SPaolo Bonzini 	}
3797c50d8ae3SPaolo Bonzini 
3798c50d8ae3SPaolo Bonzini 	async = false;
37994a42d848SDavid Stevens 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async,
38004a42d848SDavid Stevens 				    write, writable, hva);
3801c50d8ae3SPaolo Bonzini 	if (!async)
3802c50d8ae3SPaolo Bonzini 		return false; /* *pfn has correct page already */
3803c50d8ae3SPaolo Bonzini 
3804c50d8ae3SPaolo Bonzini 	if (!prefault && kvm_can_do_async_pf(vcpu)) {
38059f1a8526SSean Christopherson 		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
3806c50d8ae3SPaolo Bonzini 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
38079f1a8526SSean Christopherson 			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
3808c50d8ae3SPaolo Bonzini 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3809c50d8ae3SPaolo Bonzini 			return true;
38109f1a8526SSean Christopherson 		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
3811c50d8ae3SPaolo Bonzini 			return true;
3812c50d8ae3SPaolo Bonzini 	}
3813c50d8ae3SPaolo Bonzini 
38144a42d848SDavid Stevens 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
38154a42d848SDavid Stevens 				    write, writable, hva);
3816c50d8ae3SPaolo Bonzini 	return false;
3817c50d8ae3SPaolo Bonzini }
3818c50d8ae3SPaolo Bonzini 
38190f90e1c1SSean Christopherson static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
38200f90e1c1SSean Christopherson 			     bool prefault, int max_level, bool is_tdp)
3821c50d8ae3SPaolo Bonzini {
382263c0cac9SDavid Matlack 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
3823367fd790SSean Christopherson 	bool write = error_code & PFERR_WRITE_MASK;
38240f90e1c1SSean Christopherson 	bool map_writable;
3825c50d8ae3SPaolo Bonzini 
38260f90e1c1SSean Christopherson 	gfn_t gfn = gpa >> PAGE_SHIFT;
38270f90e1c1SSean Christopherson 	unsigned long mmu_seq;
38280f90e1c1SSean Christopherson 	kvm_pfn_t pfn;
38294a42d848SDavid Stevens 	hva_t hva;
383083f06fa7SSean Christopherson 	int r;
3831c50d8ae3SPaolo Bonzini 
3832c50d8ae3SPaolo Bonzini 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
3833c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3834c50d8ae3SPaolo Bonzini 
38350b873fd7SDavid Matlack 	if (!is_tdp_mmu_fault) {
3836c4371c2aSSean Christopherson 		r = fast_page_fault(vcpu, gpa, error_code);
3837c4371c2aSSean Christopherson 		if (r != RET_PF_INVALID)
3838c4371c2aSSean Christopherson 			return r;
3839bb18842eSBen Gardon 	}
384083291445SSean Christopherson 
3841378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, false);
3842c50d8ae3SPaolo Bonzini 	if (r)
3843c50d8ae3SPaolo Bonzini 		return r;
3844c50d8ae3SPaolo Bonzini 
3845367fd790SSean Christopherson 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
3846367fd790SSean Christopherson 	smp_rmb();
3847367fd790SSean Christopherson 
38484a42d848SDavid Stevens 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva,
38494a42d848SDavid Stevens 			 write, &map_writable))
3850367fd790SSean Christopherson 		return RET_PF_RETRY;
3851367fd790SSean Christopherson 
38520f90e1c1SSean Christopherson 	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
3853367fd790SSean Christopherson 		return r;
3854367fd790SSean Christopherson 
3855367fd790SSean Christopherson 	r = RET_PF_RETRY;
3856a2855afcSBen Gardon 
38570b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
3858a2855afcSBen Gardon 		read_lock(&vcpu->kvm->mmu_lock);
3859a2855afcSBen Gardon 	else
3860531810caSBen Gardon 		write_lock(&vcpu->kvm->mmu_lock);
3861a2855afcSBen Gardon 
38624a42d848SDavid Stevens 	if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
3863367fd790SSean Christopherson 		goto out_unlock;
38647bd7ded6SSean Christopherson 	r = make_mmu_pages_available(vcpu);
38657bd7ded6SSean Christopherson 	if (r)
3866367fd790SSean Christopherson 		goto out_unlock;
3867bb18842eSBen Gardon 
38680b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
3869bb18842eSBen Gardon 		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
3870bb18842eSBen Gardon 				    pfn, prefault);
3871bb18842eSBen Gardon 	else
38726c2fd34fSSean Christopherson 		r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
38736c2fd34fSSean Christopherson 				 prefault, is_tdp);
38740f90e1c1SSean Christopherson 
3875367fd790SSean Christopherson out_unlock:
38760b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
3877a2855afcSBen Gardon 		read_unlock(&vcpu->kvm->mmu_lock);
3878a2855afcSBen Gardon 	else
3879531810caSBen Gardon 		write_unlock(&vcpu->kvm->mmu_lock);
3880367fd790SSean Christopherson 	kvm_release_pfn_clean(pfn);
3881367fd790SSean Christopherson 	return r;
3882c50d8ae3SPaolo Bonzini }
3883c50d8ae3SPaolo Bonzini 
38840f90e1c1SSean Christopherson static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
38850f90e1c1SSean Christopherson 				u32 error_code, bool prefault)
38860f90e1c1SSean Christopherson {
38870f90e1c1SSean Christopherson 	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
38880f90e1c1SSean Christopherson 
38890f90e1c1SSean Christopherson 	/* This path builds a PAE pagetable; 2MB pages are the largest we can map. */
38900f90e1c1SSean Christopherson 	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
38913bae0459SSean Christopherson 				 PG_LEVEL_2M, false);
38920f90e1c1SSean Christopherson }
38930f90e1c1SSean Christopherson 
3894c50d8ae3SPaolo Bonzini int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3895c50d8ae3SPaolo Bonzini 				u64 fault_address, char *insn, int insn_len)
3896c50d8ae3SPaolo Bonzini {
3897c50d8ae3SPaolo Bonzini 	int r = 1;
38989ce372b3SVitaly Kuznetsov 	u32 flags = vcpu->arch.apf.host_apf_flags;
3899c50d8ae3SPaolo Bonzini 
3900736c291cSSean Christopherson #ifndef CONFIG_X86_64
3901736c291cSSean Christopherson 	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
3902736c291cSSean Christopherson 	if (WARN_ON_ONCE(fault_address >> 32))
3903736c291cSSean Christopherson 		return -EFAULT;
3904736c291cSSean Christopherson #endif
3905736c291cSSean Christopherson 
3906c50d8ae3SPaolo Bonzini 	vcpu->arch.l1tf_flush_l1d = true;
39079ce372b3SVitaly Kuznetsov 	if (!flags) {
3908c50d8ae3SPaolo Bonzini 		trace_kvm_page_fault(fault_address, error_code);
3909c50d8ae3SPaolo Bonzini 
3910c50d8ae3SPaolo Bonzini 		if (kvm_event_needs_reinjection(vcpu))
3911c50d8ae3SPaolo Bonzini 			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
3912c50d8ae3SPaolo Bonzini 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
3913c50d8ae3SPaolo Bonzini 				insn_len);
39149ce372b3SVitaly Kuznetsov 	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
391568fd66f1SVitaly Kuznetsov 		vcpu->arch.apf.host_apf_flags = 0;
3916c50d8ae3SPaolo Bonzini 		local_irq_disable();
39176bca69adSThomas Gleixner 		kvm_async_pf_task_wait_schedule(fault_address);
3918c50d8ae3SPaolo Bonzini 		local_irq_enable();
39199ce372b3SVitaly Kuznetsov 	} else {
39209ce372b3SVitaly Kuznetsov 		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
3921c50d8ae3SPaolo Bonzini 	}
39229ce372b3SVitaly Kuznetsov 
3923c50d8ae3SPaolo Bonzini 	return r;
3924c50d8ae3SPaolo Bonzini }
3925c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
3926c50d8ae3SPaolo Bonzini 
39277a02674dSSean Christopherson int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
3928c50d8ae3SPaolo Bonzini 		       bool prefault)
3929c50d8ae3SPaolo Bonzini {
3930cb9b88c6SSean Christopherson 	int max_level;
3931c50d8ae3SPaolo Bonzini 
3932e662ec3eSSean Christopherson 	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
39333bae0459SSean Christopherson 	     max_level > PG_LEVEL_4K;
3934cb9b88c6SSean Christopherson 	     max_level--) {
3935cb9b88c6SSean Christopherson 		int page_num = KVM_PAGES_PER_HPAGE(max_level);
39360f90e1c1SSean Christopherson 		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
3937c50d8ae3SPaolo Bonzini 
3938cb9b88c6SSean Christopherson 		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
3939cb9b88c6SSean Christopherson 			break;
3940c50d8ae3SPaolo Bonzini 	}
3941c50d8ae3SPaolo Bonzini 
39420f90e1c1SSean Christopherson 	return direct_page_fault(vcpu, gpa, error_code, prefault,
39430f90e1c1SSean Christopherson 				 max_level, true);
3944c50d8ae3SPaolo Bonzini }
3945c50d8ae3SPaolo Bonzini 
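/*
 * Worked example (illustrative): when the loop above probes whether a 2MiB
 * mapping is allowed, page_num = KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) = 512, so
 * MTRR consistency is checked over the naturally aligned 512-page region
 * containing the faulting gfn, i.e. base = gfn & ~511ULL.
 */
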
394684a16226SSean Christopherson static void nonpaging_init_context(struct kvm_mmu *context)
3947c50d8ae3SPaolo Bonzini {
3948c50d8ae3SPaolo Bonzini 	context->page_fault = nonpaging_page_fault;
3949c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = nonpaging_gva_to_gpa;
3950c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
39515efac074SPaolo Bonzini 	context->invlpg = NULL;
3952c50d8ae3SPaolo Bonzini 	context->root_level = 0;
3953c50d8ae3SPaolo Bonzini 	context->direct_map = true;
3954c50d8ae3SPaolo Bonzini }
3955c50d8ae3SPaolo Bonzini 
3956be01e8e2SSean Christopherson static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
39570be44352SSean Christopherson 				  union kvm_mmu_page_role role)
39580be44352SSean Christopherson {
3959be01e8e2SSean Christopherson 	return (role.direct || pgd == root->pgd) &&
3960e47c4aeeSSean Christopherson 	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
3961e47c4aeeSSean Christopherson 	       role.word == to_shadow_page(root->hpa)->role.word;
39620be44352SSean Christopherson }
39630be44352SSean Christopherson 
3964c50d8ae3SPaolo Bonzini /*
3965be01e8e2SSean Christopherson  * Find out if a previously cached root matching the new pgd/role is available.
3966c50d8ae3SPaolo Bonzini  * The current root is also inserted into the cache.
3967c50d8ae3SPaolo Bonzini  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
3968c50d8ae3SPaolo Bonzini  * returned.
3969c50d8ae3SPaolo Bonzini  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
3970c50d8ae3SPaolo Bonzini  * false is returned. This root should now be freed by the caller.
3971c50d8ae3SPaolo Bonzini  */
3972be01e8e2SSean Christopherson static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3973c50d8ae3SPaolo Bonzini 				  union kvm_mmu_page_role new_role)
3974c50d8ae3SPaolo Bonzini {
3975c50d8ae3SPaolo Bonzini 	uint i;
3976c50d8ae3SPaolo Bonzini 	struct kvm_mmu_root_info root;
3977c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3978c50d8ae3SPaolo Bonzini 
3979be01e8e2SSean Christopherson 	root.pgd = mmu->root_pgd;
3980c50d8ae3SPaolo Bonzini 	root.hpa = mmu->root_hpa;
3981c50d8ae3SPaolo Bonzini 
3982be01e8e2SSean Christopherson 	if (is_root_usable(&root, new_pgd, new_role))
39830be44352SSean Christopherson 		return true;
39840be44352SSean Christopherson 
3985c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3986c50d8ae3SPaolo Bonzini 		swap(root, mmu->prev_roots[i]);
3987c50d8ae3SPaolo Bonzini 
3988be01e8e2SSean Christopherson 		if (is_root_usable(&root, new_pgd, new_role))
3989c50d8ae3SPaolo Bonzini 			break;
3990c50d8ae3SPaolo Bonzini 	}
3991c50d8ae3SPaolo Bonzini 
3992c50d8ae3SPaolo Bonzini 	mmu->root_hpa = root.hpa;
3993be01e8e2SSean Christopherson 	mmu->root_pgd = root.pgd;
3994c50d8ae3SPaolo Bonzini 
3995c50d8ae3SPaolo Bonzini 	return i < KVM_MMU_NUM_PREV_ROOTS;
3996c50d8ae3SPaolo Bonzini }
3997c50d8ae3SPaolo Bonzini 
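/*
 * Worked example (illustrative): assume the current root is X and the cache
 * holds prev_roots = [A, B, C].  A lookup that hits B proceeds as:
 *
 *	swap #0: prev_roots[0] = X, candidate = A   (no match)
 *	swap #1: prev_roots[1] = A, candidate = B   (match, break)
 *
 * leaving mmu->root_hpa/root_pgd = B and prev_roots = [X, A, C], i.e. the
 * displaced roots slide one slot down the cache.
 */
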
3998be01e8e2SSean Christopherson static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3999b869855bSSean Christopherson 			    union kvm_mmu_page_role new_role)
4000c50d8ae3SPaolo Bonzini {
4001c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4002c50d8ae3SPaolo Bonzini 
4003c50d8ae3SPaolo Bonzini 	/*
4004c50d8ae3SPaolo Bonzini 	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
4005c50d8ae3SPaolo Bonzini 	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
4006c50d8ae3SPaolo Bonzini 	 * later if necessary.
4007c50d8ae3SPaolo Bonzini 	 */
4008c50d8ae3SPaolo Bonzini 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
4009b869855bSSean Christopherson 	    mmu->root_level >= PT64_ROOT_4LEVEL)
4010fe9304d3SVitaly Kuznetsov 		return cached_root_available(vcpu, new_pgd, new_role);
4011c50d8ae3SPaolo Bonzini 
4012c50d8ae3SPaolo Bonzini 	return false;
4013c50d8ae3SPaolo Bonzini }
4014c50d8ae3SPaolo Bonzini 
4015be01e8e2SSean Christopherson static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4016b5129100SSean Christopherson 			      union kvm_mmu_page_role new_role)
4017c50d8ae3SPaolo Bonzini {
4018be01e8e2SSean Christopherson 	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
4019b869855bSSean Christopherson 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
4020b869855bSSean Christopherson 		return;
4021c50d8ae3SPaolo Bonzini 	}
4022c50d8ae3SPaolo Bonzini 
4023c50d8ae3SPaolo Bonzini 	/*
4024b869855bSSean Christopherson 	 * It's possible that the cached previous root page is obsolete because
4025b869855bSSean Christopherson 	 * of a change in the MMU generation number. However, changing the
4026b869855bSSean Christopherson 	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
4027b869855bSSean Christopherson 	 * free the root set here and allocate a new one.
4028b869855bSSean Christopherson 	 */
4029b869855bSSean Christopherson 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4030b869855bSSean Christopherson 
4031b5129100SSean Christopherson 	if (force_flush_and_sync_on_reuse) {
4032b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4033b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4034b5129100SSean Christopherson 	}
4035b869855bSSean Christopherson 
4036b869855bSSean Christopherson 	/*
4037b869855bSSean Christopherson 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4038b869855bSSean Christopherson 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
4039b869855bSSean Christopherson 	 * valid. So clear any cached MMIO info even when we don't need to sync
4040b869855bSSean Christopherson 	 * the shadow page tables.
4041c50d8ae3SPaolo Bonzini 	 */
4042c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4043c50d8ae3SPaolo Bonzini 
4044daa5b6c1SBen Gardon 	/*
4045daa5b6c1SBen Gardon 	 * If this is a direct root page, it doesn't have a write flooding
4046daa5b6c1SBen Gardon 	 * count. Otherwise, clear the write flooding count.
4047daa5b6c1SBen Gardon 	 */
4048daa5b6c1SBen Gardon 	if (!new_role.direct)
4049daa5b6c1SBen Gardon 		__clear_sp_write_flooding_count(
4050daa5b6c1SBen Gardon 				to_shadow_page(vcpu->arch.mmu->root_hpa));
4051c50d8ae3SPaolo Bonzini }
4052c50d8ae3SPaolo Bonzini 
4053b5129100SSean Christopherson void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4054c50d8ae3SPaolo Bonzini {
4055b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
4056c50d8ae3SPaolo Bonzini }
4057be01e8e2SSean Christopherson EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4058c50d8ae3SPaolo Bonzini 
4059c50d8ae3SPaolo Bonzini static unsigned long get_cr3(struct kvm_vcpu *vcpu)
4060c50d8ae3SPaolo Bonzini {
4061c50d8ae3SPaolo Bonzini 	return kvm_read_cr3(vcpu);
4062c50d8ae3SPaolo Bonzini }
4063c50d8ae3SPaolo Bonzini 
4064c50d8ae3SPaolo Bonzini static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
40650a2b64c5SBen Gardon 			   unsigned int access, int *nr_present)
4066c50d8ae3SPaolo Bonzini {
4067c50d8ae3SPaolo Bonzini 	if (unlikely(is_mmio_spte(*sptep))) {
4068c50d8ae3SPaolo Bonzini 		if (gfn != get_mmio_spte_gfn(*sptep)) {
4069c50d8ae3SPaolo Bonzini 			mmu_spte_clear_no_track(sptep);
4070c50d8ae3SPaolo Bonzini 			return true;
4071c50d8ae3SPaolo Bonzini 		}
4072c50d8ae3SPaolo Bonzini 
4073c50d8ae3SPaolo Bonzini 		(*nr_present)++;
4074c50d8ae3SPaolo Bonzini 		mark_mmio_spte(vcpu, sptep, gfn, access);
4075c50d8ae3SPaolo Bonzini 		return true;
4076c50d8ae3SPaolo Bonzini 	}
4077c50d8ae3SPaolo Bonzini 
4078c50d8ae3SPaolo Bonzini 	return false;
4079c50d8ae3SPaolo Bonzini }
4080c50d8ae3SPaolo Bonzini 
4081c50d8ae3SPaolo Bonzini static inline bool is_last_gpte(struct kvm_mmu *mmu,
4082c50d8ae3SPaolo Bonzini 				unsigned level, unsigned gpte)
4083c50d8ae3SPaolo Bonzini {
4084c50d8ae3SPaolo Bonzini 	/*
4085c50d8ae3SPaolo Bonzini 	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
4086c50d8ae3SPaolo Bonzini 	 * If it is clear, there are no large pages at this level, so clear
4087c50d8ae3SPaolo Bonzini 	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
4088c50d8ae3SPaolo Bonzini 	 */
4089c50d8ae3SPaolo Bonzini 	gpte &= level - mmu->last_nonleaf_level;
4090c50d8ae3SPaolo Bonzini 
4091c50d8ae3SPaolo Bonzini 	/*
40923bae0459SSean Christopherson 	 * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
40933bae0459SSean Christopherson 	 * iff level <= PG_LEVEL_4K, which for our purpose means
40943bae0459SSean Christopherson 	 * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
4095c50d8ae3SPaolo Bonzini 	 */
40963bae0459SSean Christopherson 	gpte |= level - PG_LEVEL_4K - 1;
4097c50d8ae3SPaolo Bonzini 
4098c50d8ae3SPaolo Bonzini 	return gpte & PT_PAGE_SIZE_MASK;
4099c50d8ae3SPaolo Bonzini }
4100c50d8ae3SPaolo Bonzini 
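/*
 * Worked arithmetic (illustrative, with hypothetical values): assume
 * mmu->last_nonleaf_level == 3 and a PDE at level 2 with PT_PAGE_SIZE_MASK
 * (bit 7) set:
 *
 *	gpte &= 2 - 3;		// RHS is all ones, bit 7 survives
 *	gpte |= 2 - 1 - 1;	// RHS is 0, gpte unchanged
 *
 * so the PDE is treated as a last-level (large page) entry.  For a PDPTE at
 * level 3 with these values, the first RHS is 0 and bit 7 is cleared, so the
 * entry is never treated as a leaf.
 */
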
4101c50d8ae3SPaolo Bonzini #define PTTYPE_EPT 18 /* arbitrary */
4102c50d8ae3SPaolo Bonzini #define PTTYPE PTTYPE_EPT
4103c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4104c50d8ae3SPaolo Bonzini #undef PTTYPE
4105c50d8ae3SPaolo Bonzini 
4106c50d8ae3SPaolo Bonzini #define PTTYPE 64
4107c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4108c50d8ae3SPaolo Bonzini #undef PTTYPE
4109c50d8ae3SPaolo Bonzini 
4110c50d8ae3SPaolo Bonzini #define PTTYPE 32
4111c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4112c50d8ae3SPaolo Bonzini #undef PTTYPE
4113c50d8ae3SPaolo Bonzini 
4114c50d8ae3SPaolo Bonzini static void
4115b705a277SSean Christopherson __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
41165b7f575cSSean Christopherson 			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4117c50d8ae3SPaolo Bonzini 			bool pse, bool amd)
4118c50d8ae3SPaolo Bonzini {
4119c50d8ae3SPaolo Bonzini 	u64 gbpages_bit_rsvd = 0;
4120c50d8ae3SPaolo Bonzini 	u64 nonleaf_bit8_rsvd = 0;
41215b7f575cSSean Christopherson 	u64 high_bits_rsvd;
4122c50d8ae3SPaolo Bonzini 
4123c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = 0;
4124c50d8ae3SPaolo Bonzini 
4125c50d8ae3SPaolo Bonzini 	if (!gbpages)
4126c50d8ae3SPaolo Bonzini 		gbpages_bit_rsvd = rsvd_bits(7, 7);
4127c50d8ae3SPaolo Bonzini 
41285b7f575cSSean Christopherson 	if (level == PT32E_ROOT_LEVEL)
41295b7f575cSSean Christopherson 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
41305b7f575cSSean Christopherson 	else
41315b7f575cSSean Christopherson 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
41325b7f575cSSean Christopherson 
41335b7f575cSSean Christopherson 	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
41345b7f575cSSean Christopherson 	if (!nx)
41355b7f575cSSean Christopherson 		high_bits_rsvd |= rsvd_bits(63, 63);
41365b7f575cSSean Christopherson 
4137c50d8ae3SPaolo Bonzini 	/*
4138c50d8ae3SPaolo Bonzini 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4139c50d8ae3SPaolo Bonzini 	 * leaf entries) on AMD CPUs only.
4140c50d8ae3SPaolo Bonzini 	 */
4141c50d8ae3SPaolo Bonzini 	if (amd)
4142c50d8ae3SPaolo Bonzini 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4143c50d8ae3SPaolo Bonzini 
4144c50d8ae3SPaolo Bonzini 	switch (level) {
4145c50d8ae3SPaolo Bonzini 	case PT32_ROOT_LEVEL:
4146c50d8ae3SPaolo Bonzini 		/* no rsvd bits for 2 level 4K page table entries */
4147c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][1] = 0;
4148c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][0] = 0;
4149c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4150c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4151c50d8ae3SPaolo Bonzini 
4152c50d8ae3SPaolo Bonzini 		if (!pse) {
4153c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = 0;
4154c50d8ae3SPaolo Bonzini 			break;
4155c50d8ae3SPaolo Bonzini 		}
4156c50d8ae3SPaolo Bonzini 
4157c50d8ae3SPaolo Bonzini 		if (is_cpuid_PSE36())
4158c50d8ae3SPaolo Bonzini 			/* 36bits PSE 4MB page */
4159c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4160c50d8ae3SPaolo Bonzini 		else
4161c50d8ae3SPaolo Bonzini 			/* 32 bits PSE 4MB page */
4162c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4163c50d8ae3SPaolo Bonzini 		break;
4164c50d8ae3SPaolo Bonzini 	case PT32E_ROOT_LEVEL:
41655b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
41665b7f575cSSean Christopherson 						   high_bits_rsvd |
41675b7f575cSSean Christopherson 						   rsvd_bits(5, 8) |
41685b7f575cSSean Christopherson 						   rsvd_bits(1, 2);	/* PDPTE */
41695b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
41705b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
41715b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4172c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 20);	/* large page */
4173c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4174c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4175c50d8ae3SPaolo Bonzini 		break;
4176c50d8ae3SPaolo Bonzini 	case PT64_ROOT_5LEVEL:
41775b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
41785b7f575cSSean Christopherson 						   nonleaf_bit8_rsvd |
41795b7f575cSSean Christopherson 						   rsvd_bits(7, 7);
4180c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][4] =
4181c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][4];
4182df561f66SGustavo A. R. Silva 		fallthrough;
4183c50d8ae3SPaolo Bonzini 	case PT64_ROOT_4LEVEL:
41845b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
41855b7f575cSSean Christopherson 						   nonleaf_bit8_rsvd |
41865b7f575cSSean Christopherson 						   rsvd_bits(7, 7);
41875b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
41885b7f575cSSean Christopherson 						   gbpages_bit_rsvd;
41895b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
41905b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4191c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][3] =
4192c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][3];
41935b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
41945b7f575cSSean Christopherson 						   gbpages_bit_rsvd |
4195c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 29);
41965b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4197c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 20); /* large page */
4198c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4199c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4200c50d8ae3SPaolo Bonzini 		break;
4201c50d8ae3SPaolo Bonzini 	}
4202c50d8ae3SPaolo Bonzini }
4203c50d8ae3SPaolo Bonzini 
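/*
 * Worked example (illustrative): rsvd_bits(s, e) builds a mask with bits
 * s..e set, so the rsvd_bits(13, 20) term used for the large-page entries
 * above flags a 2MiB PDE with any of bits 13-20 set; in a valid 2MiB PDE
 * the physical address starts at bit 21 and bit 12 holds PAT, so bits 13-20
 * must be zero.
 */
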
4204c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4205c50d8ae3SPaolo Bonzini 				  struct kvm_mmu *context)
4206c50d8ae3SPaolo Bonzini {
4207b705a277SSean Christopherson 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
42085b7f575cSSean Christopherson 				vcpu->arch.reserved_gpa_bits,
420990599c28SSean Christopherson 				context->root_level, is_efer_nx(context),
4210c50d8ae3SPaolo Bonzini 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
42114e9c0d80SSean Christopherson 				is_cr4_pse(context),
421223493d0aSSean Christopherson 				guest_cpuid_is_amd_or_hygon(vcpu));
4213c50d8ae3SPaolo Bonzini }
4214c50d8ae3SPaolo Bonzini 
4215c50d8ae3SPaolo Bonzini static void
4216c50d8ae3SPaolo Bonzini __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
42175b7f575cSSean Christopherson 			    u64 pa_bits_rsvd, bool execonly)
4218c50d8ae3SPaolo Bonzini {
42195b7f575cSSean Christopherson 	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4220c50d8ae3SPaolo Bonzini 	u64 bad_mt_xwr;
4221c50d8ae3SPaolo Bonzini 
42225b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
42235b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
42245b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
42255b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
42265b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4227c50d8ae3SPaolo Bonzini 
4228c50d8ae3SPaolo Bonzini 	/* large page */
4229c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4230c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
42315b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
42325b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
4233c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4234c50d8ae3SPaolo Bonzini 
4235c50d8ae3SPaolo Bonzini 	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
4236c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
4237c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
4238c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
4239c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
4240c50d8ae3SPaolo Bonzini 	if (!execonly) {
4241c50d8ae3SPaolo Bonzini 		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
4242c50d8ae3SPaolo Bonzini 		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4243c50d8ae3SPaolo Bonzini 	}
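	/*
	 * The resulting mask is consumed by treating bits 5:0 of an EPT SPTE
	 * as a bit index into bad_mt_xwr (see __is_bad_mt_xwr()).  Worked
	 * example: a write-only WB mapping has XWR = 010b and memtype = 6,
	 * so bits 5:0 = 110010b = 50; bit 50 is set by the
	 * REPEAT_BYTE(1ull << 2) term above, hence the PTE is rejected.
	 */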
4244c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4245c50d8ae3SPaolo Bonzini }
4246c50d8ae3SPaolo Bonzini 
4247c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4248c50d8ae3SPaolo Bonzini 		struct kvm_mmu *context, bool execonly)
4249c50d8ae3SPaolo Bonzini {
4250c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
42515b7f575cSSean Christopherson 				    vcpu->arch.reserved_gpa_bits, execonly);
4252c50d8ae3SPaolo Bonzini }
4253c50d8ae3SPaolo Bonzini 
42546f8e65a6SSean Christopherson static inline u64 reserved_hpa_bits(void)
42556f8e65a6SSean Christopherson {
42566f8e65a6SSean Christopherson 	return rsvd_bits(shadow_phys_bits, 63);
42576f8e65a6SSean Christopherson }
42586f8e65a6SSean Christopherson 
4259c50d8ae3SPaolo Bonzini /*
4260c50d8ae3SPaolo Bonzini  * The page table on the host is the shadow page table for the page
4261c50d8ae3SPaolo Bonzini  * table in the guest or in an AMD nested guest; its MMU features
4262c50d8ae3SPaolo Bonzini  * completely follow the features of the guest.
4263c50d8ae3SPaolo Bonzini  */
426416be1d12SSean Christopherson static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
426516be1d12SSean Christopherson 					struct kvm_mmu *context)
4266c50d8ae3SPaolo Bonzini {
4267112022bdSSean Christopherson 	/*
4268112022bdSSean Christopherson 	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
4269112022bdSSean Christopherson 	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
4270112022bdSSean Christopherson 	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
4271112022bdSSean Christopherson 	 * The iTLB multi-hit workaround can be toggled at any time, so assume
4272112022bdSSean Christopherson 	 * NX can be used by any non-nested shadow MMU to avoid having to reset
4273112022bdSSean Christopherson 	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
4274112022bdSSean Christopherson 	 */
427590599c28SSean Christopherson 	bool uses_nx = is_efer_nx(context) || !tdp_enabled;
42768c985b2dSSean Christopherson 
42778c985b2dSSean Christopherson 	/* @amd adds a check on bit 8 of non-leaf SPTEs, which KVM shouldn't set anyway. */
42788c985b2dSSean Christopherson 	bool is_amd = true;
42798c985b2dSSean Christopherson 	/* KVM doesn't use 2-level page tables for the shadow MMU. */
42808c985b2dSSean Christopherson 	bool is_pse = false;
4281c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4282c50d8ae3SPaolo Bonzini 	int i;
4283c50d8ae3SPaolo Bonzini 
42848c985b2dSSean Christopherson 	WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
42858c985b2dSSean Christopherson 
4286c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4287b705a277SSean Christopherson 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4288c50d8ae3SPaolo Bonzini 				context->shadow_root_level, uses_nx,
4289c50d8ae3SPaolo Bonzini 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
42908c985b2dSSean Christopherson 				is_pse, is_amd);
4291c50d8ae3SPaolo Bonzini 
4292c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4293c50d8ae3SPaolo Bonzini 		return;
4294c50d8ae3SPaolo Bonzini 
4295c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4296c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4297c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4298c50d8ae3SPaolo Bonzini 	}
4299c50d8ae3SPaolo Bonzini 
4300c50d8ae3SPaolo Bonzini }
4301c50d8ae3SPaolo Bonzini 
4302c50d8ae3SPaolo Bonzini static inline bool boot_cpu_is_amd(void)
4303c50d8ae3SPaolo Bonzini {
4304c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!tdp_enabled);
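	/*
	 * shadow_x_mask is non-zero only when EPT's executable bit is in
	 * use; with TDP enabled, a zero mask therefore implies AMD-style NPT.
	 */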
4305c50d8ae3SPaolo Bonzini 	return shadow_x_mask == 0;
4306c50d8ae3SPaolo Bonzini }
4307c50d8ae3SPaolo Bonzini 
4308c50d8ae3SPaolo Bonzini /*
4309c50d8ae3SPaolo Bonzini  * The direct page table on the host uses as many MMU features as
4310c50d8ae3SPaolo Bonzini  * possible; however, KVM currently does not do execution-protection.
4311c50d8ae3SPaolo Bonzini  */
4312c50d8ae3SPaolo Bonzini static void
4313c50d8ae3SPaolo Bonzini reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4314c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context)
4315c50d8ae3SPaolo Bonzini {
4316c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4317c50d8ae3SPaolo Bonzini 	int i;
4318c50d8ae3SPaolo Bonzini 
4319c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4320c50d8ae3SPaolo Bonzini 
4321c50d8ae3SPaolo Bonzini 	if (boot_cpu_is_amd())
4322b705a277SSean Christopherson 		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4323c50d8ae3SPaolo Bonzini 					context->shadow_root_level, false,
4324c50d8ae3SPaolo Bonzini 					boot_cpu_has(X86_FEATURE_GBPAGES),
43258c985b2dSSean Christopherson 					false, true);
4326c50d8ae3SPaolo Bonzini 	else
4327c50d8ae3SPaolo Bonzini 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
43286f8e65a6SSean Christopherson 					    reserved_hpa_bits(), false);
4329c50d8ae3SPaolo Bonzini 
4330c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4331c50d8ae3SPaolo Bonzini 		return;
4332c50d8ae3SPaolo Bonzini 
4333c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4334c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4335c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4336c50d8ae3SPaolo Bonzini 	}
4337c50d8ae3SPaolo Bonzini }
4338c50d8ae3SPaolo Bonzini 
4339c50d8ae3SPaolo Bonzini /*
4340c50d8ae3SPaolo Bonzini  * Same as the comment for reset_shadow_zero_bits_mask(), except this
4341c50d8ae3SPaolo Bonzini  * is the shadow page table for an Intel nested guest.
4342c50d8ae3SPaolo Bonzini  */
4343c50d8ae3SPaolo Bonzini static void
4344c50d8ae3SPaolo Bonzini reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4345c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context, bool execonly)
4346c50d8ae3SPaolo Bonzini {
4347c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
43486f8e65a6SSean Christopherson 				    reserved_hpa_bits(), execonly);
4349c50d8ae3SPaolo Bonzini }
4350c50d8ae3SPaolo Bonzini 
4351c50d8ae3SPaolo Bonzini #define BYTE_MASK(access) \
4352c50d8ae3SPaolo Bonzini 	((1 & (access) ? 2 : 0) | \
4353c50d8ae3SPaolo Bonzini 	 (2 & (access) ? 4 : 0) | \
4354c50d8ae3SPaolo Bonzini 	 (3 & (access) ? 8 : 0) | \
4355c50d8ae3SPaolo Bonzini 	 (4 & (access) ? 16 : 0) | \
4356c50d8ae3SPaolo Bonzini 	 (5 & (access) ? 32 : 0) | \
4357c50d8ae3SPaolo Bonzini 	 (6 & (access) ? 64 : 0) | \
4358c50d8ae3SPaolo Bonzini 	 (7 & (access) ? 128 : 0))
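
/*
 * Worked example, assuming the usual ACC_* encoding (ACC_EXEC_MASK == 1,
 * ACC_WRITE_MASK == 2, ACC_USER_MASK == 4): BYTE_MASK(ACC_WRITE_MASK) sets
 * bit i for every access combination i (0..7) that includes write, i.e.
 * i = 2, 3, 6, 7, giving 0xcc.  Likewise BYTE_MASK(ACC_EXEC_MASK) is 0xaa
 * and BYTE_MASK(ACC_USER_MASK) is 0xf0.  Bit i of each mask thus answers
 * "does pte_access value i include this permission?".
 */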
4359c50d8ae3SPaolo Bonzini 
4360c50d8ae3SPaolo Bonzini 
4361c596f147SSean Christopherson static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
4362c50d8ae3SPaolo Bonzini {
4363c50d8ae3SPaolo Bonzini 	unsigned byte;
4364c50d8ae3SPaolo Bonzini 
4365c50d8ae3SPaolo Bonzini 	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4366c50d8ae3SPaolo Bonzini 	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4367c50d8ae3SPaolo Bonzini 	const u8 u = BYTE_MASK(ACC_USER_MASK);
4368c50d8ae3SPaolo Bonzini 
4369c596f147SSean Christopherson 	bool cr4_smep = is_cr4_smep(mmu);
4370c596f147SSean Christopherson 	bool cr4_smap = is_cr4_smap(mmu);
4371c596f147SSean Christopherson 	bool cr0_wp = is_cr0_wp(mmu);
437290599c28SSean Christopherson 	bool efer_nx = is_efer_nx(mmu);
4373c50d8ae3SPaolo Bonzini 
4374c50d8ae3SPaolo Bonzini 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4375c50d8ae3SPaolo Bonzini 		unsigned pfec = byte << 1;
4376c50d8ae3SPaolo Bonzini 
4377c50d8ae3SPaolo Bonzini 		/*
4378c50d8ae3SPaolo Bonzini 		 * Each "*f" variable has a 1 bit for each UWX value
4379c50d8ae3SPaolo Bonzini 		 * that causes a fault with the given PFEC.
4380c50d8ae3SPaolo Bonzini 		 */
4381c50d8ae3SPaolo Bonzini 
4382c50d8ae3SPaolo Bonzini 		/* Faults from writes to non-writable pages */
4383c50d8ae3SPaolo Bonzini 		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4384c50d8ae3SPaolo Bonzini 		/* Faults from user mode accesses to supervisor pages */
4385c50d8ae3SPaolo Bonzini 		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4386c50d8ae3SPaolo Bonzini 		/* Faults from fetches of non-executable pages*/
4387c50d8ae3SPaolo Bonzini 		/* Faults from fetches of non-executable pages */
4388c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode fetches of user pages */
4389c50d8ae3SPaolo Bonzini 		u8 smepf = 0;
4390c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode accesses of user pages */
4391c50d8ae3SPaolo Bonzini 		u8 smapf = 0;
4392c50d8ae3SPaolo Bonzini 
4393c50d8ae3SPaolo Bonzini 		if (!ept) {
4394c50d8ae3SPaolo Bonzini 			/* Faults from kernel mode accesses to user pages */
4395c50d8ae3SPaolo Bonzini 			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4396c50d8ae3SPaolo Bonzini 
4397c50d8ae3SPaolo Bonzini 			/* Not really needed: !nx will cause pte.nx to fault */
439890599c28SSean Christopherson 			if (!efer_nx)
4399c50d8ae3SPaolo Bonzini 				ff = 0;
4400c50d8ae3SPaolo Bonzini 
4401c50d8ae3SPaolo Bonzini 			/* Allow supervisor writes if !cr0.wp */
4402c50d8ae3SPaolo Bonzini 			if (!cr0_wp)
4403c50d8ae3SPaolo Bonzini 				wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4404c50d8ae3SPaolo Bonzini 
4405c50d8ae3SPaolo Bonzini 			/* Disallow supervisor fetches of user code if cr4.smep */
4406c50d8ae3SPaolo Bonzini 			if (cr4_smep)
4407c50d8ae3SPaolo Bonzini 				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4408c50d8ae3SPaolo Bonzini 
4409c50d8ae3SPaolo Bonzini 			/*
4410c50d8ae3SPaolo Bonzini 			 * SMAP: kernel-mode data accesses from user-mode
4411c50d8ae3SPaolo Bonzini 			 * mappings should fault. A fault is considered
4412c50d8ae3SPaolo Bonzini 			 * as a SMAP violation if all of the following
4413c50d8ae3SPaolo Bonzini 			 * conditions are true:
4414c50d8ae3SPaolo Bonzini 			 *   - X86_CR4_SMAP is set in CR4
4415c50d8ae3SPaolo Bonzini 			 *   - A user page is accessed
4416c50d8ae3SPaolo Bonzini 			 *   - The access is not a fetch
4417c50d8ae3SPaolo Bonzini 			 *   - Page fault in kernel mode
4418c50d8ae3SPaolo Bonzini 			 *   - CPL = 3 or X86_EFLAGS_AC is clear
4419c50d8ae3SPaolo Bonzini 			 *
4420c50d8ae3SPaolo Bonzini 			 * Here, we cover the first four conditions.
4421c50d8ae3SPaolo Bonzini 			 * The fifth is computed dynamically in permission_fault();
4422c50d8ae3SPaolo Bonzini 			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4423c50d8ae3SPaolo Bonzini 			 * *not* subject to SMAP restrictions.
4424c50d8ae3SPaolo Bonzini 			 */
4425c50d8ae3SPaolo Bonzini 			if (cr4_smap)
4426c50d8ae3SPaolo Bonzini 				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4427c50d8ae3SPaolo Bonzini 		}
4428c50d8ae3SPaolo Bonzini 
4429c50d8ae3SPaolo Bonzini 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4430c50d8ae3SPaolo Bonzini 	}
4431c50d8ae3SPaolo Bonzini }
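
/*
 * Consumption sketch (see permission_fault()): the fault's PFEC selects a
 * byte via (pfec >> 1), and the pte's combined ACC_* bits select a bit in
 * that byte; a set bit means "fault".  E.g. with CR0.WP=1 and SMEP/SMAP
 * clear, a supervisor write (PFEC = W) yields wf = ~w = 0x33, so only
 * pte_access values lacking ACC_WRITE_MASK (0, 1, 4, 5) fault.
 */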
4432c50d8ae3SPaolo Bonzini 
4433c50d8ae3SPaolo Bonzini /*
4434c50d8ae3SPaolo Bonzini  * PKU is an additional mechanism by which the paging controls access to
4435c50d8ae3SPaolo Bonzini  * user-mode addresses based on the value in the PKRU register.  Protection
4436c50d8ae3SPaolo Bonzini  * key violations are reported through a bit in the page fault error code.
4437c50d8ae3SPaolo Bonzini  * Unlike other bits of the error code, the PK bit is not known at the
4438c50d8ae3SPaolo Bonzini  * call site of e.g. gva_to_gpa; it must be computed directly in
4439c50d8ae3SPaolo Bonzini  * permission_fault based on two bits of PKRU, on some machine state (CR4,
4440c50d8ae3SPaolo Bonzini  * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4441c50d8ae3SPaolo Bonzini  *
4442c50d8ae3SPaolo Bonzini  * In particular the following conditions come from the error code, the
4443c50d8ae3SPaolo Bonzini  * page tables and the machine state:
4444c50d8ae3SPaolo Bonzini  * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4445c50d8ae3SPaolo Bonzini  * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4446c50d8ae3SPaolo Bonzini  * - PK is always zero if U=0 in the page tables
4447c50d8ae3SPaolo Bonzini  * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4448c50d8ae3SPaolo Bonzini  *
4449c50d8ae3SPaolo Bonzini  * The PKRU bitmask caches the result of these four conditions.  The error
4450c50d8ae3SPaolo Bonzini  * code (minus the P bit) and the page table's U bit form an index into the
4451c50d8ae3SPaolo Bonzini  * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4452c50d8ae3SPaolo Bonzini  * with the two bits of the PKRU register corresponding to the protection key.
4453c50d8ae3SPaolo Bonzini  * For the first three conditions above the bits will be 00, thus masking
4454c50d8ae3SPaolo Bonzini  * away both AD and WD.  For all reads or if the last condition holds, only
4455c50d8ae3SPaolo Bonzini  * WD will be masked away.
4456c50d8ae3SPaolo Bonzini  */
44572e4c0661SSean Christopherson static void update_pkru_bitmask(struct kvm_mmu *mmu)
4458c50d8ae3SPaolo Bonzini {
4459c50d8ae3SPaolo Bonzini 	unsigned bit;
4460c50d8ae3SPaolo Bonzini 	bool wp;
4461c50d8ae3SPaolo Bonzini 
44622e4c0661SSean Christopherson 	if (!is_cr4_pke(mmu)) {
4463c50d8ae3SPaolo Bonzini 		mmu->pkru_mask = 0;
4464c50d8ae3SPaolo Bonzini 		return;
4465c50d8ae3SPaolo Bonzini 	}
4466c50d8ae3SPaolo Bonzini 
44672e4c0661SSean Christopherson 	wp = is_cr0_wp(mmu);
4468c50d8ae3SPaolo Bonzini 
4469c50d8ae3SPaolo Bonzini 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4470c50d8ae3SPaolo Bonzini 		unsigned pfec, pkey_bits;
4471c50d8ae3SPaolo Bonzini 		bool check_pkey, check_write, ff, uf, wf, pte_user;
4472c50d8ae3SPaolo Bonzini 
4473c50d8ae3SPaolo Bonzini 		pfec = bit << 1;
4474c50d8ae3SPaolo Bonzini 		ff = pfec & PFERR_FETCH_MASK;
4475c50d8ae3SPaolo Bonzini 		uf = pfec & PFERR_USER_MASK;
4476c50d8ae3SPaolo Bonzini 		wf = pfec & PFERR_WRITE_MASK;
4477c50d8ae3SPaolo Bonzini 
4478c50d8ae3SPaolo Bonzini 		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
4479c50d8ae3SPaolo Bonzini 		pte_user = pfec & PFERR_RSVD_MASK;
4480c50d8ae3SPaolo Bonzini 
4481c50d8ae3SPaolo Bonzini 		/*
4482c50d8ae3SPaolo Bonzini 		 * Only need to check the access which is not an
4483c50d8ae3SPaolo Bonzini 		 * instruction fetch and is to a user page.
4484c50d8ae3SPaolo Bonzini 		 */
4485c50d8ae3SPaolo Bonzini 		check_pkey = (!ff && pte_user);
4486c50d8ae3SPaolo Bonzini 		/*
4487c50d8ae3SPaolo Bonzini 		 * write access is controlled by PKRU if it is a
4488c50d8ae3SPaolo Bonzini 		 * user access or CR0.WP = 1.
4489c50d8ae3SPaolo Bonzini 		 */
4490c50d8ae3SPaolo Bonzini 		check_write = check_pkey && wf && (uf || wp);
4491c50d8ae3SPaolo Bonzini 
4492c50d8ae3SPaolo Bonzini 		/* PKRU.AD stops both read and write access. */
4493c50d8ae3SPaolo Bonzini 		pkey_bits = !!check_pkey;
4494c50d8ae3SPaolo Bonzini 		/* PKRU.WD stops write access. */
4495c50d8ae3SPaolo Bonzini 		pkey_bits |= (!!check_write) << 1;
4496c50d8ae3SPaolo Bonzini 
4497c50d8ae3SPaolo Bonzini 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4498c50d8ae3SPaolo Bonzini 	}
4499c50d8ae3SPaolo Bonzini }
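
/*
 * Example: with CR4.PKE=1 and CR0.WP=1, a supervisor write to a user page
 * (wf && !uf && wp) has check_pkey and check_write true, so both the AD
 * and WD bits are set for that index; permission_fault() then ANDs them
 * with the page's PKRU key bits, and either PKRU.AD or PKRU.WD triggers a
 * fault.  A supervisor read of the same page sets only the AD bit.
 */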
4500c50d8ae3SPaolo Bonzini 
4501b67a93a8SSean Christopherson static void update_last_nonleaf_level(struct kvm_mmu *mmu)
4502c50d8ae3SPaolo Bonzini {
4503c50d8ae3SPaolo Bonzini 	unsigned root_level = mmu->root_level;
4504c50d8ae3SPaolo Bonzini 
4505c50d8ae3SPaolo Bonzini 	mmu->last_nonleaf_level = root_level;
4506b67a93a8SSean Christopherson 	if (root_level == PT32_ROOT_LEVEL && is_cr4_pse(mmu))
4507c50d8ae3SPaolo Bonzini 		mmu->last_nonleaf_level++;
4508c50d8ae3SPaolo Bonzini }
4509c50d8ae3SPaolo Bonzini 
4510*533f9a4bSSean Christopherson static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
4511*533f9a4bSSean Christopherson 					struct kvm_mmu *mmu)
4512*533f9a4bSSean Christopherson {
4513*533f9a4bSSean Christopherson 	if (!is_cr0_pg(mmu))
4514*533f9a4bSSean Christopherson 		return;
4515*533f9a4bSSean Christopherson 
4516*533f9a4bSSean Christopherson 	reset_rsvds_bits_mask(vcpu, mmu);
4517*533f9a4bSSean Christopherson 	update_permission_bitmask(mmu, false);
4518*533f9a4bSSean Christopherson 	update_pkru_bitmask(mmu);
4519*533f9a4bSSean Christopherson 	update_last_nonleaf_level(mmu);
4520*533f9a4bSSean Christopherson }
4521*533f9a4bSSean Christopherson 
452284a16226SSean Christopherson static void paging64_init_context_common(struct kvm_mmu *context,
4523d555f705SSean Christopherson 					 int root_level)
4524c50d8ae3SPaolo Bonzini {
4525d555f705SSean Christopherson 	context->root_level = root_level;
4526c50d8ae3SPaolo Bonzini 
452784a16226SSean Christopherson 	WARN_ON_ONCE(!is_cr4_pae(context));
4528c50d8ae3SPaolo Bonzini 	context->page_fault = paging64_page_fault;
4529c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging64_gva_to_gpa;
4530c50d8ae3SPaolo Bonzini 	context->sync_page = paging64_sync_page;
4531c50d8ae3SPaolo Bonzini 	context->invlpg = paging64_invlpg;
4532c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4533c50d8ae3SPaolo Bonzini }
4534c50d8ae3SPaolo Bonzini 
453584a16226SSean Christopherson static void paging64_init_context(struct kvm_mmu *context,
453684a16226SSean Christopherson 				  struct kvm_mmu_role_regs *regs)
4537c50d8ae3SPaolo Bonzini {
453884a16226SSean Christopherson 	int root_level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
453984a16226SSean Christopherson 						 PT64_ROOT_4LEVEL;
4540c50d8ae3SPaolo Bonzini 
454184a16226SSean Christopherson 	paging64_init_context_common(context, root_level);
4542c50d8ae3SPaolo Bonzini }
4543c50d8ae3SPaolo Bonzini 
454484a16226SSean Christopherson static void paging32_init_context(struct kvm_mmu *context)
4545c50d8ae3SPaolo Bonzini {
4546c50d8ae3SPaolo Bonzini 	context->root_level = PT32_ROOT_LEVEL;
4547c50d8ae3SPaolo Bonzini 	context->page_fault = paging32_page_fault;
4548c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging32_gva_to_gpa;
4549c50d8ae3SPaolo Bonzini 	context->sync_page = paging32_sync_page;
4550c50d8ae3SPaolo Bonzini 	context->invlpg = paging32_invlpg;
4551c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4552c50d8ae3SPaolo Bonzini }
4553c50d8ae3SPaolo Bonzini 
455484a16226SSean Christopherson static void paging32E_init_context(struct kvm_mmu *context)
4555c50d8ae3SPaolo Bonzini {
455684a16226SSean Christopherson 	paging64_init_context_common(context, PT32E_ROOT_LEVEL);
4557c50d8ae3SPaolo Bonzini }
4558c50d8ae3SPaolo Bonzini 
45598626c120SSean Christopherson static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
45608626c120SSean Christopherson 							 struct kvm_mmu_role_regs *regs)
4561c50d8ae3SPaolo Bonzini {
4562c50d8ae3SPaolo Bonzini 	union kvm_mmu_extended_role ext = {0};
4563c50d8ae3SPaolo Bonzini 
4564ca8d664fSSean Christopherson 	if (____is_cr0_pg(regs)) {
4565ca8d664fSSean Christopherson 		ext.cr0_pg = 1;
45668626c120SSean Christopherson 		ext.cr4_pae = ____is_cr4_pae(regs);
45678626c120SSean Christopherson 		ext.cr4_smep = ____is_cr4_smep(regs);
45688626c120SSean Christopherson 		ext.cr4_smap = ____is_cr4_smap(regs);
45698626c120SSean Christopherson 		ext.cr4_pse = ____is_cr4_pse(regs);
457084c679f5SSean Christopherson 
457184c679f5SSean Christopherson 		/* PKEY and LA57 are active iff long mode is active. */
457284c679f5SSean Christopherson 		ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
457384c679f5SSean Christopherson 		ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
4574ca8d664fSSean Christopherson 	}
4575c50d8ae3SPaolo Bonzini 
4576c50d8ae3SPaolo Bonzini 	ext.valid = 1;
4577c50d8ae3SPaolo Bonzini 
4578c50d8ae3SPaolo Bonzini 	return ext;
4579c50d8ae3SPaolo Bonzini }
4580c50d8ae3SPaolo Bonzini 
4581c50d8ae3SPaolo Bonzini static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
45828626c120SSean Christopherson 						   struct kvm_mmu_role_regs *regs,
4583c50d8ae3SPaolo Bonzini 						   bool base_only)
4584c50d8ae3SPaolo Bonzini {
4585c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4586c50d8ae3SPaolo Bonzini 
4587c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4588ca8d664fSSean Christopherson 	if (____is_cr0_pg(regs)) {
4589167f8a5cSSean Christopherson 		role.base.efer_nx = ____is_efer_nx(regs);
45908626c120SSean Christopherson 		role.base.cr0_wp = ____is_cr0_wp(regs);
4591ca8d664fSSean Christopherson 	}
4592c50d8ae3SPaolo Bonzini 	role.base.smm = is_smm(vcpu);
4593c50d8ae3SPaolo Bonzini 	role.base.guest_mode = is_guest_mode(vcpu);
4594c50d8ae3SPaolo Bonzini 
4595c50d8ae3SPaolo Bonzini 	if (base_only)
4596c50d8ae3SPaolo Bonzini 		return role;
4597c50d8ae3SPaolo Bonzini 
45988626c120SSean Christopherson 	role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
4599c50d8ae3SPaolo Bonzini 
4600c50d8ae3SPaolo Bonzini 	return role;
4601c50d8ae3SPaolo Bonzini }
4602c50d8ae3SPaolo Bonzini 
4603d468d94bSSean Christopherson static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4604d468d94bSSean Christopherson {
4605d468d94bSSean Christopherson 	/* Use 5-level TDP if and only if it's useful/necessary. */
460683013059SSean Christopherson 	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4607d468d94bSSean Christopherson 		return 4;
4608d468d94bSSean Christopherson 
460983013059SSean Christopherson 	return max_tdp_level;
4610d468d94bSSean Christopherson }
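
/*
 * E.g. a host with max_tdp_level == 5 running a guest whose
 * cpuid_maxphyaddr() is 48 or less gets a 4-level TDP MMU; only guests
 * with MAXPHYADDR above 48, which can generate GPAs wider than 48 bits,
 * get 5-level TDP.
 */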
4611d468d94bSSean Christopherson 
4612c50d8ae3SPaolo Bonzini static union kvm_mmu_role
46138626c120SSean Christopherson kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
46148626c120SSean Christopherson 				struct kvm_mmu_role_regs *regs, bool base_only)
4615c50d8ae3SPaolo Bonzini {
46168626c120SSean Christopherson 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4617c50d8ae3SPaolo Bonzini 
4618c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = (shadow_accessed_mask == 0);
4619d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4620c50d8ae3SPaolo Bonzini 	role.base.direct = true;
4621c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4622c50d8ae3SPaolo Bonzini 
4623c50d8ae3SPaolo Bonzini 	return role;
4624c50d8ae3SPaolo Bonzini }
4625c50d8ae3SPaolo Bonzini 
4626c50d8ae3SPaolo Bonzini static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4627c50d8ae3SPaolo Bonzini {
46288c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
46298626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4630c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
46318626c120SSean Christopherson 		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
4632c50d8ae3SPaolo Bonzini 
4633c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4634c50d8ae3SPaolo Bonzini 		return;
4635c50d8ae3SPaolo Bonzini 
4636c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
46377a02674dSSean Christopherson 	context->page_fault = kvm_tdp_page_fault;
4638c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
46395efac074SPaolo Bonzini 	context->invlpg = NULL;
4640d468d94bSSean Christopherson 	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4641c50d8ae3SPaolo Bonzini 	context->direct_map = true;
4642d8dd54e0SSean Christopherson 	context->get_guest_pgd = get_cr3;
4643c50d8ae3SPaolo Bonzini 	context->get_pdptr = kvm_pdptr_read;
4644c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4645c50d8ae3SPaolo Bonzini 
4646c50d8ae3SPaolo Bonzini 	if (!is_paging(vcpu)) {
4647c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = nonpaging_gva_to_gpa;
4648c50d8ae3SPaolo Bonzini 		context->root_level = 0;
4649c50d8ae3SPaolo Bonzini 	} else if (is_long_mode(vcpu)) {
465087e99d7dSSean Christopherson 		context->root_level = ____is_cr4_la57(&regs) ?
4651c50d8ae3SPaolo Bonzini 				PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4652c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging64_gva_to_gpa;
4653c50d8ae3SPaolo Bonzini 	} else if (is_pae(vcpu)) {
4654c50d8ae3SPaolo Bonzini 		context->root_level = PT32E_ROOT_LEVEL;
4655c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging64_gva_to_gpa;
4656c50d8ae3SPaolo Bonzini 	} else {
4657c50d8ae3SPaolo Bonzini 		context->root_level = PT32_ROOT_LEVEL;
4658c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging32_gva_to_gpa;
4659c50d8ae3SPaolo Bonzini 	}
4660c50d8ae3SPaolo Bonzini 
4661*533f9a4bSSean Christopherson 	reset_guest_paging_metadata(vcpu, context);
4662c50d8ae3SPaolo Bonzini 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
4663c50d8ae3SPaolo Bonzini }
4664c50d8ae3SPaolo Bonzini 
4665c50d8ae3SPaolo Bonzini static union kvm_mmu_role
46668626c120SSean Christopherson kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
46678626c120SSean Christopherson 				      struct kvm_mmu_role_regs *regs, bool base_only)
4668c50d8ae3SPaolo Bonzini {
46698626c120SSean Christopherson 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4670c50d8ae3SPaolo Bonzini 
46718626c120SSean Christopherson 	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
46728626c120SSean Christopherson 	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
4673ca8d664fSSean Christopherson 	role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
4674c50d8ae3SPaolo Bonzini 
467559505b55SSean Christopherson 	return role;
467659505b55SSean Christopherson }
467759505b55SSean Christopherson 
467859505b55SSean Christopherson static union kvm_mmu_role
46798626c120SSean Christopherson kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
46808626c120SSean Christopherson 				   struct kvm_mmu_role_regs *regs, bool base_only)
468159505b55SSean Christopherson {
468259505b55SSean Christopherson 	union kvm_mmu_role role =
46838626c120SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
468459505b55SSean Christopherson 
46858626c120SSean Christopherson 	role.base.direct = !____is_cr0_pg(regs);
468659505b55SSean Christopherson 
46878626c120SSean Christopherson 	if (!____is_efer_lma(regs))
4688c50d8ae3SPaolo Bonzini 		role.base.level = PT32E_ROOT_LEVEL;
46898626c120SSean Christopherson 	else if (____is_cr4_la57(regs))
4690c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_5LEVEL;
4691c50d8ae3SPaolo Bonzini 	else
4692c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_4LEVEL;
4693c50d8ae3SPaolo Bonzini 
4694c50d8ae3SPaolo Bonzini 	return role;
4695c50d8ae3SPaolo Bonzini }
4696c50d8ae3SPaolo Bonzini 
46978c008659SPaolo Bonzini static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4698594e91a1SSean Christopherson 				    struct kvm_mmu_role_regs *regs,
4699594e91a1SSean Christopherson 				    union kvm_mmu_role new_role)
4700c50d8ae3SPaolo Bonzini {
470118db1b17SSean Christopherson 	if (new_role.as_u64 == context->mmu_role.as_u64)
470218db1b17SSean Christopherson 		return;
470318db1b17SSean Christopherson 
470418db1b17SSean Christopherson 	context->mmu_role.as_u64 = new_role.as_u64;
470518db1b17SSean Christopherson 
4706594e91a1SSean Christopherson 	if (!____is_cr0_pg(regs))
470784a16226SSean Christopherson 		nonpaging_init_context(context);
4708594e91a1SSean Christopherson 	else if (____is_efer_lma(regs))
470984a16226SSean Christopherson 		paging64_init_context(context, regs);
4710594e91a1SSean Christopherson 	else if (____is_cr4_pae(regs))
471184a16226SSean Christopherson 		paging32E_init_context(context);
4712c50d8ae3SPaolo Bonzini 	else
471384a16226SSean Christopherson 		paging32_init_context(context);
4714c50d8ae3SPaolo Bonzini 
4715*533f9a4bSSean Christopherson 	reset_guest_paging_metadata(vcpu, context);
4716d555f705SSean Christopherson 	context->shadow_root_level = new_role.base.level;
4717d555f705SSean Christopherson 
4718c50d8ae3SPaolo Bonzini 	reset_shadow_zero_bits_mask(vcpu, context);
4719c50d8ae3SPaolo Bonzini }
47200f04a2acSVitaly Kuznetsov 
4721594e91a1SSean Christopherson static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
4722594e91a1SSean Christopherson 				struct kvm_mmu_role_regs *regs)
47230f04a2acSVitaly Kuznetsov {
47248c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
47250f04a2acSVitaly Kuznetsov 	union kvm_mmu_role new_role =
47268626c120SSean Christopherson 		kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
47270f04a2acSVitaly Kuznetsov 
4728594e91a1SSean Christopherson 	shadow_mmu_init_context(vcpu, context, regs, new_role);
47290f04a2acSVitaly Kuznetsov }
47300f04a2acSVitaly Kuznetsov 
473159505b55SSean Christopherson static union kvm_mmu_role
47328626c120SSean Christopherson kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
47338626c120SSean Christopherson 				   struct kvm_mmu_role_regs *regs)
473459505b55SSean Christopherson {
473559505b55SSean Christopherson 	union kvm_mmu_role role =
47368626c120SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
473759505b55SSean Christopherson 
473859505b55SSean Christopherson 	role.base.direct = false;
4739d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
474059505b55SSean Christopherson 
474159505b55SSean Christopherson 	return role;
474259505b55SSean Christopherson }
474359505b55SSean Christopherson 
4744dbc4739bSSean Christopherson void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
4745dbc4739bSSean Christopherson 			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
47460f04a2acSVitaly Kuznetsov {
47478c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4748594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = {
4749594e91a1SSean Christopherson 		.cr0 = cr0,
4750594e91a1SSean Christopherson 		.cr4 = cr4,
4751594e91a1SSean Christopherson 		.efer = efer,
4752594e91a1SSean Christopherson 	};
47538626c120SSean Christopherson 	union kvm_mmu_role new_role;
47548626c120SSean Christopherson 
47558626c120SSean Christopherson 	new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
47560f04a2acSVitaly Kuznetsov 
4757b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
4758a506fdd2SVitaly Kuznetsov 
4759594e91a1SSean Christopherson 	shadow_mmu_init_context(vcpu, context, &regs, new_role);
4760a3322d5cSSean Christopherson 
4761a3322d5cSSean Christopherson 	/*
476216be1d12SSean Christopherson 	 * Redo the shadow bits, the reset done by shadow_mmu_init_context()
476316be1d12SSean Christopherson 	 * (above) may use the wrong shadow_root_level.
476416be1d12SSean Christopherson 	 */
476516be1d12SSean Christopherson 	reset_shadow_zero_bits_mask(vcpu, context);
47660f04a2acSVitaly Kuznetsov }
47670f04a2acSVitaly Kuznetsov EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4768c50d8ae3SPaolo Bonzini 
4769c50d8ae3SPaolo Bonzini static union kvm_mmu_role
4770c50d8ae3SPaolo Bonzini kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4771bb1fcc70SSean Christopherson 				   bool execonly, u8 level)
4772c50d8ae3SPaolo Bonzini {
4773c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4774c50d8ae3SPaolo Bonzini 
4775c50d8ae3SPaolo Bonzini 	/* SMM flag is inherited from root_mmu */
4776c50d8ae3SPaolo Bonzini 	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4777c50d8ae3SPaolo Bonzini 
4778bb1fcc70SSean Christopherson 	role.base.level = level;
4779c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4780c50d8ae3SPaolo Bonzini 	role.base.direct = false;
4781c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = !accessed_dirty;
4782c50d8ae3SPaolo Bonzini 	role.base.guest_mode = true;
4783c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4784c50d8ae3SPaolo Bonzini 
4785cd6767c3SSean Christopherson 	/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
4786cd6767c3SSean Christopherson 	role.ext.word = 0;
4787c50d8ae3SPaolo Bonzini 	role.ext.execonly = execonly;
4788cd6767c3SSean Christopherson 	role.ext.valid = 1;
4789c50d8ae3SPaolo Bonzini 
4790c50d8ae3SPaolo Bonzini 	return role;
4791c50d8ae3SPaolo Bonzini }
4792c50d8ae3SPaolo Bonzini 
4793c50d8ae3SPaolo Bonzini void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4794c50d8ae3SPaolo Bonzini 			     bool accessed_dirty, gpa_t new_eptp)
4795c50d8ae3SPaolo Bonzini {
47968c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4797bb1fcc70SSean Christopherson 	u8 level = vmx_eptp_page_walk_level(new_eptp);
4798c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
4799c50d8ae3SPaolo Bonzini 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4800bb1fcc70SSean Christopherson 						   execonly, level);
4801c50d8ae3SPaolo Bonzini 
4802b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
4803c50d8ae3SPaolo Bonzini 
4804c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4805c50d8ae3SPaolo Bonzini 		return;
4806c50d8ae3SPaolo Bonzini 
480718db1b17SSean Christopherson 	context->mmu_role.as_u64 = new_role.as_u64;
480818db1b17SSean Christopherson 
4809bb1fcc70SSean Christopherson 	context->shadow_root_level = level;
4810c50d8ae3SPaolo Bonzini 
4811c50d8ae3SPaolo Bonzini 	context->ept_ad = accessed_dirty;
4812c50d8ae3SPaolo Bonzini 	context->page_fault = ept_page_fault;
4813c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = ept_gva_to_gpa;
4814c50d8ae3SPaolo Bonzini 	context->sync_page = ept_sync_page;
4815c50d8ae3SPaolo Bonzini 	context->invlpg = ept_invlpg;
4816bb1fcc70SSean Christopherson 	context->root_level = level;
4817c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4818c50d8ae3SPaolo Bonzini 
4819c596f147SSean Christopherson 	update_permission_bitmask(context, true);
4820b67a93a8SSean Christopherson 	update_last_nonleaf_level(context);
48212e4c0661SSean Christopherson 	update_pkru_bitmask(context);
4822c50d8ae3SPaolo Bonzini 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4823c50d8ae3SPaolo Bonzini 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4824c50d8ae3SPaolo Bonzini }
4825c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4826c50d8ae3SPaolo Bonzini 
4827c50d8ae3SPaolo Bonzini static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4828c50d8ae3SPaolo Bonzini {
48298c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4830594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4831c50d8ae3SPaolo Bonzini 
4832594e91a1SSean Christopherson 	kvm_init_shadow_mmu(vcpu, &regs);
4833929d1cfaSPaolo Bonzini 
4834d8dd54e0SSean Christopherson 	context->get_guest_pgd     = get_cr3;
4835c50d8ae3SPaolo Bonzini 	context->get_pdptr         = kvm_pdptr_read;
4836c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4837c50d8ae3SPaolo Bonzini }
4838c50d8ae3SPaolo Bonzini 
48398626c120SSean Christopherson static union kvm_mmu_role
48408626c120SSean Christopherson kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
4841654430efSSean Christopherson {
48428626c120SSean Christopherson 	union kvm_mmu_role role;
48438626c120SSean Christopherson 
48448626c120SSean Christopherson 	role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4845654430efSSean Christopherson 
4846654430efSSean Christopherson 	/*
4847654430efSSean Christopherson 	 * Nested MMUs are used only for walking L2's gva->gpa; they never have
4848654430efSSean Christopherson 	 * shadow pages of their own and so "direct" has no meaning.  Set it
4849654430efSSean Christopherson 	 * to "true" to try to detect bogus usage of the nested MMU.
4850654430efSSean Christopherson 	 */
4851654430efSSean Christopherson 	role.base.direct = true;
4852654430efSSean Christopherson 
48538626c120SSean Christopherson 	if (!____is_cr0_pg(regs))
4854654430efSSean Christopherson 		role.base.level = 0;
48558626c120SSean Christopherson 	else if (____is_efer_lma(regs))
48568626c120SSean Christopherson 		role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
4857654430efSSean Christopherson 							  PT64_ROOT_4LEVEL;
48588626c120SSean Christopherson 	else if (____is_cr4_pae(regs))
4859654430efSSean Christopherson 		role.base.level = PT32E_ROOT_LEVEL;
4860654430efSSean Christopherson 	else
4861654430efSSean Christopherson 		role.base.level = PT32_ROOT_LEVEL;
4862654430efSSean Christopherson 
4863654430efSSean Christopherson 	return role;
4864654430efSSean Christopherson }
4865654430efSSean Christopherson 
4866c50d8ae3SPaolo Bonzini static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4867c50d8ae3SPaolo Bonzini {
48688626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
48698626c120SSean Christopherson 	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
4870c50d8ae3SPaolo Bonzini 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4871c50d8ae3SPaolo Bonzini 
4872c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
4873c50d8ae3SPaolo Bonzini 		return;
4874c50d8ae3SPaolo Bonzini 
4875c50d8ae3SPaolo Bonzini 	g_context->mmu_role.as_u64 = new_role.as_u64;
4876d8dd54e0SSean Christopherson 	g_context->get_guest_pgd     = get_cr3;
4877c50d8ae3SPaolo Bonzini 	g_context->get_pdptr         = kvm_pdptr_read;
4878c50d8ae3SPaolo Bonzini 	g_context->inject_page_fault = kvm_inject_page_fault;
48795472fcd4SSean Christopherson 	g_context->root_level        = new_role.base.level;
4880c50d8ae3SPaolo Bonzini 
4881c50d8ae3SPaolo Bonzini 	/*
48825efac074SPaolo Bonzini 	 * L2 page tables are never shadowed, so there is no need to sync
48835efac074SPaolo Bonzini 	 * SPTEs.
48845efac074SPaolo Bonzini 	 */
48855efac074SPaolo Bonzini 	g_context->invlpg            = NULL;
48865efac074SPaolo Bonzini 
48875efac074SPaolo Bonzini 	/*
4888c50d8ae3SPaolo Bonzini 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4889c50d8ae3SPaolo Bonzini 	 * L1's nested page tables (e.g. EPT12). The nested translation
4890c50d8ae3SPaolo Bonzini 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4891c50d8ae3SPaolo Bonzini 	 * L2's page tables as the first level of translation and L1's
4892c50d8ae3SPaolo Bonzini 	 * nested page tables as the second level of translation. Basically
4893c50d8ae3SPaolo Bonzini 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4894c50d8ae3SPaolo Bonzini 	 */
4895fa4b5588SSean Christopherson 	if (!is_paging(vcpu))
4896c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4897fa4b5588SSean Christopherson 	else if (is_long_mode(vcpu))
4898c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4899fa4b5588SSean Christopherson 	else if (is_pae(vcpu))
4900c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4901fa4b5588SSean Christopherson 	else
4902c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4903fa4b5588SSean Christopherson 
4904*533f9a4bSSean Christopherson 	reset_guest_paging_metadata(vcpu, g_context);
4905af0eb17eSSean Christopherson }
4906c50d8ae3SPaolo Bonzini 
4907c9060662SSean Christopherson void kvm_init_mmu(struct kvm_vcpu *vcpu)
4908c50d8ae3SPaolo Bonzini {
4909c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
4910c50d8ae3SPaolo Bonzini 		init_kvm_nested_mmu(vcpu);
4911c50d8ae3SPaolo Bonzini 	else if (tdp_enabled)
4912c50d8ae3SPaolo Bonzini 		init_kvm_tdp_mmu(vcpu);
4913c50d8ae3SPaolo Bonzini 	else
4914c50d8ae3SPaolo Bonzini 		init_kvm_softmmu(vcpu);
4915c50d8ae3SPaolo Bonzini }
4916c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_mmu);
4917c50d8ae3SPaolo Bonzini 
4918c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
4919c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
4920c50d8ae3SPaolo Bonzini {
49218626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4922c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role;
4923c50d8ae3SPaolo Bonzini 
4924c50d8ae3SPaolo Bonzini 	if (tdp_enabled)
49258626c120SSean Christopherson 		role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
4926c50d8ae3SPaolo Bonzini 	else
49278626c120SSean Christopherson 		role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
4928c50d8ae3SPaolo Bonzini 
4929c50d8ae3SPaolo Bonzini 	return role.base;
4930c50d8ae3SPaolo Bonzini }
4931c50d8ae3SPaolo Bonzini 
493249c6f875SSean Christopherson void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
493349c6f875SSean Christopherson {
493449c6f875SSean Christopherson 	/*
493549c6f875SSean Christopherson 	 * Invalidate all MMU roles to force them to reinitialize as CPUID
493649c6f875SSean Christopherson 	 * information is factored into reserved bit calculations.
493749c6f875SSean Christopherson 	 */
493849c6f875SSean Christopherson 	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
493949c6f875SSean Christopherson 	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
494049c6f875SSean Christopherson 	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
494149c6f875SSean Christopherson 	kvm_mmu_reset_context(vcpu);
494263f5a190SSean Christopherson 
494363f5a190SSean Christopherson 	/*
494463f5a190SSean Christopherson 	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
494563f5a190SSean Christopherson 	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
494663f5a190SSean Christopherson 	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
494763f5a190SSean Christopherson 	 * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
494863f5a190SSean Christopherson 	 * sweep the problem under the rug.
494963f5a190SSean Christopherson 	 *
495063f5a190SSean Christopherson 	 * KVM's horrific CPUID ABI makes the problem all but impossible to
495163f5a190SSean Christopherson 	 * solve, as correctly handling multiple vCPU models (with respect to
495263f5a190SSean Christopherson 	 * paging and physical address properties) in a single VM would require
495363f5a190SSean Christopherson 	 * tracking all relevant CPUID information in kvm_mmu_page_role.  That
495463f5a190SSean Christopherson 	 * is very undesirable as it would double the memory requirements for
495563f5a190SSean Christopherson 	 * gfn_track (see struct kvm_mmu_page_role comments), and in practice
495663f5a190SSean Christopherson 	 * no sane VMM mucks with the core vCPU model on the fly.
495763f5a190SSean Christopherson 	 */
495863f5a190SSean Christopherson 	if (vcpu->arch.last_vmentry_cpu != -1) {
495963f5a190SSean Christopherson 		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
496063f5a190SSean Christopherson 		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
496163f5a190SSean Christopherson 	}
496249c6f875SSean Christopherson }
496349c6f875SSean Christopherson 
4964c50d8ae3SPaolo Bonzini void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
4965c50d8ae3SPaolo Bonzini {
4966c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
4967c9060662SSean Christopherson 	kvm_init_mmu(vcpu);
4968c50d8ae3SPaolo Bonzini }
4969c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
4970c50d8ae3SPaolo Bonzini 
4971c50d8ae3SPaolo Bonzini int kvm_mmu_load(struct kvm_vcpu *vcpu)
4972c50d8ae3SPaolo Bonzini {
4973c50d8ae3SPaolo Bonzini 	int r;
4974c50d8ae3SPaolo Bonzini 
4975378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
4976c50d8ae3SPaolo Bonzini 	if (r)
4977c50d8ae3SPaolo Bonzini 		goto out;
4978748e52b9SSean Christopherson 	r = mmu_alloc_special_roots(vcpu);
4979c50d8ae3SPaolo Bonzini 	if (r)
4980c50d8ae3SPaolo Bonzini 		goto out;
49814a38162eSPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
49826e6ec584SSean Christopherson 		r = mmu_alloc_direct_roots(vcpu);
49836e6ec584SSean Christopherson 	else
49846e6ec584SSean Christopherson 		r = mmu_alloc_shadow_roots(vcpu);
4985c50d8ae3SPaolo Bonzini 	if (r)
4986c50d8ae3SPaolo Bonzini 		goto out;
4987a91f387bSSean Christopherson 
4988a91f387bSSean Christopherson 	kvm_mmu_sync_roots(vcpu);
4989a91f387bSSean Christopherson 
4990727a7e27SPaolo Bonzini 	kvm_mmu_load_pgd(vcpu);
4991b3646477SJason Baron 	static_call(kvm_x86_tlb_flush_current)(vcpu);
4992c50d8ae3SPaolo Bonzini out:
4993c50d8ae3SPaolo Bonzini 	return r;
4994c50d8ae3SPaolo Bonzini }
4995c50d8ae3SPaolo Bonzini 
4996c50d8ae3SPaolo Bonzini void kvm_mmu_unload(struct kvm_vcpu *vcpu)
4997c50d8ae3SPaolo Bonzini {
4998c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
4999c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
5000c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5001c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
5002c50d8ae3SPaolo Bonzini }
5003c50d8ae3SPaolo Bonzini 
5004c50d8ae3SPaolo Bonzini static bool need_remote_flush(u64 old, u64 new)
5005c50d8ae3SPaolo Bonzini {
5006c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old))
5007c50d8ae3SPaolo Bonzini 		return false;
5008c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(new))
5009c50d8ae3SPaolo Bonzini 		return true;
5010c50d8ae3SPaolo Bonzini 	if ((old ^ new) & PT64_BASE_ADDR_MASK)
5011c50d8ae3SPaolo Bonzini 		return true;
5012c50d8ae3SPaolo Bonzini 	old ^= shadow_nx_mask;
5013c50d8ae3SPaolo Bonzini 	new ^= shadow_nx_mask;
5014c50d8ae3SPaolo Bonzini 	return (old & ~new & PT64_PERM_MASK) != 0;
5015c50d8ae3SPaolo Bonzini }
5016c50d8ae3SPaolo Bonzini 
5017c50d8ae3SPaolo Bonzini static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5018c50d8ae3SPaolo Bonzini 				    int *bytes)
5019c50d8ae3SPaolo Bonzini {
5020c50d8ae3SPaolo Bonzini 	u64 gentry = 0;
5021c50d8ae3SPaolo Bonzini 	int r;
5022c50d8ae3SPaolo Bonzini 
5023c50d8ae3SPaolo Bonzini 	/*
5024c50d8ae3SPaolo Bonzini 	 * Assume that the pte write is on a page table of the same type
5025c50d8ae3SPaolo Bonzini 	 * as the current vcpu's paging mode, since we update the sptes only
5026c50d8ae3SPaolo Bonzini 	 * when they have the same mode.
5027c50d8ae3SPaolo Bonzini 	 */
5028c50d8ae3SPaolo Bonzini 	if (is_pae(vcpu) && *bytes == 4) {
5029c50d8ae3SPaolo Bonzini 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5030c50d8ae3SPaolo Bonzini 		*gpa &= ~(gpa_t)7;
5031c50d8ae3SPaolo Bonzini 		*bytes = 8;
5032c50d8ae3SPaolo Bonzini 	}
5033c50d8ae3SPaolo Bonzini 
5034c50d8ae3SPaolo Bonzini 	if (*bytes == 4 || *bytes == 8) {
5035c50d8ae3SPaolo Bonzini 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5036c50d8ae3SPaolo Bonzini 		if (r)
5037c50d8ae3SPaolo Bonzini 			gentry = 0;
5038c50d8ae3SPaolo Bonzini 	}
5039c50d8ae3SPaolo Bonzini 
5040c50d8ae3SPaolo Bonzini 	return gentry;
5041c50d8ae3SPaolo Bonzini }
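
/*
 * Note on the widening above: when a 32-bit PAE guest writes one half of a
 * 64-bit gpte, the read is widened to the full, naturally aligned 8 bytes
 * so that the returned gentry is the complete gpte, including the half the
 * guest did not just write.
 */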
5042c50d8ae3SPaolo Bonzini 
5043c50d8ae3SPaolo Bonzini /*
5044c50d8ae3SPaolo Bonzini  * If we're seeing too many writes to a page, it may no longer be a page table,
5045c50d8ae3SPaolo Bonzini  * or we may be forking, in which case it is better to unmap the page.
5046c50d8ae3SPaolo Bonzini  */
5047c50d8ae3SPaolo Bonzini static bool detect_write_flooding(struct kvm_mmu_page *sp)
5048c50d8ae3SPaolo Bonzini {
5049c50d8ae3SPaolo Bonzini 	/*
5050c50d8ae3SPaolo Bonzini 	 * Skip write-flooding detection for SPs whose level is 1: they can
5051c50d8ae3SPaolo Bonzini 	 * become unsync, in which case the guest page is not write-protected.
5052c50d8ae3SPaolo Bonzini 	 */
50533bae0459SSean Christopherson 	if (sp->role.level == PG_LEVEL_4K)
5054c50d8ae3SPaolo Bonzini 		return false;
5055c50d8ae3SPaolo Bonzini 
5056c50d8ae3SPaolo Bonzini 	atomic_inc(&sp->write_flooding_count);
5057c50d8ae3SPaolo Bonzini 	return atomic_read(&sp->write_flooding_count) >= 3;
5058c50d8ae3SPaolo Bonzini }
5059c50d8ae3SPaolo Bonzini 
5060c50d8ae3SPaolo Bonzini /*
5061c50d8ae3SPaolo Bonzini  * Misaligned accesses are too much trouble to fix up; also, they usually
5062c50d8ae3SPaolo Bonzini  * indicate a page is not used as a page table.
5063c50d8ae3SPaolo Bonzini  */
5064c50d8ae3SPaolo Bonzini static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5065c50d8ae3SPaolo Bonzini 				    int bytes)
5066c50d8ae3SPaolo Bonzini {
5067c50d8ae3SPaolo Bonzini 	unsigned offset, pte_size, misaligned;
5068c50d8ae3SPaolo Bonzini 
5069c50d8ae3SPaolo Bonzini 	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
5070c50d8ae3SPaolo Bonzini 		 gpa, bytes, sp->role.word);
5071c50d8ae3SPaolo Bonzini 
5072c50d8ae3SPaolo Bonzini 	offset = offset_in_page(gpa);
5073c50d8ae3SPaolo Bonzini 	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5074c50d8ae3SPaolo Bonzini 
5075c50d8ae3SPaolo Bonzini 	/*
5076c50d8ae3SPaolo Bonzini 	 * Sometimes the OS writes only a single byte of a pte to update status
5077c50d8ae3SPaolo Bonzini 	 * bits; for example, Linux uses an andb instruction in clear_bit().
5078c50d8ae3SPaolo Bonzini 	 */
5079c50d8ae3SPaolo Bonzini 	if (!(offset & (pte_size - 1)) && bytes == 1)
5080c50d8ae3SPaolo Bonzini 		return false;
5081c50d8ae3SPaolo Bonzini 
5082c50d8ae3SPaolo Bonzini 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5083c50d8ae3SPaolo Bonzini 	misaligned |= bytes < 4;
5084c50d8ae3SPaolo Bonzini 
5085c50d8ae3SPaolo Bonzini 	return misaligned;
5086c50d8ae3SPaolo Bonzini }
5087c50d8ae3SPaolo Bonzini 
5088c50d8ae3SPaolo Bonzini static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5089c50d8ae3SPaolo Bonzini {
5090c50d8ae3SPaolo Bonzini 	unsigned page_offset, quadrant;
5091c50d8ae3SPaolo Bonzini 	u64 *spte;
5092c50d8ae3SPaolo Bonzini 	int level;
5093c50d8ae3SPaolo Bonzini 
5094c50d8ae3SPaolo Bonzini 	page_offset = offset_in_page(gpa);
5095c50d8ae3SPaolo Bonzini 	level = sp->role.level;
5096c50d8ae3SPaolo Bonzini 	*nspte = 1;
5097c50d8ae3SPaolo Bonzini 	if (!sp->role.gpte_is_8_bytes) {
5098c50d8ae3SPaolo Bonzini 		page_offset <<= 1;	/* 32->64 */
5099c50d8ae3SPaolo Bonzini 		/*
5100c50d8ae3SPaolo Bonzini 		 * A 32-bit pde maps 4MB while the shadow pdes map
5101c50d8ae3SPaolo Bonzini 		 * only 2MB.  So we need to double the offset again
5102c50d8ae3SPaolo Bonzini 		 * and zap two pdes instead of one.
5103c50d8ae3SPaolo Bonzini 		 */
5104c50d8ae3SPaolo Bonzini 		if (level == PT32_ROOT_LEVEL) {
5105c50d8ae3SPaolo Bonzini 			page_offset &= ~7; /* kill rounding error */
5106c50d8ae3SPaolo Bonzini 			page_offset <<= 1;
5107c50d8ae3SPaolo Bonzini 			*nspte = 2;
5108c50d8ae3SPaolo Bonzini 		}
5109c50d8ae3SPaolo Bonzini 		quadrant = page_offset >> PAGE_SHIFT;
5110c50d8ae3SPaolo Bonzini 		page_offset &= ~PAGE_MASK;
5111c50d8ae3SPaolo Bonzini 		if (quadrant != sp->role.quadrant)
5112c50d8ae3SPaolo Bonzini 			return NULL;
5113c50d8ae3SPaolo Bonzini 	}
5114c50d8ae3SPaolo Bonzini 
5115c50d8ae3SPaolo Bonzini 	spte = &sp->spt[page_offset / sizeof(*spte)];
5116c50d8ae3SPaolo Bonzini 	return spte;
5117c50d8ae3SPaolo Bonzini }
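
/*
 * Worked example for the 32-bit (!gpte_is_8_bytes) case: a write to byte
 * offset 0x804 of a guest page table hits gpte index 0x201.  Doubling gives
 * page_offset = 0x1008, so quadrant = 1 and page_offset becomes 0x008; only
 * the shadow page with role.quadrant == 1 (shadowing the upper half of the
 * 1024-entry guest table) matches, and the affected spte is sp->spt[1].
 */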
5118c50d8ae3SPaolo Bonzini 
5119c50d8ae3SPaolo Bonzini static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5120c50d8ae3SPaolo Bonzini 			      const u8 *new, int bytes,
5121c50d8ae3SPaolo Bonzini 			      struct kvm_page_track_notifier_node *node)
5122c50d8ae3SPaolo Bonzini {
5123c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
5124c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5125c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5126c50d8ae3SPaolo Bonzini 	u64 entry, gentry, *spte;
5127c50d8ae3SPaolo Bonzini 	int npte;
5128c50d8ae3SPaolo Bonzini 	bool remote_flush, local_flush;
5129c50d8ae3SPaolo Bonzini 
5130c50d8ae3SPaolo Bonzini 	/*
5131c50d8ae3SPaolo Bonzini 	 * If we don't have indirect shadow pages, it means no page is
5132c50d8ae3SPaolo Bonzini 	 * write-protected, so we can simply return.
5133c50d8ae3SPaolo Bonzini 	 */
5134c50d8ae3SPaolo Bonzini 	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5135c50d8ae3SPaolo Bonzini 		return;
5136c50d8ae3SPaolo Bonzini 
5137c50d8ae3SPaolo Bonzini 	remote_flush = local_flush = false;
5138c50d8ae3SPaolo Bonzini 
5139c50d8ae3SPaolo Bonzini 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5140c50d8ae3SPaolo Bonzini 
5141c50d8ae3SPaolo Bonzini 	/*
5142c50d8ae3SPaolo Bonzini 	 * No need to care whether the memory allocation is successful
5143d9f6e12fSIngo Molnar 	 * or not, since pte prefetch is skipped if the cache does not
5144c50d8ae3SPaolo Bonzini 	 * have enough objects.
5145c50d8ae3SPaolo Bonzini 	 */
5146378f5cd6SSean Christopherson 	mmu_topup_memory_caches(vcpu, true);
5147c50d8ae3SPaolo Bonzini 
5148531810caSBen Gardon 	write_lock(&vcpu->kvm->mmu_lock);
5149c50d8ae3SPaolo Bonzini 
5150c50d8ae3SPaolo Bonzini 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5151c50d8ae3SPaolo Bonzini 
5152c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_pte_write;
5153c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5154c50d8ae3SPaolo Bonzini 
5155c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5156c50d8ae3SPaolo Bonzini 		if (detect_write_misaligned(sp, gpa, bytes) ||
5157c50d8ae3SPaolo Bonzini 		      detect_write_flooding(sp)) {
5158c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5159c50d8ae3SPaolo Bonzini 			++vcpu->kvm->stat.mmu_flooded;
5160c50d8ae3SPaolo Bonzini 			continue;
5161c50d8ae3SPaolo Bonzini 		}
5162c50d8ae3SPaolo Bonzini 
5163c50d8ae3SPaolo Bonzini 		spte = get_written_sptes(sp, gpa, &npte);
5164c50d8ae3SPaolo Bonzini 		if (!spte)
5165c50d8ae3SPaolo Bonzini 			continue;
5166c50d8ae3SPaolo Bonzini 
5167c50d8ae3SPaolo Bonzini 		local_flush = true;
5168c50d8ae3SPaolo Bonzini 		while (npte--) {
5169c50d8ae3SPaolo Bonzini 			entry = *spte;
51702de4085cSBen Gardon 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5171c5e2184dSSean Christopherson 			if (gentry && sp->role.level != PG_LEVEL_4K)
5172c5e2184dSSean Christopherson 				++vcpu->kvm->stat.mmu_pde_zapped;
5173c50d8ae3SPaolo Bonzini 			if (need_remote_flush(entry, *spte))
5174c50d8ae3SPaolo Bonzini 				remote_flush = true;
5175c50d8ae3SPaolo Bonzini 			++spte;
5176c50d8ae3SPaolo Bonzini 		}
5177c50d8ae3SPaolo Bonzini 	}
5178c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5179c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5180531810caSBen Gardon 	write_unlock(&vcpu->kvm->mmu_lock);
5181c50d8ae3SPaolo Bonzini }
5182c50d8ae3SPaolo Bonzini 
5183736c291cSSean Christopherson int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5184c50d8ae3SPaolo Bonzini 		       void *insn, int insn_len)
5185c50d8ae3SPaolo Bonzini {
518692daa48bSSean Christopherson 	int r, emulation_type = EMULTYPE_PF;
5187c50d8ae3SPaolo Bonzini 	bool direct = vcpu->arch.mmu->direct_map;
5188c50d8ae3SPaolo Bonzini 
51896948199aSSean Christopherson 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5190ddce6208SSean Christopherson 		return RET_PF_RETRY;
5191ddce6208SSean Christopherson 
5192c50d8ae3SPaolo Bonzini 	r = RET_PF_INVALID;
5193c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
5194736c291cSSean Christopherson 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5195c50d8ae3SPaolo Bonzini 		if (r == RET_PF_EMULATE)
5196c50d8ae3SPaolo Bonzini 			goto emulate;
5197c50d8ae3SPaolo Bonzini 	}
5198c50d8ae3SPaolo Bonzini 
5199c50d8ae3SPaolo Bonzini 	if (r == RET_PF_INVALID) {
52007a02674dSSean Christopherson 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
52017a02674dSSean Christopherson 					  lower_32_bits(error_code), false);
52027b367bc9SSean Christopherson 		if (WARN_ON_ONCE(r == RET_PF_INVALID))
52037b367bc9SSean Christopherson 			return -EIO;
5204c50d8ae3SPaolo Bonzini 	}
5205c50d8ae3SPaolo Bonzini 
5206c50d8ae3SPaolo Bonzini 	if (r < 0)
5207c50d8ae3SPaolo Bonzini 		return r;
520883a2ba4cSSean Christopherson 	if (r != RET_PF_EMULATE)
520983a2ba4cSSean Christopherson 		return 1;
5210c50d8ae3SPaolo Bonzini 
5211c50d8ae3SPaolo Bonzini 	/*
5212c50d8ae3SPaolo Bonzini 	 * Before emulating the instruction, check if the error code
5213c50d8ae3SPaolo Bonzini 	 * was due to a RO violation while translating the guest page.
5214c50d8ae3SPaolo Bonzini 	 * This can occur when using nested virtualization with nested
5215c50d8ae3SPaolo Bonzini 	 * paging in both guests. If true, we simply unprotect the page
5216c50d8ae3SPaolo Bonzini 	 * and resume the guest.
5217c50d8ae3SPaolo Bonzini 	 */
5218c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map &&
5219c50d8ae3SPaolo Bonzini 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5220736c291cSSean Christopherson 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5221c50d8ae3SPaolo Bonzini 		return 1;
5222c50d8ae3SPaolo Bonzini 	}
5223c50d8ae3SPaolo Bonzini 
5224c50d8ae3SPaolo Bonzini 	/*
5225c50d8ae3SPaolo Bonzini 	 * vcpu->arch.mmu->page_fault returned RET_PF_EMULATE, but we can still
5226c50d8ae3SPaolo Bonzini 	 * optimistically try to just unprotect the page and let the processor
5227c50d8ae3SPaolo Bonzini 	 * re-execute the instruction that caused the page fault.  Do not allow
5228c50d8ae3SPaolo Bonzini 	 * retrying MMIO emulation, as it's not only pointless but could also
5229c50d8ae3SPaolo Bonzini 	 * cause us to enter an infinite loop because the processor will keep
5230c50d8ae3SPaolo Bonzini 	 * faulting on the non-existent MMIO address.  Retrying an instruction
5231c50d8ae3SPaolo Bonzini 	 * from a nested guest is also pointless and dangerous as we are only
5232c50d8ae3SPaolo Bonzini 	 * explicitly shadowing L1's page tables, i.e. unprotecting something
5233c50d8ae3SPaolo Bonzini 	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5234c50d8ae3SPaolo Bonzini 	 */
5235736c291cSSean Christopherson 	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
523692daa48bSSean Christopherson 		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5237c50d8ae3SPaolo Bonzini emulate:
5238736c291cSSean Christopherson 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5239c50d8ae3SPaolo Bonzini 				       insn_len);
5240c50d8ae3SPaolo Bonzini }
5241c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5242c50d8ae3SPaolo Bonzini 
52435efac074SPaolo Bonzini void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
52445efac074SPaolo Bonzini 			    gva_t gva, hpa_t root_hpa)
5245c50d8ae3SPaolo Bonzini {
5246c50d8ae3SPaolo Bonzini 	int i;
5247c50d8ae3SPaolo Bonzini 
52485efac074SPaolo Bonzini 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
52495efac074SPaolo Bonzini 	if (mmu != &vcpu->arch.guest_mmu) {
52505efac074SPaolo Bonzini 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5251c50d8ae3SPaolo Bonzini 		if (is_noncanonical_address(gva, vcpu))
5252c50d8ae3SPaolo Bonzini 			return;
5253c50d8ae3SPaolo Bonzini 
5254b3646477SJason Baron 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
52555efac074SPaolo Bonzini 	}
52565efac074SPaolo Bonzini 
52575efac074SPaolo Bonzini 	if (!mmu->invlpg)
52585efac074SPaolo Bonzini 		return;
52595efac074SPaolo Bonzini 
52605efac074SPaolo Bonzini 	if (root_hpa == INVALID_PAGE) {
5261c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5262c50d8ae3SPaolo Bonzini 
5263c50d8ae3SPaolo Bonzini 		/*
5264c50d8ae3SPaolo Bonzini 		 * INVLPG is required to invalidate any global mappings for the VA,
5265c50d8ae3SPaolo Bonzini 		 * irrespective of PCID.  Determining whether any of the
5266c50d8ae3SPaolo Bonzini 		 * prev_root mappings of the VA is marked global would take
5267c50d8ae3SPaolo Bonzini 		 * roughly as much work as just syncing it blindly, so we
5268c50d8ae3SPaolo Bonzini 		 * might as well always sync it.
5269c50d8ae3SPaolo Bonzini 		 *
5270c50d8ae3SPaolo Bonzini 		 * Mappings not reachable via the current cr3 or the prev_roots will be
5271c50d8ae3SPaolo Bonzini 		 * synced when switching to that cr3, so nothing needs to be done here
5272c50d8ae3SPaolo Bonzini 		 * for them.
5273c50d8ae3SPaolo Bonzini 		 */
5274c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5275c50d8ae3SPaolo Bonzini 			if (VALID_PAGE(mmu->prev_roots[i].hpa))
5276c50d8ae3SPaolo Bonzini 				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
52775efac074SPaolo Bonzini 	} else {
52785efac074SPaolo Bonzini 		mmu->invlpg(vcpu, gva, root_hpa);
52795efac074SPaolo Bonzini 	}
52805efac074SPaolo Bonzini }
5281c50d8ae3SPaolo Bonzini 
52825efac074SPaolo Bonzini void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
52835efac074SPaolo Bonzini {
52845efac074SPaolo Bonzini 	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
5285c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5286c50d8ae3SPaolo Bonzini }
5287c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5288c50d8ae3SPaolo Bonzini 
52895efac074SPaolo Bonzini 
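/*
 * Handle an INVPCID single-address invalidation.  As implemented below, the
 * GVA is invalidated in whichever of the current root and the cached
 * prev_roots is tagged with the requested PCID, and the hardware TLB entry
 * for the GVA is flushed only if at least one root matched.
 */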
5290c50d8ae3SPaolo Bonzini void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5291c50d8ae3SPaolo Bonzini {
5292c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5293c50d8ae3SPaolo Bonzini 	bool tlb_flush = false;
5294c50d8ae3SPaolo Bonzini 	uint i;
5295c50d8ae3SPaolo Bonzini 
5296c50d8ae3SPaolo Bonzini 	if (pcid == kvm_get_active_pcid(vcpu)) {
5297c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5298c50d8ae3SPaolo Bonzini 		tlb_flush = true;
5299c50d8ae3SPaolo Bonzini 	}
5300c50d8ae3SPaolo Bonzini 
5301c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5302c50d8ae3SPaolo Bonzini 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5303be01e8e2SSean Christopherson 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5304c50d8ae3SPaolo Bonzini 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5305c50d8ae3SPaolo Bonzini 			tlb_flush = true;
5306c50d8ae3SPaolo Bonzini 		}
5307c50d8ae3SPaolo Bonzini 	}
5308c50d8ae3SPaolo Bonzini 
5309c50d8ae3SPaolo Bonzini 	if (tlb_flush)
5310b3646477SJason Baron 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5311c50d8ae3SPaolo Bonzini 
5312c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5313c50d8ae3SPaolo Bonzini 
5314c50d8ae3SPaolo Bonzini 	/*
5315c50d8ae3SPaolo Bonzini 	 * Mappings not reachable via the current cr3 or the prev_roots will be
5316c50d8ae3SPaolo Bonzini 	 * synced when switching to that cr3, so nothing needs to be done here
5317c50d8ae3SPaolo Bonzini 	 * for them.
5318c50d8ae3SPaolo Bonzini 	 */
5319c50d8ae3SPaolo Bonzini }
5320c50d8ae3SPaolo Bonzini 
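/*
 * Configure whether TDP is used and the largest page size KVM may create.
 * Presumably called once by the vendor module during hardware setup, e.g.
 * (illustrative only, the real arguments are vendor-specific):
 *
 *	kvm_configure_mmu(enable_ept, PT64_ROOT_4LEVEL, PG_LEVEL_1G);
 */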
532183013059SSean Christopherson void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
532283013059SSean Christopherson 		       int tdp_huge_page_level)
5323c50d8ae3SPaolo Bonzini {
5324bde77235SSean Christopherson 	tdp_enabled = enable_tdp;
532583013059SSean Christopherson 	max_tdp_level = tdp_max_root_level;
5326703c335dSSean Christopherson 
5327703c335dSSean Christopherson 	/*
53281d92d2e8SSean Christopherson 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
5329703c335dSSean Christopherson 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
5330703c335dSSean Christopherson 	 * the kernel is not.  But, KVM never creates a page size greater than
5331703c335dSSean Christopherson 	 * what is used by the kernel for any given HVA, i.e. the kernel's
5332703c335dSSean Christopherson 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5333703c335dSSean Christopherson 	 */
5334703c335dSSean Christopherson 	if (tdp_enabled)
53351d92d2e8SSean Christopherson 		max_huge_page_level = tdp_huge_page_level;
5336703c335dSSean Christopherson 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
53371d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_1G;
5338703c335dSSean Christopherson 	else
53391d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_2M;
5340c50d8ae3SPaolo Bonzini }
5341bde77235SSean Christopherson EXPORT_SYMBOL_GPL(kvm_configure_mmu);
5342c50d8ae3SPaolo Bonzini 
5343c50d8ae3SPaolo Bonzini /* The return value indicates if tlb flush on all vcpus is needed. */
53440a234f5dSSean Christopherson typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head,
53450a234f5dSSean Christopherson 				    struct kvm_memory_slot *slot);
5346c50d8ae3SPaolo Bonzini 
5347c50d8ae3SPaolo Bonzini /* The caller should hold mmu-lock before calling this function. */
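/*
 * If flush_on_yield is set, any pending TLB flush is performed before
 * mmu_lock is temporarily dropped in the loop below, presumably so that
 * other mmu_lock contenders never observe a range whose SPTEs were zapped
 * but whose stale TLB entries are still live.
 */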
5348c50d8ae3SPaolo Bonzini static __always_inline bool
5349c50d8ae3SPaolo Bonzini slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5350c50d8ae3SPaolo Bonzini 			slot_level_handler fn, int start_level, int end_level,
53511a61b7dbSSean Christopherson 			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
53521a61b7dbSSean Christopherson 			bool flush)
5353c50d8ae3SPaolo Bonzini {
5354c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
5355c50d8ae3SPaolo Bonzini 
5356c50d8ae3SPaolo Bonzini 	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5357c50d8ae3SPaolo Bonzini 			end_gfn, &iterator) {
5358c50d8ae3SPaolo Bonzini 		if (iterator.rmap)
53590a234f5dSSean Christopherson 			flush |= fn(kvm, iterator.rmap, memslot);
5360c50d8ae3SPaolo Bonzini 
5361531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5362302695a5SSean Christopherson 			if (flush && flush_on_yield) {
5363c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm,
5364c50d8ae3SPaolo Bonzini 						start_gfn,
5365c50d8ae3SPaolo Bonzini 						iterator.gfn - start_gfn + 1);
5366c50d8ae3SPaolo Bonzini 				flush = false;
5367c50d8ae3SPaolo Bonzini 			}
5368531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
5369c50d8ae3SPaolo Bonzini 		}
5370c50d8ae3SPaolo Bonzini 	}
5371c50d8ae3SPaolo Bonzini 
5372c50d8ae3SPaolo Bonzini 	return flush;
5373c50d8ae3SPaolo Bonzini }
5374c50d8ae3SPaolo Bonzini 
5375c50d8ae3SPaolo Bonzini static __always_inline bool
5376c50d8ae3SPaolo Bonzini slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5377c50d8ae3SPaolo Bonzini 		  slot_level_handler fn, int start_level, int end_level,
5378302695a5SSean Christopherson 		  bool flush_on_yield)
5379c50d8ae3SPaolo Bonzini {
5380c50d8ae3SPaolo Bonzini 	return slot_handle_level_range(kvm, memslot, fn, start_level,
5381c50d8ae3SPaolo Bonzini 			end_level, memslot->base_gfn,
5382c50d8ae3SPaolo Bonzini 			memslot->base_gfn + memslot->npages - 1,
53831a61b7dbSSean Christopherson 			flush_on_yield, false);
5384c50d8ae3SPaolo Bonzini }
5385c50d8ae3SPaolo Bonzini 
5386c50d8ae3SPaolo Bonzini static __always_inline bool
5387c50d8ae3SPaolo Bonzini slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
5388302695a5SSean Christopherson 		 slot_level_handler fn, bool flush_on_yield)
5389c50d8ae3SPaolo Bonzini {
53903bae0459SSean Christopherson 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5391302695a5SSean Christopherson 				 PG_LEVEL_4K, flush_on_yield);
5392c50d8ae3SPaolo Bonzini }
5393c50d8ae3SPaolo Bonzini 
5394c50d8ae3SPaolo Bonzini static void free_mmu_pages(struct kvm_mmu *mmu)
5395c50d8ae3SPaolo Bonzini {
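	/*
	 * pae_root is decrypted at allocation time (see __kvm_mmu_create())
	 * when shadow paging is in use, so it must be re-encrypted before
	 * being handed back to the page allocator.
	 */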
53964a98623dSSean Christopherson 	if (!tdp_enabled && mmu->pae_root)
53974a98623dSSean Christopherson 		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
5398c50d8ae3SPaolo Bonzini 	free_page((unsigned long)mmu->pae_root);
539903ca4589SSean Christopherson 	free_page((unsigned long)mmu->pml4_root);
5400c50d8ae3SPaolo Bonzini }
5401c50d8ae3SPaolo Bonzini 
540204d28e37SSean Christopherson static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5403c50d8ae3SPaolo Bonzini {
5404c50d8ae3SPaolo Bonzini 	struct page *page;
5405c50d8ae3SPaolo Bonzini 	int i;
5406c50d8ae3SPaolo Bonzini 
540704d28e37SSean Christopherson 	mmu->root_hpa = INVALID_PAGE;
540804d28e37SSean Christopherson 	mmu->root_pgd = 0;
540904d28e37SSean Christopherson 	mmu->translate_gpa = translate_gpa;
541004d28e37SSean Christopherson 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
541104d28e37SSean Christopherson 		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
541204d28e37SSean Christopherson 
5413c50d8ae3SPaolo Bonzini 	/*
5414c50d8ae3SPaolo Bonzini 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5415c50d8ae3SPaolo Bonzini 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
5416c50d8ae3SPaolo Bonzini 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5417c50d8ae3SPaolo Bonzini 	 * x86_64.  Therefore we need to allocate the PDP table in the first
541804d45551SSean Christopherson 	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
541904d45551SSean Christopherson 	 * generally doesn't use PAE paging and can skip allocating the PDP
542004d45551SSean Christopherson 	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
542104d45551SSean Christopherson 	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
542204d45551SSean Christopherson 	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
5423c50d8ae3SPaolo Bonzini 	 */
5424d468d94bSSean Christopherson 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5425c50d8ae3SPaolo Bonzini 		return 0;
5426c50d8ae3SPaolo Bonzini 
5427c50d8ae3SPaolo Bonzini 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5428c50d8ae3SPaolo Bonzini 	if (!page)
5429c50d8ae3SPaolo Bonzini 		return -ENOMEM;
5430c50d8ae3SPaolo Bonzini 
5431c50d8ae3SPaolo Bonzini 	mmu->pae_root = page_address(page);
54324a98623dSSean Christopherson 
54334a98623dSSean Christopherson 	/*
54344a98623dSSean Christopherson 	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
54354a98623dSSean Christopherson 	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
54364a98623dSSean Christopherson 	 * that KVM's writes and the CPU's reads get along.  Note, this is
54374a98623dSSean Christopherson 	 * only necessary when using shadow paging, as 64-bit NPT can get at
54384a98623dSSean Christopherson 	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
54394a98623dSSean Christopherson 	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
54404a98623dSSean Christopherson 	 */
54414a98623dSSean Christopherson 	if (!tdp_enabled)
54424a98623dSSean Christopherson 		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
54434a98623dSSean Christopherson 	else
54444a98623dSSean Christopherson 		WARN_ON_ONCE(shadow_me_mask);
54454a98623dSSean Christopherson 
5446c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i)
5447c834e5e4SSean Christopherson 		mmu->pae_root[i] = INVALID_PAE_ROOT;
5448c50d8ae3SPaolo Bonzini 
5449c50d8ae3SPaolo Bonzini 	return 0;
5450c50d8ae3SPaolo Bonzini }
5451c50d8ae3SPaolo Bonzini 
5452c50d8ae3SPaolo Bonzini int kvm_mmu_create(struct kvm_vcpu *vcpu)
5453c50d8ae3SPaolo Bonzini {
5454c50d8ae3SPaolo Bonzini 	int ret;
5455c50d8ae3SPaolo Bonzini 
54565962bfb7SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
54575f6078f9SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
54585f6078f9SSean Christopherson 
54595962bfb7SSean Christopherson 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
54605f6078f9SSean Christopherson 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
54615962bfb7SSean Christopherson 
546296880883SSean Christopherson 	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
546396880883SSean Christopherson 
5464c50d8ae3SPaolo Bonzini 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
5465c50d8ae3SPaolo Bonzini 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5466c50d8ae3SPaolo Bonzini 
5467c50d8ae3SPaolo Bonzini 	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5468c50d8ae3SPaolo Bonzini 
546904d28e37SSean Christopherson 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5470c50d8ae3SPaolo Bonzini 	if (ret)
5471c50d8ae3SPaolo Bonzini 		return ret;
5472c50d8ae3SPaolo Bonzini 
547304d28e37SSean Christopherson 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5474c50d8ae3SPaolo Bonzini 	if (ret)
5475c50d8ae3SPaolo Bonzini 		goto fail_allocate_root;
5476c50d8ae3SPaolo Bonzini 
5477c50d8ae3SPaolo Bonzini 	return ret;
5478c50d8ae3SPaolo Bonzini  fail_allocate_root:
5479c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
5480c50d8ae3SPaolo Bonzini 	return ret;
5481c50d8ae3SPaolo Bonzini }
5482c50d8ae3SPaolo Bonzini 
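/*
 * Number of pages to zap between checks for mmu_lock contention and
 * need_resched() in kvm_zap_obsolete_pages(): a larger batch amortizes the
 * checks, a smaller batch shortens mmu_lock hold times.
 */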
5483c50d8ae3SPaolo Bonzini #define BATCH_ZAP_PAGES	10
5484c50d8ae3SPaolo Bonzini static void kvm_zap_obsolete_pages(struct kvm *kvm)
5485c50d8ae3SPaolo Bonzini {
5486c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5487c50d8ae3SPaolo Bonzini 	int nr_zapped, batch = 0;
5488c50d8ae3SPaolo Bonzini 
5489c50d8ae3SPaolo Bonzini restart:
5490c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe_reverse(sp, node,
5491c50d8ae3SPaolo Bonzini 	      &kvm->arch.active_mmu_pages, link) {
5492c50d8ae3SPaolo Bonzini 		/*
5493c50d8ae3SPaolo Bonzini 		 * No obsolete valid page exists before a newly created page
5494c50d8ae3SPaolo Bonzini 		 * since active_mmu_pages is a FIFO list.
5495c50d8ae3SPaolo Bonzini 		 */
5496c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
5497c50d8ae3SPaolo Bonzini 			break;
5498c50d8ae3SPaolo Bonzini 
5499c50d8ae3SPaolo Bonzini 		/*
5500f95eec9bSSean Christopherson 		 * Invalid pages should never land back on the list of active
5501f95eec9bSSean Christopherson 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
5502f95eec9bSSean Christopherson 		 * infinite loop if the page gets put back on the list (again).
5503c50d8ae3SPaolo Bonzini 		 */
5504f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5505c50d8ae3SPaolo Bonzini 			continue;
5506c50d8ae3SPaolo Bonzini 
5507c50d8ae3SPaolo Bonzini 		/*
5508c50d8ae3SPaolo Bonzini 		 * No need to flush the TLB since we're only zapping shadow
5509c50d8ae3SPaolo Bonzini 		 * pages with an obsolete generation number and all vCPUS have
5510c50d8ae3SPaolo Bonzini 		 * loaded a new root, i.e. the shadow pages being zapped cannot
5511c50d8ae3SPaolo Bonzini 		 * be in active use by the guest.
5512c50d8ae3SPaolo Bonzini 		 */
5513c50d8ae3SPaolo Bonzini 		if (batch >= BATCH_ZAP_PAGES &&
5514531810caSBen Gardon 		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
5515c50d8ae3SPaolo Bonzini 			batch = 0;
5516c50d8ae3SPaolo Bonzini 			goto restart;
5517c50d8ae3SPaolo Bonzini 		}
5518c50d8ae3SPaolo Bonzini 
5519c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp,
5520c50d8ae3SPaolo Bonzini 				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5521c50d8ae3SPaolo Bonzini 			batch += nr_zapped;
5522c50d8ae3SPaolo Bonzini 			goto restart;
5523c50d8ae3SPaolo Bonzini 		}
5524c50d8ae3SPaolo Bonzini 	}
5525c50d8ae3SPaolo Bonzini 
5526c50d8ae3SPaolo Bonzini 	/*
5527c50d8ae3SPaolo Bonzini 	 * Trigger a remote TLB flush before freeing the page tables to ensure
5528c50d8ae3SPaolo Bonzini 	 * KVM is not in the middle of a lockless shadow page table walk, which
5529c50d8ae3SPaolo Bonzini 	 * may reference the pages.
5530c50d8ae3SPaolo Bonzini 	 */
5531c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5532c50d8ae3SPaolo Bonzini }
5533c50d8ae3SPaolo Bonzini 
5534c50d8ae3SPaolo Bonzini /*
5535c50d8ae3SPaolo Bonzini  * Fast invalidate all shadow pages and use lock-break technique
5536c50d8ae3SPaolo Bonzini  * to zap obsolete pages.
5537c50d8ae3SPaolo Bonzini  *
5538c50d8ae3SPaolo Bonzini  * This is required when a memslot is being deleted or the VM is being
5539c50d8ae3SPaolo Bonzini  * destroyed; in these cases, we must ensure that the KVM MMU does not
5540c50d8ae3SPaolo Bonzini  * use any resource of the slot being deleted (or of any slot) after
5541c50d8ae3SPaolo Bonzini  * this function returns.
5542c50d8ae3SPaolo Bonzini  */
5543c50d8ae3SPaolo Bonzini static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5544c50d8ae3SPaolo Bonzini {
5545c50d8ae3SPaolo Bonzini 	lockdep_assert_held(&kvm->slots_lock);
5546c50d8ae3SPaolo Bonzini 
5547531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5548c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_zap_all_fast(kvm);
5549c50d8ae3SPaolo Bonzini 
5550c50d8ae3SPaolo Bonzini 	/*
5551c50d8ae3SPaolo Bonzini 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5552c50d8ae3SPaolo Bonzini 	 * held for the entire duration of zapping obsolete pages, it's
5553c50d8ae3SPaolo Bonzini 	 * impossible for there to be multiple invalid generations associated
5554c50d8ae3SPaolo Bonzini 	 * with *valid* shadow pages at any given time, i.e. there is exactly
5555c50d8ae3SPaolo Bonzini 	 * one valid generation and (at most) one invalid generation.
5556c50d8ae3SPaolo Bonzini 	 */
5557c50d8ae3SPaolo Bonzini 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5558c50d8ae3SPaolo Bonzini 
5559b7cccd39SBen Gardon 	/*
5560b7cccd39SBen Gardon 	 * In order to ensure all threads see this change when handling the
5561b7cccd39SBen Gardon 	 * MMU reload signal, this must happen in the same critical section
5562b7cccd39SBen Gardon 	 * as kvm_reload_remote_mmus, and before kvm_zap_obsolete_pages as
5563b7cccd39SBen Gardon 	 * kvm_zap_obsolete_pages could drop the MMU lock and yield.
5564b7cccd39SBen Gardon 	 */
5565b7cccd39SBen Gardon 	if (is_tdp_mmu_enabled(kvm))
5566b7cccd39SBen Gardon 		kvm_tdp_mmu_invalidate_all_roots(kvm);
5567b7cccd39SBen Gardon 
5568c50d8ae3SPaolo Bonzini 	/*
5569c50d8ae3SPaolo Bonzini 	 * Notify all vcpus to reload their shadow page tables and flush the TLB.
5570c50d8ae3SPaolo Bonzini 	 * Then all vcpus will switch to new shadow page table with the new
5571c50d8ae3SPaolo Bonzini 	 * mmu_valid_gen.
5572c50d8ae3SPaolo Bonzini 	 *
5573c50d8ae3SPaolo Bonzini 	 * Note: we need to do this under the protection of mmu_lock,
5574c50d8ae3SPaolo Bonzini 	 * otherwise, vcpu would purge shadow page but miss tlb flush.
5575c50d8ae3SPaolo Bonzini 	 * otherwise, a vcpu could purge a shadow page but miss the TLB flush.
5576c50d8ae3SPaolo Bonzini 	kvm_reload_remote_mmus(kvm);
5577c50d8ae3SPaolo Bonzini 
5578c50d8ae3SPaolo Bonzini 	kvm_zap_obsolete_pages(kvm);
5579faaf05b0SBen Gardon 
5580531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
55814c6654bdSBen Gardon 
55824c6654bdSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
55834c6654bdSBen Gardon 		read_lock(&kvm->mmu_lock);
55844c6654bdSBen Gardon 		kvm_tdp_mmu_zap_invalidated_roots(kvm);
55854c6654bdSBen Gardon 		read_unlock(&kvm->mmu_lock);
55864c6654bdSBen Gardon 	}
5587c50d8ae3SPaolo Bonzini }
5588c50d8ae3SPaolo Bonzini 
5589c50d8ae3SPaolo Bonzini static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5590c50d8ae3SPaolo Bonzini {
5591c50d8ae3SPaolo Bonzini 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5592c50d8ae3SPaolo Bonzini }
5593c50d8ae3SPaolo Bonzini 
5594c50d8ae3SPaolo Bonzini static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5595c50d8ae3SPaolo Bonzini 			struct kvm_memory_slot *slot,
5596c50d8ae3SPaolo Bonzini 			struct kvm_page_track_notifier_node *node)
5597c50d8ae3SPaolo Bonzini {
5598c50d8ae3SPaolo Bonzini 	kvm_mmu_zap_all_fast(kvm);
5599c50d8ae3SPaolo Bonzini }
5600c50d8ae3SPaolo Bonzini 
5601c50d8ae3SPaolo Bonzini void kvm_mmu_init_vm(struct kvm *kvm)
5602c50d8ae3SPaolo Bonzini {
5603c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5604c50d8ae3SPaolo Bonzini 
5605d501f747SBen Gardon 	if (!kvm_mmu_init_tdp_mmu(kvm))
5606d501f747SBen Gardon 		/*
5607d501f747SBen Gardon 		 * No smp_load/store wrappers needed here as we are in
5608d501f747SBen Gardon 		 * VM init and there cannot be any memslots / other threads
5609d501f747SBen Gardon 		 * accessing this struct kvm yet.
5610d501f747SBen Gardon 		 */
5611a2557408SBen Gardon 		kvm->arch.memslots_have_rmaps = true;
5612a2557408SBen Gardon 
5613c50d8ae3SPaolo Bonzini 	node->track_write = kvm_mmu_pte_write;
5614c50d8ae3SPaolo Bonzini 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5615c50d8ae3SPaolo Bonzini 	kvm_page_track_register_notifier(kvm, node);
5616c50d8ae3SPaolo Bonzini }
5617c50d8ae3SPaolo Bonzini 
5618c50d8ae3SPaolo Bonzini void kvm_mmu_uninit_vm(struct kvm *kvm)
5619c50d8ae3SPaolo Bonzini {
5620c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5621c50d8ae3SPaolo Bonzini 
5622c50d8ae3SPaolo Bonzini 	kvm_page_track_unregister_notifier(kvm, node);
5623fe5db27dSBen Gardon 
5624fe5db27dSBen Gardon 	kvm_mmu_uninit_tdp_mmu(kvm);
5625c50d8ae3SPaolo Bonzini }
5626c50d8ae3SPaolo Bonzini 
5627c50d8ae3SPaolo Bonzini void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5628c50d8ae3SPaolo Bonzini {
5629c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
5630c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
5631c50d8ae3SPaolo Bonzini 	int i;
56321a61b7dbSSean Christopherson 	bool flush = false;
5633c50d8ae3SPaolo Bonzini 
5634e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5635531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5636c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5637c50d8ae3SPaolo Bonzini 			slots = __kvm_memslots(kvm, i);
5638c50d8ae3SPaolo Bonzini 			kvm_for_each_memslot(memslot, slots) {
5639c50d8ae3SPaolo Bonzini 				gfn_t start, end;
5640c50d8ae3SPaolo Bonzini 
5641c50d8ae3SPaolo Bonzini 				start = max(gfn_start, memslot->base_gfn);
5642c50d8ae3SPaolo Bonzini 				end = min(gfn_end, memslot->base_gfn + memslot->npages);
5643c50d8ae3SPaolo Bonzini 				if (start >= end)
5644c50d8ae3SPaolo Bonzini 					continue;
5645c50d8ae3SPaolo Bonzini 
5646e2209710SBen Gardon 				flush = slot_handle_level_range(kvm, memslot,
5647e2209710SBen Gardon 						kvm_zap_rmapp, PG_LEVEL_4K,
5648e2209710SBen Gardon 						KVM_MAX_HUGEPAGE_LEVEL, start,
5649e2209710SBen Gardon 						end - 1, true, flush);
5650c50d8ae3SPaolo Bonzini 			}
5651c50d8ae3SPaolo Bonzini 		}
5652faaf05b0SBen Gardon 		if (flush)
56531a61b7dbSSean Christopherson 			kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
5654531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5655e2209710SBen Gardon 	}
56566103bc07SBen Gardon 
56576103bc07SBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
56586103bc07SBen Gardon 		flush = false;
56596103bc07SBen Gardon 
56606103bc07SBen Gardon 		read_lock(&kvm->mmu_lock);
56616103bc07SBen Gardon 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
56626103bc07SBen Gardon 			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
56636103bc07SBen Gardon 							  gfn_end, flush, true);
56646103bc07SBen Gardon 		if (flush)
56656103bc07SBen Gardon 			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
56666103bc07SBen Gardon 							   gfn_end);
56676103bc07SBen Gardon 
56686103bc07SBen Gardon 		read_unlock(&kvm->mmu_lock);
56696103bc07SBen Gardon 	}
5670c50d8ae3SPaolo Bonzini }
5671c50d8ae3SPaolo Bonzini 
5672c50d8ae3SPaolo Bonzini static bool slot_rmap_write_protect(struct kvm *kvm,
56730a234f5dSSean Christopherson 				    struct kvm_rmap_head *rmap_head,
56740a234f5dSSean Christopherson 				    struct kvm_memory_slot *slot)
5675c50d8ae3SPaolo Bonzini {
5676c50d8ae3SPaolo Bonzini 	return __rmap_write_protect(kvm, rmap_head, false);
5677c50d8ae3SPaolo Bonzini }
5678c50d8ae3SPaolo Bonzini 
5679c50d8ae3SPaolo Bonzini void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
56803c9bd400SJay Zhou 				      struct kvm_memory_slot *memslot,
56813c9bd400SJay Zhou 				      int start_level)
5682c50d8ae3SPaolo Bonzini {
5683e2209710SBen Gardon 	bool flush = false;
5684c50d8ae3SPaolo Bonzini 
5685e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5686531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
56873c9bd400SJay Zhou 		flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5688e2209710SBen Gardon 					  start_level, KVM_MAX_HUGEPAGE_LEVEL,
5689e2209710SBen Gardon 					  false);
5690531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5691e2209710SBen Gardon 	}
5692c50d8ae3SPaolo Bonzini 
569324ae4cfaSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
569424ae4cfaSBen Gardon 		read_lock(&kvm->mmu_lock);
569524ae4cfaSBen Gardon 		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
569624ae4cfaSBen Gardon 		read_unlock(&kvm->mmu_lock);
569724ae4cfaSBen Gardon 	}
569824ae4cfaSBen Gardon 
5699c50d8ae3SPaolo Bonzini 	/*
5700c50d8ae3SPaolo Bonzini 	 * We can flush all the TLBs out of the mmu lock without risking TLB
5701c50d8ae3SPaolo Bonzini 	 * corruption, since we only change the spte from writable to
5702c50d8ae3SPaolo Bonzini 	 * read-only, so the only case we need to care about is a
5703c50d8ae3SPaolo Bonzini 	 * present-to-present change (changing the spte from present to
5704c50d8ae3SPaolo Bonzini 	 * nonpresent flushes all the TLBs immediately).  In other words,
5705c50d8ae3SPaolo Bonzini 	 * the only case that matters is mmu_spte_update(), which checks
57065fc3424fSSean Christopherson 	 * Host-writable | MMU-writable instead of PT_WRITABLE_MASK, and
57075fc3424fSSean Christopherson 	 * therefore no longer depends on PT_WRITABLE_MASK.
5709c50d8ae3SPaolo Bonzini 	 */
5710c50d8ae3SPaolo Bonzini 	if (flush)
57117f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5712c50d8ae3SPaolo Bonzini }
5713c50d8ae3SPaolo Bonzini 
5714c50d8ae3SPaolo Bonzini static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
57150a234f5dSSean Christopherson 					 struct kvm_rmap_head *rmap_head,
57160a234f5dSSean Christopherson 					 struct kvm_memory_slot *slot)
5717c50d8ae3SPaolo Bonzini {
5718c50d8ae3SPaolo Bonzini 	u64 *sptep;
5719c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
5720c50d8ae3SPaolo Bonzini 	int need_tlb_flush = 0;
5721c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
5722c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5723c50d8ae3SPaolo Bonzini 
5724c50d8ae3SPaolo Bonzini restart:
5725c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
572657354682SSean Christopherson 		sp = sptep_to_sp(sptep);
5727c50d8ae3SPaolo Bonzini 		pfn = spte_to_pfn(*sptep);
5728c50d8ae3SPaolo Bonzini 
5729c50d8ae3SPaolo Bonzini 		/*
5730c50d8ae3SPaolo Bonzini 		 * We cannot do huge page mapping for indirect shadow pages,
5731c50d8ae3SPaolo Bonzini 		 * which are found on the last rmap (level = 1) when not using
5732c50d8ae3SPaolo Bonzini 		 * tdp; such shadow pages are synced with the guest page table,
5733c50d8ae3SPaolo Bonzini 		 * and the guest page table uses a 4K mapping if the indirect
5734c50d8ae3SPaolo Bonzini 		 * sp has level = 1.
5735c50d8ae3SPaolo Bonzini 		 */
5736c50d8ae3SPaolo Bonzini 		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
57379eba50f8SSean Christopherson 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
57389eba50f8SSean Christopherson 							       pfn, PG_LEVEL_NUM)) {
5739c50d8ae3SPaolo Bonzini 			pte_list_remove(rmap_head, sptep);
5740c50d8ae3SPaolo Bonzini 
5741c50d8ae3SPaolo Bonzini 			if (kvm_available_flush_tlb_with_range())
5742c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
5743c50d8ae3SPaolo Bonzini 					KVM_PAGES_PER_HPAGE(sp->role.level));
5744c50d8ae3SPaolo Bonzini 			else
5745c50d8ae3SPaolo Bonzini 				need_tlb_flush = 1;
5746c50d8ae3SPaolo Bonzini 
5747c50d8ae3SPaolo Bonzini 			goto restart;
5748c50d8ae3SPaolo Bonzini 		}
5749c50d8ae3SPaolo Bonzini 	}
5750c50d8ae3SPaolo Bonzini 
5751c50d8ae3SPaolo Bonzini 	return need_tlb_flush;
5752c50d8ae3SPaolo Bonzini }
5753c50d8ae3SPaolo Bonzini 
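/*
 * Zap leaf SPTEs that could be mapped by a larger page so that the next
 * fault can re-create the huge mapping.  This is typically relevant once
 * dirty logging, which forces 4K mappings, is disabled on the memslot;
 * the exact call sites live in the memslot update paths.
 */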
5754c50d8ae3SPaolo Bonzini void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5755c50d8ae3SPaolo Bonzini 				   const struct kvm_memory_slot *memslot)
5756c50d8ae3SPaolo Bonzini {
5757c50d8ae3SPaolo Bonzini 	/* FIXME: const-ify all uses of struct kvm_memory_slot.  */
57589eba50f8SSean Christopherson 	struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
575931c65657SColin Ian King 	bool flush = false;
57609eba50f8SSean Christopherson 
5761e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5762531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5763302695a5SSean Christopherson 		flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
5764302695a5SSean Christopherson 		if (flush)
5765302695a5SSean Christopherson 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
5766531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5767e2209710SBen Gardon 	}
57682db6f772SBen Gardon 
57692db6f772SBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
57702db6f772SBen Gardon 		read_lock(&kvm->mmu_lock);
57712db6f772SBen Gardon 		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
57722db6f772SBen Gardon 		if (flush)
57732db6f772SBen Gardon 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
57742db6f772SBen Gardon 		read_unlock(&kvm->mmu_lock);
57752db6f772SBen Gardon 	}
5776c50d8ae3SPaolo Bonzini }
5777c50d8ae3SPaolo Bonzini 
5778b3594ffbSSean Christopherson void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
57796c9dd6d2SPaolo Bonzini 					const struct kvm_memory_slot *memslot)
5780b3594ffbSSean Christopherson {
5781b3594ffbSSean Christopherson 	/*
57827f42aa76SSean Christopherson 	 * All current use cases for flushing the TLBs for a specific memslot
5783302695a5SSean Christopherson 	 * are related to dirty logging, and many do the TLB flush out of
57847f42aa76SSean Christopherson 	 * mmu_lock.  The interaction between the various operations on the
57857f42aa76SSean Christopherson 	 * memslot must be serialized by slots_lock to ensure the TLB flush
57867f42aa76SSean Christopherson 	 * from one operation is observed by any other operation on the same memslot.
5787b3594ffbSSean Christopherson 	 */
5788b3594ffbSSean Christopherson 	lockdep_assert_held(&kvm->slots_lock);
5789cec37648SSean Christopherson 	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5790cec37648SSean Christopherson 					   memslot->npages);
5791b3594ffbSSean Christopherson }
5792b3594ffbSSean Christopherson 
5793c50d8ae3SPaolo Bonzini void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5794c50d8ae3SPaolo Bonzini 				   struct kvm_memory_slot *memslot)
5795c50d8ae3SPaolo Bonzini {
5796e2209710SBen Gardon 	bool flush = false;
5797c50d8ae3SPaolo Bonzini 
5798e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5799531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5800e2209710SBen Gardon 		flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
5801e2209710SBen Gardon 					 false);
5802531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5803e2209710SBen Gardon 	}
5804c50d8ae3SPaolo Bonzini 
580524ae4cfaSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
580624ae4cfaSBen Gardon 		read_lock(&kvm->mmu_lock);
580724ae4cfaSBen Gardon 		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
580824ae4cfaSBen Gardon 		read_unlock(&kvm->mmu_lock);
580924ae4cfaSBen Gardon 	}
581024ae4cfaSBen Gardon 
5811c50d8ae3SPaolo Bonzini 	/*
5812c50d8ae3SPaolo Bonzini 	 * It's also safe to flush TLBs out of mmu lock here, as this function
5813c50d8ae3SPaolo Bonzini 	 * is currently only used for dirty logging, in which case flushing
5814c50d8ae3SPaolo Bonzini 	 * the TLB out of mmu lock also guarantees that no dirty pages will be
5815c50d8ae3SPaolo Bonzini 	 * lost in the dirty_bitmap.
5816c50d8ae3SPaolo Bonzini 	 */
5817c50d8ae3SPaolo Bonzini 	if (flush)
58187f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5819c50d8ae3SPaolo Bonzini }
5820c50d8ae3SPaolo Bonzini 
5821c50d8ae3SPaolo Bonzini void kvm_mmu_zap_all(struct kvm *kvm)
5822c50d8ae3SPaolo Bonzini {
5823c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5824c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5825c50d8ae3SPaolo Bonzini 	int ign;
5826c50d8ae3SPaolo Bonzini 
5827531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5828c50d8ae3SPaolo Bonzini restart:
5829c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
5830f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5831c50d8ae3SPaolo Bonzini 			continue;
5832c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
5833c50d8ae3SPaolo Bonzini 			goto restart;
5834531810caSBen Gardon 		if (cond_resched_rwlock_write(&kvm->mmu_lock))
5835c50d8ae3SPaolo Bonzini 			goto restart;
5836c50d8ae3SPaolo Bonzini 	}
5837c50d8ae3SPaolo Bonzini 
5838c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
5839faaf05b0SBen Gardon 
5840897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
5841faaf05b0SBen Gardon 		kvm_tdp_mmu_zap_all(kvm);
5842faaf05b0SBen Gardon 
5843531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5844c50d8ae3SPaolo Bonzini }
5845c50d8ae3SPaolo Bonzini 
5846c50d8ae3SPaolo Bonzini void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5847c50d8ae3SPaolo Bonzini {
5848c50d8ae3SPaolo Bonzini 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
5849c50d8ae3SPaolo Bonzini 
5850c50d8ae3SPaolo Bonzini 	gen &= MMIO_SPTE_GEN_MASK;
5851c50d8ae3SPaolo Bonzini 
5852c50d8ae3SPaolo Bonzini 	/*
5853c50d8ae3SPaolo Bonzini 	 * Generation numbers are incremented in multiples of the number of
5854c50d8ae3SPaolo Bonzini 	 * address spaces in order to provide unique generations across all
5855c50d8ae3SPaolo Bonzini 	 * address spaces.  Strip what is effectively the address space
5856c50d8ae3SPaolo Bonzini 	 * modifier prior to checking for a wrap of the MMIO generation so
5857c50d8ae3SPaolo Bonzini 	 * that a wrap in any address space is detected.
5858c50d8ae3SPaolo Bonzini 	 */
5859c50d8ae3SPaolo Bonzini 	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
5860c50d8ae3SPaolo Bonzini 
5861c50d8ae3SPaolo Bonzini 	/*
5862c50d8ae3SPaolo Bonzini 	 * The very rare case: if the MMIO generation number has wrapped,
5863c50d8ae3SPaolo Bonzini 	 * zap all shadow pages.
5864c50d8ae3SPaolo Bonzini 	 */
5865c50d8ae3SPaolo Bonzini 	if (unlikely(gen == 0)) {
5866c50d8ae3SPaolo Bonzini 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5867c50d8ae3SPaolo Bonzini 		kvm_mmu_zap_all_fast(kvm);
5868c50d8ae3SPaolo Bonzini 	}
5869c50d8ae3SPaolo Bonzini }
5870c50d8ae3SPaolo Bonzini 
5871c50d8ae3SPaolo Bonzini static unsigned long
5872c50d8ae3SPaolo Bonzini mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5873c50d8ae3SPaolo Bonzini {
5874c50d8ae3SPaolo Bonzini 	struct kvm *kvm;
5875c50d8ae3SPaolo Bonzini 	int nr_to_scan = sc->nr_to_scan;
5876c50d8ae3SPaolo Bonzini 	unsigned long freed = 0;
5877c50d8ae3SPaolo Bonzini 
5878c50d8ae3SPaolo Bonzini 	mutex_lock(&kvm_lock);
5879c50d8ae3SPaolo Bonzini 
5880c50d8ae3SPaolo Bonzini 	list_for_each_entry(kvm, &vm_list, vm_list) {
5881c50d8ae3SPaolo Bonzini 		int idx;
5882c50d8ae3SPaolo Bonzini 		LIST_HEAD(invalid_list);
5883c50d8ae3SPaolo Bonzini 
5884c50d8ae3SPaolo Bonzini 		/*
5885c50d8ae3SPaolo Bonzini 		 * Never scan more than sc->nr_to_scan VM instances.
5886c50d8ae3SPaolo Bonzini 		 * In practice this condition is never hit, since we do not try
5887c50d8ae3SPaolo Bonzini 		 * to shrink more than one VM and it is very unlikely to see
5888c50d8ae3SPaolo Bonzini 		 * !n_used_mmu_pages so many times.
5889c50d8ae3SPaolo Bonzini 		 */
5890c50d8ae3SPaolo Bonzini 		if (!nr_to_scan--)
5891c50d8ae3SPaolo Bonzini 			break;
5892c50d8ae3SPaolo Bonzini 		/*
5893c50d8ae3SPaolo Bonzini 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5894c50d8ae3SPaolo Bonzini 		 * here. We may skip a VM instance erroneously, but we do not
5895c50d8ae3SPaolo Bonzini 		 * want to shrink a VM that only started to populate its MMU
5896c50d8ae3SPaolo Bonzini 		 * anyway.
5897c50d8ae3SPaolo Bonzini 		 */
5898c50d8ae3SPaolo Bonzini 		if (!kvm->arch.n_used_mmu_pages &&
5899c50d8ae3SPaolo Bonzini 		    !kvm_has_zapped_obsolete_pages(kvm))
5900c50d8ae3SPaolo Bonzini 			continue;
5901c50d8ae3SPaolo Bonzini 
5902c50d8ae3SPaolo Bonzini 		idx = srcu_read_lock(&kvm->srcu);
5903531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5904c50d8ae3SPaolo Bonzini 
5905c50d8ae3SPaolo Bonzini 		if (kvm_has_zapped_obsolete_pages(kvm)) {
5906c50d8ae3SPaolo Bonzini 			kvm_mmu_commit_zap_page(kvm,
5907c50d8ae3SPaolo Bonzini 			      &kvm->arch.zapped_obsolete_pages);
5908c50d8ae3SPaolo Bonzini 			goto unlock;
5909c50d8ae3SPaolo Bonzini 		}
5910c50d8ae3SPaolo Bonzini 
5911ebdb292dSSean Christopherson 		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
5912c50d8ae3SPaolo Bonzini 
5913c50d8ae3SPaolo Bonzini unlock:
5914531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5915c50d8ae3SPaolo Bonzini 		srcu_read_unlock(&kvm->srcu, idx);
5916c50d8ae3SPaolo Bonzini 
5917c50d8ae3SPaolo Bonzini 		/*
5918c50d8ae3SPaolo Bonzini 		 * unfair on small ones
5919c50d8ae3SPaolo Bonzini 		 * per-vm shrinkers cry out
5920c50d8ae3SPaolo Bonzini 		 * sadness comes quickly
5921c50d8ae3SPaolo Bonzini 		 */
5922c50d8ae3SPaolo Bonzini 		list_move_tail(&kvm->vm_list, &vm_list);
5923c50d8ae3SPaolo Bonzini 		break;
5924c50d8ae3SPaolo Bonzini 	}
5925c50d8ae3SPaolo Bonzini 
5926c50d8ae3SPaolo Bonzini 	mutex_unlock(&kvm_lock);
5927c50d8ae3SPaolo Bonzini 	return freed;
5928c50d8ae3SPaolo Bonzini }
5929c50d8ae3SPaolo Bonzini 
5930c50d8ae3SPaolo Bonzini static unsigned long
5931c50d8ae3SPaolo Bonzini mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5932c50d8ae3SPaolo Bonzini {
5933c50d8ae3SPaolo Bonzini 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
5934c50d8ae3SPaolo Bonzini }
5935c50d8ae3SPaolo Bonzini 
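/*
 * The MMU shrinker.  .seeks is set well above DEFAULT_SEEKS so that the
 * core shrinker logic scans proportionally fewer objects here; reclaiming
 * shadow pages is expensive for the guest, so it is treated as a last
 * resort.
 */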
5936c50d8ae3SPaolo Bonzini static struct shrinker mmu_shrinker = {
5937c50d8ae3SPaolo Bonzini 	.count_objects = mmu_shrink_count,
5938c50d8ae3SPaolo Bonzini 	.scan_objects = mmu_shrink_scan,
5939c50d8ae3SPaolo Bonzini 	.seeks = DEFAULT_SEEKS * 10,
5940c50d8ae3SPaolo Bonzini };
5941c50d8ae3SPaolo Bonzini 
5942c50d8ae3SPaolo Bonzini static void mmu_destroy_caches(void)
5943c50d8ae3SPaolo Bonzini {
5944c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(pte_list_desc_cache);
5945c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(mmu_page_header_cache);
5946c50d8ae3SPaolo Bonzini }
5947c50d8ae3SPaolo Bonzini 
5948c50d8ae3SPaolo Bonzini static bool get_nx_auto_mode(void)
5949c50d8ae3SPaolo Bonzini {
5950c50d8ae3SPaolo Bonzini 	/* Return true when CPU has the bug, and mitigations are ON */
5951c50d8ae3SPaolo Bonzini 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
5952c50d8ae3SPaolo Bonzini }
5953c50d8ae3SPaolo Bonzini 
5954c50d8ae3SPaolo Bonzini static void __set_nx_huge_pages(bool val)
5955c50d8ae3SPaolo Bonzini {
5956c50d8ae3SPaolo Bonzini 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
5957c50d8ae3SPaolo Bonzini }
5958c50d8ae3SPaolo Bonzini 
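/*
 * Parse writes to the nx_huge_pages module parameter.  Accepted values are
 * "off", "force", "auto", or anything strtobool() understands.  Assuming
 * the parameter is wired up via module_param_cb() in the usual way, it can
 * be toggled at runtime, e.g. (illustrative):
 *
 *	echo force > /sys/module/kvm/parameters/nx_huge_pages
 *
 * Changing the effective value zaps all shadow pages in every VM so that
 * huge pages are rebuilt (or split) according to the new policy.
 */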
5959c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
5960c50d8ae3SPaolo Bonzini {
5961c50d8ae3SPaolo Bonzini 	bool old_val = nx_huge_pages;
5962c50d8ae3SPaolo Bonzini 	bool new_val;
5963c50d8ae3SPaolo Bonzini 
5964c50d8ae3SPaolo Bonzini 	/* In "auto" mode deploy workaround only if CPU has the bug. */
5965c50d8ae3SPaolo Bonzini 	if (sysfs_streq(val, "off"))
5966c50d8ae3SPaolo Bonzini 		new_val = 0;
5967c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "force"))
5968c50d8ae3SPaolo Bonzini 		new_val = 1;
5969c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "auto"))
5970c50d8ae3SPaolo Bonzini 		new_val = get_nx_auto_mode();
5971c50d8ae3SPaolo Bonzini 	else if (strtobool(val, &new_val) < 0)
5972c50d8ae3SPaolo Bonzini 		return -EINVAL;
5973c50d8ae3SPaolo Bonzini 
5974c50d8ae3SPaolo Bonzini 	__set_nx_huge_pages(new_val);
5975c50d8ae3SPaolo Bonzini 
5976c50d8ae3SPaolo Bonzini 	if (new_val != old_val) {
5977c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
5978c50d8ae3SPaolo Bonzini 
5979c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
5980c50d8ae3SPaolo Bonzini 
5981c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list) {
5982c50d8ae3SPaolo Bonzini 			mutex_lock(&kvm->slots_lock);
5983c50d8ae3SPaolo Bonzini 			kvm_mmu_zap_all_fast(kvm);
5984c50d8ae3SPaolo Bonzini 			mutex_unlock(&kvm->slots_lock);
5985c50d8ae3SPaolo Bonzini 
5986c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5987c50d8ae3SPaolo Bonzini 		}
5988c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
5989c50d8ae3SPaolo Bonzini 	}
5990c50d8ae3SPaolo Bonzini 
5991c50d8ae3SPaolo Bonzini 	return 0;
5992c50d8ae3SPaolo Bonzini }
5993c50d8ae3SPaolo Bonzini 
5994c50d8ae3SPaolo Bonzini int kvm_mmu_module_init(void)
5995c50d8ae3SPaolo Bonzini {
5996c50d8ae3SPaolo Bonzini 	int ret = -ENOMEM;
5997c50d8ae3SPaolo Bonzini 
5998c50d8ae3SPaolo Bonzini 	if (nx_huge_pages == -1)
5999c50d8ae3SPaolo Bonzini 		__set_nx_huge_pages(get_nx_auto_mode());
6000c50d8ae3SPaolo Bonzini 
6001c50d8ae3SPaolo Bonzini 	/*
6002c50d8ae3SPaolo Bonzini 	 * MMU roles use union aliasing, which is, generally speaking,
6003c50d8ae3SPaolo Bonzini 	 * undefined behavior.  However, we supposedly know how compilers
6004c50d8ae3SPaolo Bonzini 	 * behave and the current status quo is unlikely to change.  The
6005c50d8ae3SPaolo Bonzini 	 * build-time assertions below are supposed to let us know if that assumption becomes false.
6006c50d8ae3SPaolo Bonzini 	 */
6007c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
6008c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
6009c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
6010c50d8ae3SPaolo Bonzini 
6011c50d8ae3SPaolo Bonzini 	kvm_mmu_reset_all_pte_masks();
6012c50d8ae3SPaolo Bonzini 
6013c50d8ae3SPaolo Bonzini 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
6014c50d8ae3SPaolo Bonzini 					    sizeof(struct pte_list_desc),
6015c50d8ae3SPaolo Bonzini 					    0, SLAB_ACCOUNT, NULL);
6016c50d8ae3SPaolo Bonzini 	if (!pte_list_desc_cache)
6017c50d8ae3SPaolo Bonzini 		goto out;
6018c50d8ae3SPaolo Bonzini 
6019c50d8ae3SPaolo Bonzini 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
6020c50d8ae3SPaolo Bonzini 						  sizeof(struct kvm_mmu_page),
6021c50d8ae3SPaolo Bonzini 						  0, SLAB_ACCOUNT, NULL);
6022c50d8ae3SPaolo Bonzini 	if (!mmu_page_header_cache)
6023c50d8ae3SPaolo Bonzini 		goto out;
6024c50d8ae3SPaolo Bonzini 
6025c50d8ae3SPaolo Bonzini 	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
6026c50d8ae3SPaolo Bonzini 		goto out;
6027c50d8ae3SPaolo Bonzini 
6028c50d8ae3SPaolo Bonzini 	ret = register_shrinker(&mmu_shrinker);
6029c50d8ae3SPaolo Bonzini 	if (ret)
6030c50d8ae3SPaolo Bonzini 		goto out;
6031c50d8ae3SPaolo Bonzini 
6032c50d8ae3SPaolo Bonzini 	return 0;
6033c50d8ae3SPaolo Bonzini 
6034c50d8ae3SPaolo Bonzini out:
6035c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
6036c50d8ae3SPaolo Bonzini 	return ret;
6037c50d8ae3SPaolo Bonzini }
6038c50d8ae3SPaolo Bonzini 
6039c50d8ae3SPaolo Bonzini /*
6040c50d8ae3SPaolo Bonzini  * Calculate mmu pages needed for kvm.
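 *
 * The budget is a fixed per-mille fraction of the guest's memslot pages,
 * with a floor of KVM_MIN_ALLOC_MMU_PAGES.  As a worked example, assuming
 * KVM_PERMILLE_MMU_PAGES is 20: a guest with 4 GiB of memslots has
 * 1048576 4KiB pages, giving 1048576 * 20 / 1000 = 20971 shadow pages.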
6041c50d8ae3SPaolo Bonzini  */
6042c50d8ae3SPaolo Bonzini unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
6043c50d8ae3SPaolo Bonzini {
6044c50d8ae3SPaolo Bonzini 	unsigned long nr_mmu_pages;
6045c50d8ae3SPaolo Bonzini 	unsigned long nr_pages = 0;
6046c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
6047c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
6048c50d8ae3SPaolo Bonzini 	int i;
6049c50d8ae3SPaolo Bonzini 
6050c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
6051c50d8ae3SPaolo Bonzini 		slots = __kvm_memslots(kvm, i);
6052c50d8ae3SPaolo Bonzini 
6053c50d8ae3SPaolo Bonzini 		kvm_for_each_memslot(memslot, slots)
6054c50d8ae3SPaolo Bonzini 			nr_pages += memslot->npages;
6055c50d8ae3SPaolo Bonzini 	}
6056c50d8ae3SPaolo Bonzini 
6057c50d8ae3SPaolo Bonzini 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
6058c50d8ae3SPaolo Bonzini 	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
6059c50d8ae3SPaolo Bonzini 
6060c50d8ae3SPaolo Bonzini 	return nr_mmu_pages;
6061c50d8ae3SPaolo Bonzini }
6062c50d8ae3SPaolo Bonzini 
6063c50d8ae3SPaolo Bonzini void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
6064c50d8ae3SPaolo Bonzini {
6065c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
6066c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.root_mmu);
6067c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
6068c50d8ae3SPaolo Bonzini 	mmu_free_memory_caches(vcpu);
6069c50d8ae3SPaolo Bonzini }
6070c50d8ae3SPaolo Bonzini 
6071c50d8ae3SPaolo Bonzini void kvm_mmu_module_exit(void)
6072c50d8ae3SPaolo Bonzini {
6073c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
6074c50d8ae3SPaolo Bonzini 	percpu_counter_destroy(&kvm_total_used_mmu_pages);
6075c50d8ae3SPaolo Bonzini 	unregister_shrinker(&mmu_shrinker);
6076c50d8ae3SPaolo Bonzini 	mmu_audit_disable();
6077c50d8ae3SPaolo Bonzini }
6078c50d8ae3SPaolo Bonzini 
6079c50d8ae3SPaolo Bonzini static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
6080c50d8ae3SPaolo Bonzini {
6081c50d8ae3SPaolo Bonzini 	unsigned int old_val;
6082c50d8ae3SPaolo Bonzini 	int err;
6083c50d8ae3SPaolo Bonzini 
6084c50d8ae3SPaolo Bonzini 	old_val = nx_huge_pages_recovery_ratio;
6085c50d8ae3SPaolo Bonzini 	err = param_set_uint(val, kp);
6086c50d8ae3SPaolo Bonzini 	if (err)
6087c50d8ae3SPaolo Bonzini 		return err;
6088c50d8ae3SPaolo Bonzini 
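	/*
	 * If the NX huge page mitigation is enabled and the recovery ratio
	 * just went from zero (recovery disabled) to non-zero, kick every
	 * VM's recovery thread so it recomputes its sleep timeout right away
	 * instead of continuing to wait on MAX_SCHEDULE_TIMEOUT.
	 */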
6089c50d8ae3SPaolo Bonzini 	if (READ_ONCE(nx_huge_pages) &&
6090c50d8ae3SPaolo Bonzini 	    !old_val && nx_huge_pages_recovery_ratio) {
6091c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
6092c50d8ae3SPaolo Bonzini 
6093c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
6094c50d8ae3SPaolo Bonzini 
6095c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list)
6096c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6097c50d8ae3SPaolo Bonzini 
6098c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
6099c50d8ae3SPaolo Bonzini 	}
6100c50d8ae3SPaolo Bonzini 
6101c50d8ae3SPaolo Bonzini 	return err;
6102c50d8ae3SPaolo Bonzini }
6103c50d8ae3SPaolo Bonzini 
6104c50d8ae3SPaolo Bonzini static void kvm_recover_nx_lpages(struct kvm *kvm)
6105c50d8ae3SPaolo Bonzini {
6106ade74e14SSean Christopherson 	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
6107c50d8ae3SPaolo Bonzini 	int rcu_idx;
6108c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
6109c50d8ae3SPaolo Bonzini 	unsigned int ratio;
6110c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
6111048f4980SSean Christopherson 	bool flush = false;
6112c50d8ae3SPaolo Bonzini 	ulong to_zap;
6113c50d8ae3SPaolo Bonzini 
6114c50d8ae3SPaolo Bonzini 	rcu_idx = srcu_read_lock(&kvm->srcu);
6115531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
6116c50d8ae3SPaolo Bonzini 
6117c50d8ae3SPaolo Bonzini 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
6118ade74e14SSean Christopherson 	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
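	/*
	 * Each pass reclaims roughly 1/ratio of the huge pages that were
	 * split due to the NX mitigation, e.g. with a ratio of 60 and 600
	 * outstanding splits, DIV_ROUND_UP(600, 60) = 10 pages are zapped
	 * in this invocation.
	 */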
61197d919c7aSSean Christopherson 	for ( ; to_zap; --to_zap) {
61207d919c7aSSean Christopherson 		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
61217d919c7aSSean Christopherson 			break;
61227d919c7aSSean Christopherson 
6123c50d8ae3SPaolo Bonzini 		/*
6124c50d8ae3SPaolo Bonzini 		 * We use a separate list instead of just using active_mmu_pages
6125c50d8ae3SPaolo Bonzini 		 * because the number of lpage_disallowed pages is expected to
6126c50d8ae3SPaolo Bonzini 		 * be relatively small compared to the total.
6127c50d8ae3SPaolo Bonzini 		 */
6128c50d8ae3SPaolo Bonzini 		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
6129c50d8ae3SPaolo Bonzini 				      struct kvm_mmu_page,
6130c50d8ae3SPaolo Bonzini 				      lpage_disallowed_link);
6131c50d8ae3SPaolo Bonzini 		WARN_ON_ONCE(!sp->lpage_disallowed);
6132897218ffSPaolo Bonzini 		if (is_tdp_mmu_page(sp)) {
6133315f02c6SPaolo Bonzini 			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
61348d1a182eSBen Gardon 		} else {
6135c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
6136c50d8ae3SPaolo Bonzini 			WARN_ON_ONCE(sp->lpage_disallowed);
613729cf0f50SBen Gardon 		}
6138c50d8ae3SPaolo Bonzini 
6139531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6140048f4980SSean Christopherson 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6141531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
6142048f4980SSean Christopherson 			flush = false;
6143c50d8ae3SPaolo Bonzini 		}
6144c50d8ae3SPaolo Bonzini 	}
6145048f4980SSean Christopherson 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6146c50d8ae3SPaolo Bonzini 
6147531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
6148c50d8ae3SPaolo Bonzini 	srcu_read_unlock(&kvm->srcu, rcu_idx);
6149c50d8ae3SPaolo Bonzini }
6150c50d8ae3SPaolo Bonzini 
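/*
 * Compute how long the recovery worker should sleep before its next pass:
 * 60 seconds from the start of the previous pass while the mitigation and
 * a non-zero recovery ratio are both in effect, otherwise sleep
 * indefinitely until woken by a module parameter change.
 */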
6151c50d8ae3SPaolo Bonzini static long get_nx_lpage_recovery_timeout(u64 start_time)
6152c50d8ae3SPaolo Bonzini {
6153c50d8ae3SPaolo Bonzini 	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
6154c50d8ae3SPaolo Bonzini 		? start_time + 60 * HZ - get_jiffies_64()
6155c50d8ae3SPaolo Bonzini 		: MAX_SCHEDULE_TIMEOUT;
6156c50d8ae3SPaolo Bonzini }
6157c50d8ae3SPaolo Bonzini 
6158c50d8ae3SPaolo Bonzini static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
6159c50d8ae3SPaolo Bonzini {
6160c50d8ae3SPaolo Bonzini 	u64 start_time;
6161c50d8ae3SPaolo Bonzini 	long remaining_time;
6162c50d8ae3SPaolo Bonzini 
6163c50d8ae3SPaolo Bonzini 	while (true) {
6164c50d8ae3SPaolo Bonzini 		start_time = get_jiffies_64();
6165c50d8ae3SPaolo Bonzini 		remaining_time = get_nx_lpage_recovery_timeout(start_time);
6166c50d8ae3SPaolo Bonzini 
6167c50d8ae3SPaolo Bonzini 		set_current_state(TASK_INTERRUPTIBLE);
6168c50d8ae3SPaolo Bonzini 		while (!kthread_should_stop() && remaining_time > 0) {
6169c50d8ae3SPaolo Bonzini 			schedule_timeout(remaining_time);
6170c50d8ae3SPaolo Bonzini 			remaining_time = get_nx_lpage_recovery_timeout(start_time);
6171c50d8ae3SPaolo Bonzini 			set_current_state(TASK_INTERRUPTIBLE);
6172c50d8ae3SPaolo Bonzini 		}
6173c50d8ae3SPaolo Bonzini 
6174c50d8ae3SPaolo Bonzini 		set_current_state(TASK_RUNNING);
6175c50d8ae3SPaolo Bonzini 
6176c50d8ae3SPaolo Bonzini 		if (kthread_should_stop())
6177c50d8ae3SPaolo Bonzini 			return 0;
6178c50d8ae3SPaolo Bonzini 
6179c50d8ae3SPaolo Bonzini 		kvm_recover_nx_lpages(kvm);
6180c50d8ae3SPaolo Bonzini 	}
6181c50d8ae3SPaolo Bonzini }
6182c50d8ae3SPaolo Bonzini 
6183c50d8ae3SPaolo Bonzini int kvm_mmu_post_init_vm(struct kvm *kvm)
6184c50d8ae3SPaolo Bonzini {
6185c50d8ae3SPaolo Bonzini 	int err;
6186c50d8ae3SPaolo Bonzini 
6187c50d8ae3SPaolo Bonzini 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6188c50d8ae3SPaolo Bonzini 					  "kvm-nx-lpage-recovery",
6189c50d8ae3SPaolo Bonzini 					  &kvm->arch.nx_lpage_recovery_thread);
6190c50d8ae3SPaolo Bonzini 	if (!err)
6191c50d8ae3SPaolo Bonzini 		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6192c50d8ae3SPaolo Bonzini 
6193c50d8ae3SPaolo Bonzini 	return err;
6194c50d8ae3SPaolo Bonzini }
6195c50d8ae3SPaolo Bonzini 
6196c50d8ae3SPaolo Bonzini void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6197c50d8ae3SPaolo Bonzini {
6198c50d8ae3SPaolo Bonzini 	if (kvm->arch.nx_lpage_recovery_thread)
6199c50d8ae3SPaolo Bonzini 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6200c50d8ae3SPaolo Bonzini }
6201