xref: /linux/arch/x86/kvm/mmu/mmu.c (revision 90599c280123618049af5cf375aae5b4e73bec03)
1c50d8ae3SPaolo Bonzini // SPDX-License-Identifier: GPL-2.0-only
2c50d8ae3SPaolo Bonzini /*
3c50d8ae3SPaolo Bonzini  * Kernel-based Virtual Machine driver for Linux
4c50d8ae3SPaolo Bonzini  *
5c50d8ae3SPaolo Bonzini  * This module enables machines with Intel VT-x extensions to run virtual
6c50d8ae3SPaolo Bonzini  * machines without emulation or binary translation.
7c50d8ae3SPaolo Bonzini  *
8c50d8ae3SPaolo Bonzini  * MMU support
9c50d8ae3SPaolo Bonzini  *
10c50d8ae3SPaolo Bonzini  * Copyright (C) 2006 Qumranet, Inc.
11c50d8ae3SPaolo Bonzini  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12c50d8ae3SPaolo Bonzini  *
13c50d8ae3SPaolo Bonzini  * Authors:
14c50d8ae3SPaolo Bonzini  *   Yaniv Kamay  <yaniv@qumranet.com>
15c50d8ae3SPaolo Bonzini  *   Avi Kivity   <avi@qumranet.com>
16c50d8ae3SPaolo Bonzini  */
17c50d8ae3SPaolo Bonzini 
18c50d8ae3SPaolo Bonzini #include "irq.h"
1988197e6aS彭浩(Richard) #include "ioapic.h"
20c50d8ae3SPaolo Bonzini #include "mmu.h"
216ca9a6f3SSean Christopherson #include "mmu_internal.h"
22fe5db27dSBen Gardon #include "tdp_mmu.h"
23c50d8ae3SPaolo Bonzini #include "x86.h"
24c50d8ae3SPaolo Bonzini #include "kvm_cache_regs.h"
252f728d66SSean Christopherson #include "kvm_emulate.h"
26c50d8ae3SPaolo Bonzini #include "cpuid.h"
275a9624afSPaolo Bonzini #include "spte.h"
28c50d8ae3SPaolo Bonzini 
29c50d8ae3SPaolo Bonzini #include <linux/kvm_host.h>
30c50d8ae3SPaolo Bonzini #include <linux/types.h>
31c50d8ae3SPaolo Bonzini #include <linux/string.h>
32c50d8ae3SPaolo Bonzini #include <linux/mm.h>
33c50d8ae3SPaolo Bonzini #include <linux/highmem.h>
34c50d8ae3SPaolo Bonzini #include <linux/moduleparam.h>
35c50d8ae3SPaolo Bonzini #include <linux/export.h>
36c50d8ae3SPaolo Bonzini #include <linux/swap.h>
37c50d8ae3SPaolo Bonzini #include <linux/hugetlb.h>
38c50d8ae3SPaolo Bonzini #include <linux/compiler.h>
39c50d8ae3SPaolo Bonzini #include <linux/srcu.h>
40c50d8ae3SPaolo Bonzini #include <linux/slab.h>
41c50d8ae3SPaolo Bonzini #include <linux/sched/signal.h>
42c50d8ae3SPaolo Bonzini #include <linux/uaccess.h>
43c50d8ae3SPaolo Bonzini #include <linux/hash.h>
44c50d8ae3SPaolo Bonzini #include <linux/kern_levels.h>
45c50d8ae3SPaolo Bonzini #include <linux/kthread.h>
46c50d8ae3SPaolo Bonzini 
47c50d8ae3SPaolo Bonzini #include <asm/page.h>
48eb243d1dSIngo Molnar #include <asm/memtype.h>
49c50d8ae3SPaolo Bonzini #include <asm/cmpxchg.h>
50c50d8ae3SPaolo Bonzini #include <asm/io.h>
514a98623dSSean Christopherson #include <asm/set_memory.h>
52c50d8ae3SPaolo Bonzini #include <asm/vmx.h>
53c50d8ae3SPaolo Bonzini #include <asm/kvm_page_track.h>
54c50d8ae3SPaolo Bonzini #include "trace.h"
55c50d8ae3SPaolo Bonzini 
56c50d8ae3SPaolo Bonzini extern bool itlb_multihit_kvm_mitigation;
57c50d8ae3SPaolo Bonzini 
58a9d6496dSShaokun Zhang int __read_mostly nx_huge_pages = -1;
59c50d8ae3SPaolo Bonzini #ifdef CONFIG_PREEMPT_RT
60c50d8ae3SPaolo Bonzini /* Recovery can cause latency spikes; disable it for PREEMPT_RT.  */
61c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
62c50d8ae3SPaolo Bonzini #else
63c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
64c50d8ae3SPaolo Bonzini #endif
65c50d8ae3SPaolo Bonzini 
66c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
67c50d8ae3SPaolo Bonzini static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
68c50d8ae3SPaolo Bonzini 
69d5d6c18dSJoe Perches static const struct kernel_param_ops nx_huge_pages_ops = {
70c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages,
71c50d8ae3SPaolo Bonzini 	.get = param_get_bool,
72c50d8ae3SPaolo Bonzini };
73c50d8ae3SPaolo Bonzini 
74d5d6c18dSJoe Perches static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
75c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages_recovery_ratio,
76c50d8ae3SPaolo Bonzini 	.get = param_get_uint,
77c50d8ae3SPaolo Bonzini };
78c50d8ae3SPaolo Bonzini 
79c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
80c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages, "bool");
81c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
82c50d8ae3SPaolo Bonzini 		&nx_huge_pages_recovery_ratio, 0644);
83c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
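
/*
 * For illustration: these knobs surface as ordinary module parameters,
 * e.g. "kvm.nx_huge_pages=off" on the kernel command line, or a write to
 * /sys/module/kvm/parameters/nx_huge_pages at runtime; runtime writes
 * funnel through the set_nx_huge_pages() callback registered above.
 */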
84c50d8ae3SPaolo Bonzini 
8571fe7013SSean Christopherson static bool __read_mostly force_flush_and_sync_on_reuse;
8671fe7013SSean Christopherson module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
8771fe7013SSean Christopherson 
88c50d8ae3SPaolo Bonzini /*
89c50d8ae3SPaolo Bonzini  * When this variable is set to true, it enables Two-Dimensional Paging,
90c50d8ae3SPaolo Bonzini  * where the hardware walks two page tables:
91c50d8ae3SPaolo Bonzini  * 1. the guest-virtual to guest-physical translation
92c50d8ae3SPaolo Bonzini  * 2. while doing 1., the guest-physical to host-physical translation
93c50d8ae3SPaolo Bonzini  * If the hardware supports this, we don't need to do shadow paging.
94c50d8ae3SPaolo Bonzini  */
95c50d8ae3SPaolo Bonzini bool tdp_enabled = false;
96c50d8ae3SPaolo Bonzini 
971d92d2e8SSean Christopherson static int max_huge_page_level __read_mostly;
9883013059SSean Christopherson static int max_tdp_level __read_mostly;
99703c335dSSean Christopherson 
100c50d8ae3SPaolo Bonzini enum {
101c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PAGE_FAULT,
102c50d8ae3SPaolo Bonzini 	AUDIT_POST_PAGE_FAULT,
103c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PTE_WRITE,
104c50d8ae3SPaolo Bonzini 	AUDIT_POST_PTE_WRITE,
105c50d8ae3SPaolo Bonzini 	AUDIT_PRE_SYNC,
106c50d8ae3SPaolo Bonzini 	AUDIT_POST_SYNC
107c50d8ae3SPaolo Bonzini };
108c50d8ae3SPaolo Bonzini 
109c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1105a9624afSPaolo Bonzini bool dbg = 0;
111c50d8ae3SPaolo Bonzini module_param(dbg, bool, 0644);
112c50d8ae3SPaolo Bonzini #endif
113c50d8ae3SPaolo Bonzini 
114c50d8ae3SPaolo Bonzini #define PTE_PREFETCH_NUM		8
115c50d8ae3SPaolo Bonzini 
116c50d8ae3SPaolo Bonzini #define PT32_LEVEL_BITS 10
117c50d8ae3SPaolo Bonzini 
118c50d8ae3SPaolo Bonzini #define PT32_LEVEL_SHIFT(level) \
119c50d8ae3SPaolo Bonzini 		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
120c50d8ae3SPaolo Bonzini 
121c50d8ae3SPaolo Bonzini #define PT32_LVL_OFFSET_MASK(level) \
122c50d8ae3SPaolo Bonzini 	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
123c50d8ae3SPaolo Bonzini 						* PT32_LEVEL_BITS))) - 1))
124c50d8ae3SPaolo Bonzini 
125c50d8ae3SPaolo Bonzini #define PT32_INDEX(address, level)\
126c50d8ae3SPaolo Bonzini 	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
127c50d8ae3SPaolo Bonzini 
128c50d8ae3SPaolo Bonzini 
129c50d8ae3SPaolo Bonzini #define PT32_BASE_ADDR_MASK PAGE_MASK
130c50d8ae3SPaolo Bonzini #define PT32_DIR_BASE_ADDR_MASK \
131c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
132c50d8ae3SPaolo Bonzini #define PT32_LVL_ADDR_MASK(level) \
133c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
134c50d8ae3SPaolo Bonzini 					    * PT32_LEVEL_BITS))) - 1))
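
/*
 * A worked example (illustrative only): these macros describe the classic
 * 10/10/12 split of non-PAE 32-bit paging.  PT32_LEVEL_SHIFT(2) == 22 and
 * PT32_LEVEL_SHIFT(1) == 12, so for address 0x12345678:
 *
 *	PT32_INDEX(0x12345678, 2) == 0x048	(page-directory index)
 *	PT32_INDEX(0x12345678, 1) == 0x345	(page-table index)
 */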
135c50d8ae3SPaolo Bonzini 
136c50d8ae3SPaolo Bonzini #include <trace/events/kvm.h>
137c50d8ae3SPaolo Bonzini 
138c50d8ae3SPaolo Bonzini /* make pte_list_desc fit well in a cache line */
139c50d8ae3SPaolo Bonzini #define PTE_LIST_EXT 3
140c50d8ae3SPaolo Bonzini 
141c50d8ae3SPaolo Bonzini struct pte_list_desc {
142c50d8ae3SPaolo Bonzini 	u64 *sptes[PTE_LIST_EXT];
143c50d8ae3SPaolo Bonzini 	struct pte_list_desc *more;
144c50d8ae3SPaolo Bonzini };
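
/*
 * Illustrative sizing note: with PTE_LIST_EXT == 3, a pte_list_desc is
 * four pointers, i.e. 32 bytes on x86-64, so two descriptors fit in a
 * 64-byte cache line; rmap chains longer than three sptes spill into
 * additional descriptors linked through ->more.
 */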
145c50d8ae3SPaolo Bonzini 
146c50d8ae3SPaolo Bonzini struct kvm_shadow_walk_iterator {
147c50d8ae3SPaolo Bonzini 	u64 addr;
148c50d8ae3SPaolo Bonzini 	hpa_t shadow_addr;
149c50d8ae3SPaolo Bonzini 	u64 *sptep;
150c50d8ae3SPaolo Bonzini 	int level;
151c50d8ae3SPaolo Bonzini 	unsigned index;
152c50d8ae3SPaolo Bonzini };
153c50d8ae3SPaolo Bonzini 
154c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
155c50d8ae3SPaolo Bonzini 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
156c50d8ae3SPaolo Bonzini 					 (_root), (_addr));                \
157c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			           \
158c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
159c50d8ae3SPaolo Bonzini 
160c50d8ae3SPaolo Bonzini #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
161c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
162c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			\
163c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
164c50d8ae3SPaolo Bonzini 
165c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
166c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
167c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker)) &&				\
168c50d8ae3SPaolo Bonzini 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
169c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&(_walker), spte))
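
/*
 * Illustrative (hypothetical) use of the lockless iterator; callers must
 * bracket it with walk_shadow_page_lockless_begin()/_end(), and a walk
 * typically stops at the first non-present entry:
 *
 *	struct kvm_shadow_walk_iterator iterator;
 *	u64 spte;
 *
 *	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
 *		if (!is_shadow_present_pte(spte))
 *			break;
 *	}
 */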
170c50d8ae3SPaolo Bonzini 
171c50d8ae3SPaolo Bonzini static struct kmem_cache *pte_list_desc_cache;
17202c00b3aSBen Gardon struct kmem_cache *mmu_page_header_cache;
173c50d8ae3SPaolo Bonzini static struct percpu_counter kvm_total_used_mmu_pages;
174c50d8ae3SPaolo Bonzini 
175c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 spte);
176c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
177c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
178c50d8ae3SPaolo Bonzini 
179594e91a1SSean Christopherson struct kvm_mmu_role_regs {
180594e91a1SSean Christopherson 	const unsigned long cr0;
181594e91a1SSean Christopherson 	const unsigned long cr4;
182594e91a1SSean Christopherson 	const u64 efer;
183594e91a1SSean Christopherson };
184594e91a1SSean Christopherson 
185c50d8ae3SPaolo Bonzini #define CREATE_TRACE_POINTS
186c50d8ae3SPaolo Bonzini #include "mmutrace.h"
187c50d8ae3SPaolo Bonzini 
188594e91a1SSean Christopherson /*
189594e91a1SSean Christopherson  * Yes, lots of underscores.  They're a hint that you probably shouldn't be
190594e91a1SSean Christopherson  * reading from the role_regs.  Once the mmu_role is constructed, it becomes
191594e91a1SSean Christopherson  * the single source of truth for the MMU's state.
192594e91a1SSean Christopherson  */
193594e91a1SSean Christopherson #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
194594e91a1SSean Christopherson static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
195594e91a1SSean Christopherson {									\
196594e91a1SSean Christopherson 	return !!(regs->reg & flag);					\
197594e91a1SSean Christopherson }
198594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
199594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
200594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
201594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
202594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
203594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
204594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
205594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
206594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
207594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
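
/*
 * For illustration, BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG)
 * above expands to roughly:
 *
 *	static inline bool ____is_cr0_pg(struct kvm_mmu_role_regs *regs)
 *	{
 *		return !!(regs->cr0 & X86_CR0_PG);
 *	}
 */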
208594e91a1SSean Christopherson 
20960667724SSean Christopherson /*
21060667724SSean Christopherson  * The MMU itself (with a valid role) is the single source of truth for the
21160667724SSean Christopherson  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
21260667724SSean Christopherson  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
21360667724SSean Christopherson  * and the vCPU may be incorrect/irrelevant.
21460667724SSean Christopherson  */
21560667724SSean Christopherson #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
21660667724SSean Christopherson static inline bool is_##reg##_##name(struct kvm_mmu *mmu)	\
21760667724SSean Christopherson {								\
21860667724SSean Christopherson 	return !!(mmu->mmu_role. base_or_ext . reg##_##name);	\
21960667724SSean Christopherson }
22060667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
22160667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
22260667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
22360667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
22460667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
22560667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
22660667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
22760667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
22860667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
22960667724SSean Christopherson 
230594e91a1SSean Christopherson static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
231594e91a1SSean Christopherson {
232594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = {
233594e91a1SSean Christopherson 		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
234594e91a1SSean Christopherson 		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
235594e91a1SSean Christopherson 		.efer = vcpu->arch.efer,
236594e91a1SSean Christopherson 	};
237594e91a1SSean Christopherson 
238594e91a1SSean Christopherson 	return regs;
239594e91a1SSean Christopherson }
240c50d8ae3SPaolo Bonzini 
241c50d8ae3SPaolo Bonzini static inline bool kvm_available_flush_tlb_with_range(void)
242c50d8ae3SPaolo Bonzini {
243afaf0b2fSSean Christopherson 	return kvm_x86_ops.tlb_remote_flush_with_range;
244c50d8ae3SPaolo Bonzini }
245c50d8ae3SPaolo Bonzini 
246c50d8ae3SPaolo Bonzini static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
247c50d8ae3SPaolo Bonzini 		struct kvm_tlb_range *range)
248c50d8ae3SPaolo Bonzini {
249c50d8ae3SPaolo Bonzini 	int ret = -ENOTSUPP;
250c50d8ae3SPaolo Bonzini 
251afaf0b2fSSean Christopherson 	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
252b3646477SJason Baron 		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
253c50d8ae3SPaolo Bonzini 
254c50d8ae3SPaolo Bonzini 	if (ret)
255c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
256c50d8ae3SPaolo Bonzini }
257c50d8ae3SPaolo Bonzini 
2582f2fad08SBen Gardon void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
259c50d8ae3SPaolo Bonzini 		u64 start_gfn, u64 pages)
260c50d8ae3SPaolo Bonzini {
261c50d8ae3SPaolo Bonzini 	struct kvm_tlb_range range;
262c50d8ae3SPaolo Bonzini 
263c50d8ae3SPaolo Bonzini 	range.start_gfn = start_gfn;
264c50d8ae3SPaolo Bonzini 	range.pages = pages;
265c50d8ae3SPaolo Bonzini 
266c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_range(kvm, &range);
267c50d8ae3SPaolo Bonzini }
268c50d8ae3SPaolo Bonzini 
2698f79b064SBen Gardon static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
2708f79b064SBen Gardon 			   unsigned int access)
2718f79b064SBen Gardon {
272c236d962SSean Christopherson 	u64 spte = make_mmio_spte(vcpu, gfn, access);
2738f79b064SBen Gardon 
274c236d962SSean Christopherson 	trace_mark_mmio_spte(sptep, gfn, spte);
275c236d962SSean Christopherson 	mmu_spte_set(sptep, spte);
276c50d8ae3SPaolo Bonzini }
277c50d8ae3SPaolo Bonzini 
278c50d8ae3SPaolo Bonzini static gfn_t get_mmio_spte_gfn(u64 spte)
279c50d8ae3SPaolo Bonzini {
280c50d8ae3SPaolo Bonzini 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
281c50d8ae3SPaolo Bonzini 
2828a967d65SPaolo Bonzini 	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
283c50d8ae3SPaolo Bonzini 	       & shadow_nonpresent_or_rsvd_mask;
284c50d8ae3SPaolo Bonzini 
285c50d8ae3SPaolo Bonzini 	return gpa >> PAGE_SHIFT;
286c50d8ae3SPaolo Bonzini }
287c50d8ae3SPaolo Bonzini 
288c50d8ae3SPaolo Bonzini static unsigned get_mmio_spte_access(u64 spte)
289c50d8ae3SPaolo Bonzini {
290c50d8ae3SPaolo Bonzini 	return spte & shadow_mmio_access_mask;
291c50d8ae3SPaolo Bonzini }
292c50d8ae3SPaolo Bonzini 
293c50d8ae3SPaolo Bonzini static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
294c50d8ae3SPaolo Bonzini {
295c50d8ae3SPaolo Bonzini 	u64 kvm_gen, spte_gen, gen;
296c50d8ae3SPaolo Bonzini 
297c50d8ae3SPaolo Bonzini 	gen = kvm_vcpu_memslots(vcpu)->generation;
298c50d8ae3SPaolo Bonzini 	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
299c50d8ae3SPaolo Bonzini 		return false;
300c50d8ae3SPaolo Bonzini 
301c50d8ae3SPaolo Bonzini 	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
302c50d8ae3SPaolo Bonzini 	spte_gen = get_mmio_spte_generation(spte);
303c50d8ae3SPaolo Bonzini 
304c50d8ae3SPaolo Bonzini 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
305c50d8ae3SPaolo Bonzini 	return likely(kvm_gen == spte_gen);
306c50d8ae3SPaolo Bonzini }
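
/*
 * Illustrative example: if a memslot update bumps the memslots generation,
 * an MMIO spte created under the old generation fails the
 * kvm_gen == spte_gen comparison above and is treated as stale, forcing
 * the MMIO access to be re-resolved against the new memslots.
 */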
307c50d8ae3SPaolo Bonzini 
308cd313569SMohammed Gamal static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
309cd313569SMohammed Gamal 			   struct x86_exception *exception)
310cd313569SMohammed Gamal {
311ec7771abSMohammed Gamal 	/* Check that the guest physical address does not exceed the guest maximum */
312dc46515cSSean Christopherson 	if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
313ec7771abSMohammed Gamal 		exception->error_code |= PFERR_RSVD_MASK;
314ec7771abSMohammed Gamal 		return UNMAPPED_GVA;
315ec7771abSMohammed Gamal 	}
316ec7771abSMohammed Gamal 
317cd313569SMohammed Gamal 	return gpa;
318cd313569SMohammed Gamal }
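
/*
 * Note (illustrative): this is the default, non-nested translation; the
 * gpa passes through unchanged unless it exceeds the guest's maximum
 * physical-address width, in which case the access is failed with
 * PFERR_RSVD_MASK and UNMAPPED_GVA, mirroring the reserved-bit fault
 * real hardware raises for an illegal physical address.
 */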
319cd313569SMohammed Gamal 
320c50d8ae3SPaolo Bonzini static int is_cpuid_PSE36(void)
321c50d8ae3SPaolo Bonzini {
322c50d8ae3SPaolo Bonzini 	return 1;
323c50d8ae3SPaolo Bonzini }
324c50d8ae3SPaolo Bonzini 
325c50d8ae3SPaolo Bonzini static int is_nx(struct kvm_vcpu *vcpu)
326c50d8ae3SPaolo Bonzini {
327c50d8ae3SPaolo Bonzini 	return vcpu->arch.efer & EFER_NX;
328c50d8ae3SPaolo Bonzini }
329c50d8ae3SPaolo Bonzini 
330c50d8ae3SPaolo Bonzini static gfn_t pse36_gfn_delta(u32 gpte)
331c50d8ae3SPaolo Bonzini {
332c50d8ae3SPaolo Bonzini 	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
333c50d8ae3SPaolo Bonzini 
334c50d8ae3SPaolo Bonzini 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
335c50d8ae3SPaolo Bonzini }
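
/*
 * Worked example (illustrative, assuming the classic PSE-36 layout where
 * gpte bits 16:13 carry physical-address bits 35:32): the shift above is
 * 32 - 13 - 12 == 7, so (gpte & PT32_DIR_PSE36_MASK) << 7 lands those
 * bits at gfn bits 23:20, i.e. physical-address bits 35:32.
 */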
336c50d8ae3SPaolo Bonzini 
337c50d8ae3SPaolo Bonzini #ifdef CONFIG_X86_64
338c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
339c50d8ae3SPaolo Bonzini {
340c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
341c50d8ae3SPaolo Bonzini }
342c50d8ae3SPaolo Bonzini 
343c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
344c50d8ae3SPaolo Bonzini {
345c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
346c50d8ae3SPaolo Bonzini }
347c50d8ae3SPaolo Bonzini 
348c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
349c50d8ae3SPaolo Bonzini {
350c50d8ae3SPaolo Bonzini 	return xchg(sptep, spte);
351c50d8ae3SPaolo Bonzini }
352c50d8ae3SPaolo Bonzini 
353c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
354c50d8ae3SPaolo Bonzini {
355c50d8ae3SPaolo Bonzini 	return READ_ONCE(*sptep);
356c50d8ae3SPaolo Bonzini }
357c50d8ae3SPaolo Bonzini #else
358c50d8ae3SPaolo Bonzini union split_spte {
359c50d8ae3SPaolo Bonzini 	struct {
360c50d8ae3SPaolo Bonzini 		u32 spte_low;
361c50d8ae3SPaolo Bonzini 		u32 spte_high;
362c50d8ae3SPaolo Bonzini 	};
363c50d8ae3SPaolo Bonzini 	u64 spte;
364c50d8ae3SPaolo Bonzini };
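
/*
 * Illustrative layout (x86 is little-endian): a 64-bit spte such as
 * 0x8000000012345067 is seen through this union as
 * spte_low == 0x12345067 (present bit and low pfn bits) and
 * spte_high == 0x80000000, which is why the ordering of the two 32-bit
 * stores in the helpers below matters.
 */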
365c50d8ae3SPaolo Bonzini 
366c50d8ae3SPaolo Bonzini static void count_spte_clear(u64 *sptep, u64 spte)
367c50d8ae3SPaolo Bonzini {
36857354682SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
369c50d8ae3SPaolo Bonzini 
370c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(spte))
371c50d8ae3SPaolo Bonzini 		return;
372c50d8ae3SPaolo Bonzini 
373c50d8ae3SPaolo Bonzini 	/* Ensure the spte is completely set before we increase the count */
374c50d8ae3SPaolo Bonzini 	smp_wmb();
375c50d8ae3SPaolo Bonzini 	sp->clear_spte_count++;
376c50d8ae3SPaolo Bonzini }
377c50d8ae3SPaolo Bonzini 
378c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
379c50d8ae3SPaolo Bonzini {
380c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
381c50d8ae3SPaolo Bonzini 
382c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
383c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
384c50d8ae3SPaolo Bonzini 
385c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
386c50d8ae3SPaolo Bonzini 
387c50d8ae3SPaolo Bonzini 	/*
388c50d8ae3SPaolo Bonzini 	 * If we map the spte from nonpresent to present, we should store
389c50d8ae3SPaolo Bonzini 	 * the high bits first, then set the present bit, so the CPU cannot
390c50d8ae3SPaolo Bonzini 	 * fetch this spte while we are setting it.
391c50d8ae3SPaolo Bonzini 	 */
392c50d8ae3SPaolo Bonzini 	smp_wmb();
393c50d8ae3SPaolo Bonzini 
394c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
395c50d8ae3SPaolo Bonzini }
396c50d8ae3SPaolo Bonzini 
397c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
398c50d8ae3SPaolo Bonzini {
399c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
400c50d8ae3SPaolo Bonzini 
401c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
402c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
403c50d8ae3SPaolo Bonzini 
404c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
405c50d8ae3SPaolo Bonzini 
406c50d8ae3SPaolo Bonzini 	/*
407c50d8ae3SPaolo Bonzini 	 * If we map the spte from present to nonpresent, we should clear
408c50d8ae3SPaolo Bonzini 	 * the present bit first to avoid the vcpu fetching the old high bits.
409c50d8ae3SPaolo Bonzini 	 */
410c50d8ae3SPaolo Bonzini 	smp_wmb();
411c50d8ae3SPaolo Bonzini 
412c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
413c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
414c50d8ae3SPaolo Bonzini }
415c50d8ae3SPaolo Bonzini 
416c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
417c50d8ae3SPaolo Bonzini {
418c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte, orig;
419c50d8ae3SPaolo Bonzini 
420c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
421c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
422c50d8ae3SPaolo Bonzini 
423c50d8ae3SPaolo Bonzini 	/* xchg acts as a barrier before the setting of the high bits */
424c50d8ae3SPaolo Bonzini 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
425c50d8ae3SPaolo Bonzini 	orig.spte_high = ssptep->spte_high;
426c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
427c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
428c50d8ae3SPaolo Bonzini 
429c50d8ae3SPaolo Bonzini 	return orig.spte;
430c50d8ae3SPaolo Bonzini }
431c50d8ae3SPaolo Bonzini 
432c50d8ae3SPaolo Bonzini /*
433c50d8ae3SPaolo Bonzini  * The idea of using this lightweight way to get the spte on x86_32 guests
434c50d8ae3SPaolo Bonzini  * comes from gup_get_pte (mm/gup.c).
435c50d8ae3SPaolo Bonzini  *
436c50d8ae3SPaolo Bonzini  * An spte tlb flush may be pending, because kvm_set_pte_rmapp
437c50d8ae3SPaolo Bonzini  * coalesces them and we are running outside of the MMU lock.  Therefore
438c50d8ae3SPaolo Bonzini  * we need to protect against in-progress updates of the spte.
439c50d8ae3SPaolo Bonzini  *
440c50d8ae3SPaolo Bonzini  * Reading the spte while an update is in progress may get the old value
441c50d8ae3SPaolo Bonzini  * for the high part of the spte.  The race is fine for a present->non-present
442c50d8ae3SPaolo Bonzini  * change (because the high part of the spte is ignored for non-present spte),
443c50d8ae3SPaolo Bonzini  * but for a present->present change we must reread the spte.
444c50d8ae3SPaolo Bonzini  *
445c50d8ae3SPaolo Bonzini  * All such changes are done in two steps (present->non-present and
446c50d8ae3SPaolo Bonzini  * non-present->present), hence it is enough to count the number of
447c50d8ae3SPaolo Bonzini  * present->non-present updates: if it changed while reading the spte,
448c50d8ae3SPaolo Bonzini  * we might have hit the race.  This is done using clear_spte_count.
449c50d8ae3SPaolo Bonzini  */
450c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
451c50d8ae3SPaolo Bonzini {
45257354682SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
453c50d8ae3SPaolo Bonzini 	union split_spte spte, *orig = (union split_spte *)sptep;
454c50d8ae3SPaolo Bonzini 	int count;
455c50d8ae3SPaolo Bonzini 
456c50d8ae3SPaolo Bonzini retry:
457c50d8ae3SPaolo Bonzini 	count = sp->clear_spte_count;
458c50d8ae3SPaolo Bonzini 	smp_rmb();
459c50d8ae3SPaolo Bonzini 
460c50d8ae3SPaolo Bonzini 	spte.spte_low = orig->spte_low;
461c50d8ae3SPaolo Bonzini 	smp_rmb();
462c50d8ae3SPaolo Bonzini 
463c50d8ae3SPaolo Bonzini 	spte.spte_high = orig->spte_high;
464c50d8ae3SPaolo Bonzini 	smp_rmb();
465c50d8ae3SPaolo Bonzini 
466c50d8ae3SPaolo Bonzini 	if (unlikely(spte.spte_low != orig->spte_low ||
467c50d8ae3SPaolo Bonzini 	      count != sp->clear_spte_count))
468c50d8ae3SPaolo Bonzini 		goto retry;
469c50d8ae3SPaolo Bonzini 
470c50d8ae3SPaolo Bonzini 	return spte.spte;
471c50d8ae3SPaolo Bonzini }
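
/*
 * A sketch of the race being defended against (hypothetical timeline; a
 * present->present change really goes present->nonpresent->present):
 *
 *	reader (__get_spte_lockless)	writer
 *	----------------------------	------
 *	read clear_spte_count
 *	read spte_low (old)
 *					clear spte_low (-> nonpresent)
 *					count_spte_clear(): count++
 *					write new spte_high, then spte_low
 *	read spte_high (new!)
 *	recheck spte_low/count -> changed, so retry
 */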
472c50d8ae3SPaolo Bonzini #endif
473c50d8ae3SPaolo Bonzini 
474c50d8ae3SPaolo Bonzini static bool spte_has_volatile_bits(u64 spte)
475c50d8ae3SPaolo Bonzini {
476c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(spte))
477c50d8ae3SPaolo Bonzini 		return false;
478c50d8ae3SPaolo Bonzini 
479c50d8ae3SPaolo Bonzini 	/*
480c50d8ae3SPaolo Bonzini 	 * Always atomically update the spte if it can be updated
481c50d8ae3SPaolo Bonzini 	 * outside of the mmu-lock: this ensures the dirty bit is not
482c50d8ae3SPaolo Bonzini 	 * lost and gives us a stable is_writable_pte(), so that a
483c50d8ae3SPaolo Bonzini 	 * needed tlb flush is not missed.
484c50d8ae3SPaolo Bonzini 	 */
485c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(spte) ||
486c50d8ae3SPaolo Bonzini 	    is_access_track_spte(spte))
487c50d8ae3SPaolo Bonzini 		return true;
488c50d8ae3SPaolo Bonzini 
489c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
490c50d8ae3SPaolo Bonzini 		if ((spte & shadow_accessed_mask) == 0 ||
491c50d8ae3SPaolo Bonzini 		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
492c50d8ae3SPaolo Bonzini 			return true;
493c50d8ae3SPaolo Bonzini 	}
494c50d8ae3SPaolo Bonzini 
495c50d8ae3SPaolo Bonzini 	return false;
496c50d8ae3SPaolo Bonzini }
497c50d8ae3SPaolo Bonzini 
498c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_set:
499c50d8ae3SPaolo Bonzini  * Set the sptep from nonpresent to present.
500c50d8ae3SPaolo Bonzini  * Note: the sptep being assigned *must* be either not present
501c50d8ae3SPaolo Bonzini  * or in a state where the hardware will not attempt to update
502c50d8ae3SPaolo Bonzini  * the spte.
503c50d8ae3SPaolo Bonzini  */
504c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 new_spte)
505c50d8ae3SPaolo Bonzini {
506c50d8ae3SPaolo Bonzini 	WARN_ON(is_shadow_present_pte(*sptep));
507c50d8ae3SPaolo Bonzini 	__set_spte(sptep, new_spte);
508c50d8ae3SPaolo Bonzini }
509c50d8ae3SPaolo Bonzini 
510c50d8ae3SPaolo Bonzini /*
511c50d8ae3SPaolo Bonzini  * Update the SPTE (excluding the PFN), but do not track changes in its
512c50d8ae3SPaolo Bonzini  * accessed/dirty status.
513c50d8ae3SPaolo Bonzini  */
514c50d8ae3SPaolo Bonzini static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
515c50d8ae3SPaolo Bonzini {
516c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
517c50d8ae3SPaolo Bonzini 
518c50d8ae3SPaolo Bonzini 	WARN_ON(!is_shadow_present_pte(new_spte));
519c50d8ae3SPaolo Bonzini 
520c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte)) {
521c50d8ae3SPaolo Bonzini 		mmu_spte_set(sptep, new_spte);
522c50d8ae3SPaolo Bonzini 		return old_spte;
523c50d8ae3SPaolo Bonzini 	}
524c50d8ae3SPaolo Bonzini 
525c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
526c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, new_spte);
527c50d8ae3SPaolo Bonzini 	else
528c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, new_spte);
529c50d8ae3SPaolo Bonzini 
530c50d8ae3SPaolo Bonzini 	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
531c50d8ae3SPaolo Bonzini 
532c50d8ae3SPaolo Bonzini 	return old_spte;
533c50d8ae3SPaolo Bonzini }
534c50d8ae3SPaolo Bonzini 
535c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_update:
536c50d8ae3SPaolo Bonzini  * Update the state bits; the mapped pfn is not changed.
537c50d8ae3SPaolo Bonzini  *
538c50d8ae3SPaolo Bonzini  * Whenever we overwrite a writable spte with a read-only one we
539c50d8ae3SPaolo Bonzini  * should flush remote TLBs.  Otherwise rmap_write_protect
540c50d8ae3SPaolo Bonzini  * would find a read-only spte even though the writable spte
541c50d8ae3SPaolo Bonzini  * might still be cached in a CPU's TLB; the return value indicates
542c50d8ae3SPaolo Bonzini  * this case.
543c50d8ae3SPaolo Bonzini  *
544c50d8ae3SPaolo Bonzini  * Returns true if the TLB needs to be flushed
545c50d8ae3SPaolo Bonzini  */
546c50d8ae3SPaolo Bonzini static bool mmu_spte_update(u64 *sptep, u64 new_spte)
547c50d8ae3SPaolo Bonzini {
548c50d8ae3SPaolo Bonzini 	bool flush = false;
549c50d8ae3SPaolo Bonzini 	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
550c50d8ae3SPaolo Bonzini 
551c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
552c50d8ae3SPaolo Bonzini 		return false;
553c50d8ae3SPaolo Bonzini 
554c50d8ae3SPaolo Bonzini 	/*
555c50d8ae3SPaolo Bonzini 	 * Updating the spte outside of the mmu-lock is safe, since
556c50d8ae3SPaolo Bonzini 	 * we always update it atomically; see the comments in
557c50d8ae3SPaolo Bonzini 	 * spte_has_volatile_bits().
558c50d8ae3SPaolo Bonzini 	 */
559c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(old_spte) &&
560c50d8ae3SPaolo Bonzini 	      !is_writable_pte(new_spte))
561c50d8ae3SPaolo Bonzini 		flush = true;
562c50d8ae3SPaolo Bonzini 
563c50d8ae3SPaolo Bonzini 	/*
564c50d8ae3SPaolo Bonzini 	 * Flush TLB when accessed/dirty states are changed in the page tables,
565c50d8ae3SPaolo Bonzini 	 * to guarantee consistency between TLB and page tables.
566c50d8ae3SPaolo Bonzini 	 */
567c50d8ae3SPaolo Bonzini 
568c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
569c50d8ae3SPaolo Bonzini 		flush = true;
570c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
571c50d8ae3SPaolo Bonzini 	}
572c50d8ae3SPaolo Bonzini 
573c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
574c50d8ae3SPaolo Bonzini 		flush = true;
575c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
576c50d8ae3SPaolo Bonzini 	}
577c50d8ae3SPaolo Bonzini 
578c50d8ae3SPaolo Bonzini 	return flush;
579c50d8ae3SPaolo Bonzini }
580c50d8ae3SPaolo Bonzini 
581c50d8ae3SPaolo Bonzini /*
582c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_track_bits:
583c50d8ae3SPaolo Bonzini  * It sets the sptep from present to nonpresent while tracking the
584c50d8ae3SPaolo Bonzini  * state bits; it is used to clear a last-level sptep.
585c50d8ae3SPaolo Bonzini  * Returns non-zero if the PTE was previously valid.
586c50d8ae3SPaolo Bonzini  */
587c50d8ae3SPaolo Bonzini static int mmu_spte_clear_track_bits(u64 *sptep)
588c50d8ae3SPaolo Bonzini {
589c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
590c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
591c50d8ae3SPaolo Bonzini 
592c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
593c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, 0ull);
594c50d8ae3SPaolo Bonzini 	else
595c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, 0ull);
596c50d8ae3SPaolo Bonzini 
597c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
598c50d8ae3SPaolo Bonzini 		return 0;
599c50d8ae3SPaolo Bonzini 
600c50d8ae3SPaolo Bonzini 	pfn = spte_to_pfn(old_spte);
601c50d8ae3SPaolo Bonzini 
602c50d8ae3SPaolo Bonzini 	/*
603c50d8ae3SPaolo Bonzini 	 * KVM does not hold a refcount on the pages used by the
604c50d8ae3SPaolo Bonzini 	 * kvm mmu; before such a page can be reclaimed, it must
605c50d8ae3SPaolo Bonzini 	 * first be unmapped from the mmu.
606c50d8ae3SPaolo Bonzini 	 */
607c50d8ae3SPaolo Bonzini 	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
608c50d8ae3SPaolo Bonzini 
609c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte))
610c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(pfn);
611c50d8ae3SPaolo Bonzini 
612c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte))
613c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(pfn);
614c50d8ae3SPaolo Bonzini 
615c50d8ae3SPaolo Bonzini 	return 1;
616c50d8ae3SPaolo Bonzini }
617c50d8ae3SPaolo Bonzini 
618c50d8ae3SPaolo Bonzini /*
619c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_no_track:
620c50d8ae3SPaolo Bonzini  * Directly clear the spte without caring about the state bits of the sptep;
621c50d8ae3SPaolo Bonzini  * it is used to clear upper-level sptes.
622c50d8ae3SPaolo Bonzini  */
623c50d8ae3SPaolo Bonzini static void mmu_spte_clear_no_track(u64 *sptep)
624c50d8ae3SPaolo Bonzini {
625c50d8ae3SPaolo Bonzini 	__update_clear_spte_fast(sptep, 0ull);
626c50d8ae3SPaolo Bonzini }
627c50d8ae3SPaolo Bonzini 
628c50d8ae3SPaolo Bonzini static u64 mmu_spte_get_lockless(u64 *sptep)
629c50d8ae3SPaolo Bonzini {
630c50d8ae3SPaolo Bonzini 	return __get_spte_lockless(sptep);
631c50d8ae3SPaolo Bonzini }
632c50d8ae3SPaolo Bonzini 
633c50d8ae3SPaolo Bonzini /* Restore an acc-track PTE back to a regular PTE */
634c50d8ae3SPaolo Bonzini static u64 restore_acc_track_spte(u64 spte)
635c50d8ae3SPaolo Bonzini {
636c50d8ae3SPaolo Bonzini 	u64 new_spte = spte;
6378a967d65SPaolo Bonzini 	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
6388a967d65SPaolo Bonzini 			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
639c50d8ae3SPaolo Bonzini 
640c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(spte_ad_enabled(spte));
641c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!is_access_track_spte(spte));
642c50d8ae3SPaolo Bonzini 
643c50d8ae3SPaolo Bonzini 	new_spte &= ~shadow_acc_track_mask;
6448a967d65SPaolo Bonzini 	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
6458a967d65SPaolo Bonzini 		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
646c50d8ae3SPaolo Bonzini 	new_spte |= saved_bits;
647c50d8ae3SPaolo Bonzini 
648c50d8ae3SPaolo Bonzini 	return new_spte;
649c50d8ae3SPaolo Bonzini }
650c50d8ae3SPaolo Bonzini 
651c50d8ae3SPaolo Bonzini /* Returns the Accessed status of the PTE and resets it at the same time. */
652c50d8ae3SPaolo Bonzini static bool mmu_spte_age(u64 *sptep)
653c50d8ae3SPaolo Bonzini {
654c50d8ae3SPaolo Bonzini 	u64 spte = mmu_spte_get_lockless(sptep);
655c50d8ae3SPaolo Bonzini 
656c50d8ae3SPaolo Bonzini 	if (!is_accessed_spte(spte))
657c50d8ae3SPaolo Bonzini 		return false;
658c50d8ae3SPaolo Bonzini 
659c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
660c50d8ae3SPaolo Bonzini 		clear_bit((ffs(shadow_accessed_mask) - 1),
661c50d8ae3SPaolo Bonzini 			  (unsigned long *)sptep);
662c50d8ae3SPaolo Bonzini 	} else {
663c50d8ae3SPaolo Bonzini 		/*
664c50d8ae3SPaolo Bonzini 		 * Capture the dirty status of the page, so that it doesn't get
665c50d8ae3SPaolo Bonzini 		 * lost when the SPTE is marked for access tracking.
666c50d8ae3SPaolo Bonzini 		 */
667c50d8ae3SPaolo Bonzini 		if (is_writable_pte(spte))
668c50d8ae3SPaolo Bonzini 			kvm_set_pfn_dirty(spte_to_pfn(spte));
669c50d8ae3SPaolo Bonzini 
670c50d8ae3SPaolo Bonzini 		spte = mark_spte_for_access_track(spte);
671c50d8ae3SPaolo Bonzini 		mmu_spte_update_no_track(sptep, spte);
672c50d8ae3SPaolo Bonzini 	}
673c50d8ae3SPaolo Bonzini 
674c50d8ae3SPaolo Bonzini 	return true;
675c50d8ae3SPaolo Bonzini }
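
/*
 * Illustrative note: with A/D bits available (spte_ad_enabled()), aging is
 * a plain atomic clear of the hardware Accessed bit.  Without them, the
 * spte is converted to the "access-tracked" form by
 * mark_spte_for_access_track(), which stashes the permission bits so that
 * restore_acc_track_spte() can reinstall them when the page is touched
 * again.
 */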
676c50d8ae3SPaolo Bonzini 
677c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
678c50d8ae3SPaolo Bonzini {
679c50d8ae3SPaolo Bonzini 	/*
680c50d8ae3SPaolo Bonzini 	 * Prevent page table teardown by making any would-be freer wait during the
681c50d8ae3SPaolo Bonzini 	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
682c50d8ae3SPaolo Bonzini 	 */
683c50d8ae3SPaolo Bonzini 	local_irq_disable();
684c50d8ae3SPaolo Bonzini 
685c50d8ae3SPaolo Bonzini 	/*
686c50d8ae3SPaolo Bonzini 	 * Make sure a following spte read is not reordered ahead of the write
687c50d8ae3SPaolo Bonzini 	 * to vcpu->mode.
688c50d8ae3SPaolo Bonzini 	 */
689c50d8ae3SPaolo Bonzini 	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
690c50d8ae3SPaolo Bonzini }
691c50d8ae3SPaolo Bonzini 
692c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
693c50d8ae3SPaolo Bonzini {
694c50d8ae3SPaolo Bonzini 	/*
695c50d8ae3SPaolo Bonzini 	 * Make sure the write to vcpu->mode is not reordered in front of
696c50d8ae3SPaolo Bonzini 	 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
697c50d8ae3SPaolo Bonzini 	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
698c50d8ae3SPaolo Bonzini 	 */
699c50d8ae3SPaolo Bonzini 	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
700c50d8ae3SPaolo Bonzini 	local_irq_enable();
701c50d8ae3SPaolo Bonzini }
702c50d8ae3SPaolo Bonzini 
703378f5cd6SSean Christopherson static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
704c50d8ae3SPaolo Bonzini {
705c50d8ae3SPaolo Bonzini 	int r;
706c50d8ae3SPaolo Bonzini 
707531281adSSean Christopherson 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
70894ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
709531281adSSean Christopherson 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
710c50d8ae3SPaolo Bonzini 	if (r)
711c50d8ae3SPaolo Bonzini 		return r;
71294ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
713171a90d7SSean Christopherson 				       PT64_ROOT_MAX_LEVEL);
714171a90d7SSean Christopherson 	if (r)
715171a90d7SSean Christopherson 		return r;
716378f5cd6SSean Christopherson 	if (maybe_indirect) {
71794ce87efSSean Christopherson 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
718171a90d7SSean Christopherson 					       PT64_ROOT_MAX_LEVEL);
719c50d8ae3SPaolo Bonzini 		if (r)
720c50d8ae3SPaolo Bonzini 			return r;
721378f5cd6SSean Christopherson 	}
72294ce87efSSean Christopherson 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
723531281adSSean Christopherson 					  PT64_ROOT_MAX_LEVEL);
724c50d8ae3SPaolo Bonzini }
725c50d8ae3SPaolo Bonzini 
726c50d8ae3SPaolo Bonzini static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
727c50d8ae3SPaolo Bonzini {
72894ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
72994ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
73094ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
73194ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
732c50d8ae3SPaolo Bonzini }
733c50d8ae3SPaolo Bonzini 
734c50d8ae3SPaolo Bonzini static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
735c50d8ae3SPaolo Bonzini {
73694ce87efSSean Christopherson 	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
737c50d8ae3SPaolo Bonzini }
738c50d8ae3SPaolo Bonzini 
739c50d8ae3SPaolo Bonzini static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
740c50d8ae3SPaolo Bonzini {
741c50d8ae3SPaolo Bonzini 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
742c50d8ae3SPaolo Bonzini }
743c50d8ae3SPaolo Bonzini 
744c50d8ae3SPaolo Bonzini static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
745c50d8ae3SPaolo Bonzini {
746c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
747c50d8ae3SPaolo Bonzini 		return sp->gfns[index];
748c50d8ae3SPaolo Bonzini 
749c50d8ae3SPaolo Bonzini 	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
750c50d8ae3SPaolo Bonzini }
751c50d8ae3SPaolo Bonzini 
752c50d8ae3SPaolo Bonzini static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
753c50d8ae3SPaolo Bonzini {
754c50d8ae3SPaolo Bonzini 	if (!sp->role.direct) {
755c50d8ae3SPaolo Bonzini 		sp->gfns[index] = gfn;
756c50d8ae3SPaolo Bonzini 		return;
757c50d8ae3SPaolo Bonzini 	}
758c50d8ae3SPaolo Bonzini 
759c50d8ae3SPaolo Bonzini 	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
760c50d8ae3SPaolo Bonzini 		pr_err_ratelimited("gfn mismatch under direct page %llx "
761c50d8ae3SPaolo Bonzini 				   "(expected %llx, got %llx)\n",
762c50d8ae3SPaolo Bonzini 				   sp->gfn,
763c50d8ae3SPaolo Bonzini 				   kvm_mmu_page_get_gfn(sp, index), gfn);
764c50d8ae3SPaolo Bonzini }
765c50d8ae3SPaolo Bonzini 
766c50d8ae3SPaolo Bonzini /*
767c50d8ae3SPaolo Bonzini  * Return the pointer to the large page information for a given gfn,
768c50d8ae3SPaolo Bonzini  * handling slots that are not large page aligned.
769c50d8ae3SPaolo Bonzini  */
770c50d8ae3SPaolo Bonzini static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
7718ca6f063SBen Gardon 		const struct kvm_memory_slot *slot, int level)
772c50d8ae3SPaolo Bonzini {
773c50d8ae3SPaolo Bonzini 	unsigned long idx;
774c50d8ae3SPaolo Bonzini 
775c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
776c50d8ae3SPaolo Bonzini 	return &slot->arch.lpage_info[level - 2][idx];
777c50d8ae3SPaolo Bonzini }
778c50d8ae3SPaolo Bonzini 
779c50d8ae3SPaolo Bonzini static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
780c50d8ae3SPaolo Bonzini 					    gfn_t gfn, int count)
781c50d8ae3SPaolo Bonzini {
782c50d8ae3SPaolo Bonzini 	struct kvm_lpage_info *linfo;
783c50d8ae3SPaolo Bonzini 	int i;
784c50d8ae3SPaolo Bonzini 
7853bae0459SSean Christopherson 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
786c50d8ae3SPaolo Bonzini 		linfo = lpage_info_slot(gfn, slot, i);
787c50d8ae3SPaolo Bonzini 		linfo->disallow_lpage += count;
788c50d8ae3SPaolo Bonzini 		WARN_ON(linfo->disallow_lpage < 0);
789c50d8ae3SPaolo Bonzini 	}
790c50d8ae3SPaolo Bonzini }
791c50d8ae3SPaolo Bonzini 
792c50d8ae3SPaolo Bonzini void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
793c50d8ae3SPaolo Bonzini {
794c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, 1);
795c50d8ae3SPaolo Bonzini }
796c50d8ae3SPaolo Bonzini 
797c50d8ae3SPaolo Bonzini void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
798c50d8ae3SPaolo Bonzini {
799c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, -1);
800c50d8ae3SPaolo Bonzini }
801c50d8ae3SPaolo Bonzini 
802c50d8ae3SPaolo Bonzini static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
803c50d8ae3SPaolo Bonzini {
804c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
805c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
806c50d8ae3SPaolo Bonzini 	gfn_t gfn;
807c50d8ae3SPaolo Bonzini 
808c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages++;
809c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
810c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
811c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
812c50d8ae3SPaolo Bonzini 
813c50d8ae3SPaolo Bonzini 	/* Non-leaf shadow pages are kept write-protected (read-only). */
8143bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
815c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_add_page(kvm, slot, gfn,
816c50d8ae3SPaolo Bonzini 						    KVM_PAGE_TRACK_WRITE);
817c50d8ae3SPaolo Bonzini 
818c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
819c50d8ae3SPaolo Bonzini }
820c50d8ae3SPaolo Bonzini 
82129cf0f50SBen Gardon void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
822c50d8ae3SPaolo Bonzini {
823c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
824c50d8ae3SPaolo Bonzini 		return;
825c50d8ae3SPaolo Bonzini 
826c50d8ae3SPaolo Bonzini 	++kvm->stat.nx_lpage_splits;
827c50d8ae3SPaolo Bonzini 	list_add_tail(&sp->lpage_disallowed_link,
828c50d8ae3SPaolo Bonzini 		      &kvm->arch.lpage_disallowed_mmu_pages);
829c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = true;
830c50d8ae3SPaolo Bonzini }
831c50d8ae3SPaolo Bonzini 
832c50d8ae3SPaolo Bonzini static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
833c50d8ae3SPaolo Bonzini {
834c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
835c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
836c50d8ae3SPaolo Bonzini 	gfn_t gfn;
837c50d8ae3SPaolo Bonzini 
838c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages--;
839c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
840c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
841c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
8423bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
843c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
844c50d8ae3SPaolo Bonzini 						       KVM_PAGE_TRACK_WRITE);
845c50d8ae3SPaolo Bonzini 
846c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_allow_lpage(slot, gfn);
847c50d8ae3SPaolo Bonzini }
848c50d8ae3SPaolo Bonzini 
84929cf0f50SBen Gardon void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
850c50d8ae3SPaolo Bonzini {
851c50d8ae3SPaolo Bonzini 	--kvm->stat.nx_lpage_splits;
852c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = false;
853c50d8ae3SPaolo Bonzini 	list_del(&sp->lpage_disallowed_link);
854c50d8ae3SPaolo Bonzini }
855c50d8ae3SPaolo Bonzini 
856c50d8ae3SPaolo Bonzini static struct kvm_memory_slot *
857c50d8ae3SPaolo Bonzini gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
858c50d8ae3SPaolo Bonzini 			    bool no_dirty_log)
859c50d8ae3SPaolo Bonzini {
860c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
861c50d8ae3SPaolo Bonzini 
862c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
86391b0d268SPaolo Bonzini 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
86491b0d268SPaolo Bonzini 		return NULL;
865044c59c4SPeter Xu 	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
86691b0d268SPaolo Bonzini 		return NULL;
867c50d8ae3SPaolo Bonzini 
868c50d8ae3SPaolo Bonzini 	return slot;
869c50d8ae3SPaolo Bonzini }
870c50d8ae3SPaolo Bonzini 
871c50d8ae3SPaolo Bonzini /*
872c50d8ae3SPaolo Bonzini  * About rmap_head encoding:
873c50d8ae3SPaolo Bonzini  *
874c50d8ae3SPaolo Bonzini  * If the bit zero of rmap_head->val is clear, then it points to the only spte
875c50d8ae3SPaolo Bonzini  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
876c50d8ae3SPaolo Bonzini  * pte_list_desc containing more mappings.
877c50d8ae3SPaolo Bonzini  */
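
/*
 * For illustration: after adding a single spte A, rmap_head->val holds
 * (unsigned long)A with bit zero clear.  Adding a second spte B allocates
 * a pte_list_desc desc with desc->sptes == {A, B, NULL} and stores
 * ((unsigned long)desc | 1) in rmap_head->val.
 */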
878c50d8ae3SPaolo Bonzini 
879c50d8ae3SPaolo Bonzini /*
880c50d8ae3SPaolo Bonzini  * Returns the number of pointers in the rmap chain, not counting the new one.
881c50d8ae3SPaolo Bonzini  */
882c50d8ae3SPaolo Bonzini static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
883c50d8ae3SPaolo Bonzini 			struct kvm_rmap_head *rmap_head)
884c50d8ae3SPaolo Bonzini {
885c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
886c50d8ae3SPaolo Bonzini 	int i, count = 0;
887c50d8ae3SPaolo Bonzini 
888c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
889805a0f83SStephen Zhang 		rmap_printk("%p %llx 0->1\n", spte, *spte);
890c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)spte;
891c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
892805a0f83SStephen Zhang 		rmap_printk("%p %llx 1->many\n", spte, *spte);
893c50d8ae3SPaolo Bonzini 		desc = mmu_alloc_pte_list_desc(vcpu);
894c50d8ae3SPaolo Bonzini 		desc->sptes[0] = (u64 *)rmap_head->val;
895c50d8ae3SPaolo Bonzini 		desc->sptes[1] = spte;
896c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)desc | 1;
897c50d8ae3SPaolo Bonzini 		++count;
898c50d8ae3SPaolo Bonzini 	} else {
899805a0f83SStephen Zhang 		rmap_printk("%p %llx many->many\n", spte, *spte);
900c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
901c6c4f961SLi RongQing 		while (desc->sptes[PTE_LIST_EXT - 1]) {
902c50d8ae3SPaolo Bonzini 			count += PTE_LIST_EXT;
903c6c4f961SLi RongQing 
904c6c4f961SLi RongQing 			if (!desc->more) {
905c50d8ae3SPaolo Bonzini 				desc->more = mmu_alloc_pte_list_desc(vcpu);
906c50d8ae3SPaolo Bonzini 				desc = desc->more;
907c6c4f961SLi RongQing 				break;
908c6c4f961SLi RongQing 			}
909c6c4f961SLi RongQing 			desc = desc->more;
910c50d8ae3SPaolo Bonzini 		}
911c50d8ae3SPaolo Bonzini 		for (i = 0; desc->sptes[i]; ++i)
912c50d8ae3SPaolo Bonzini 			++count;
913c50d8ae3SPaolo Bonzini 		desc->sptes[i] = spte;
914c50d8ae3SPaolo Bonzini 	}
915c50d8ae3SPaolo Bonzini 	return count;
916c50d8ae3SPaolo Bonzini }
917c50d8ae3SPaolo Bonzini 
918c50d8ae3SPaolo Bonzini static void
919c50d8ae3SPaolo Bonzini pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
920c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *desc, int i,
921c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *prev_desc)
922c50d8ae3SPaolo Bonzini {
923c50d8ae3SPaolo Bonzini 	int j;
924c50d8ae3SPaolo Bonzini 
925c50d8ae3SPaolo Bonzini 	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
926c50d8ae3SPaolo Bonzini 		;
927c50d8ae3SPaolo Bonzini 	desc->sptes[i] = desc->sptes[j];
928c50d8ae3SPaolo Bonzini 	desc->sptes[j] = NULL;
929c50d8ae3SPaolo Bonzini 	if (j != 0)
930c50d8ae3SPaolo Bonzini 		return;
931c50d8ae3SPaolo Bonzini 	if (!prev_desc && !desc->more)
932fe3c2b4cSMiaohe Lin 		rmap_head->val = 0;
933c50d8ae3SPaolo Bonzini 	else
934c50d8ae3SPaolo Bonzini 		if (prev_desc)
935c50d8ae3SPaolo Bonzini 			prev_desc->more = desc->more;
936c50d8ae3SPaolo Bonzini 		else
937c50d8ae3SPaolo Bonzini 			rmap_head->val = (unsigned long)desc->more | 1;
938c50d8ae3SPaolo Bonzini 	mmu_free_pte_list_desc(desc);
939c50d8ae3SPaolo Bonzini }
940c50d8ae3SPaolo Bonzini 
941c50d8ae3SPaolo Bonzini static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
942c50d8ae3SPaolo Bonzini {
943c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
944c50d8ae3SPaolo Bonzini 	struct pte_list_desc *prev_desc;
945c50d8ae3SPaolo Bonzini 	int i;
946c50d8ae3SPaolo Bonzini 
947c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
948c50d8ae3SPaolo Bonzini 		pr_err("%s: %p 0->BUG\n", __func__, spte);
949c50d8ae3SPaolo Bonzini 		BUG();
950c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
951805a0f83SStephen Zhang 		rmap_printk("%p 1->0\n", spte);
952c50d8ae3SPaolo Bonzini 		if ((u64 *)rmap_head->val != spte) {
953c50d8ae3SPaolo Bonzini 			pr_err("%s:  %p 1->BUG\n", __func__, spte);
954c50d8ae3SPaolo Bonzini 			BUG();
955c50d8ae3SPaolo Bonzini 		}
956c50d8ae3SPaolo Bonzini 		rmap_head->val = 0;
957c50d8ae3SPaolo Bonzini 	} else {
958805a0f83SStephen Zhang 		rmap_printk("%p many->many\n", spte);
959c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
960c50d8ae3SPaolo Bonzini 		prev_desc = NULL;
961c50d8ae3SPaolo Bonzini 		while (desc) {
962c50d8ae3SPaolo Bonzini 			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
963c50d8ae3SPaolo Bonzini 				if (desc->sptes[i] == spte) {
964c50d8ae3SPaolo Bonzini 					pte_list_desc_remove_entry(rmap_head,
965c50d8ae3SPaolo Bonzini 							desc, i, prev_desc);
966c50d8ae3SPaolo Bonzini 					return;
967c50d8ae3SPaolo Bonzini 				}
968c50d8ae3SPaolo Bonzini 			}
969c50d8ae3SPaolo Bonzini 			prev_desc = desc;
970c50d8ae3SPaolo Bonzini 			desc = desc->more;
971c50d8ae3SPaolo Bonzini 		}
972c50d8ae3SPaolo Bonzini 		pr_err("%s: %p many->many\n", __func__, spte);
973c50d8ae3SPaolo Bonzini 		BUG();
974c50d8ae3SPaolo Bonzini 	}
975c50d8ae3SPaolo Bonzini }
976c50d8ae3SPaolo Bonzini 
977c50d8ae3SPaolo Bonzini static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
978c50d8ae3SPaolo Bonzini {
979c50d8ae3SPaolo Bonzini 	mmu_spte_clear_track_bits(sptep);
980c50d8ae3SPaolo Bonzini 	__pte_list_remove(sptep, rmap_head);
981c50d8ae3SPaolo Bonzini }
982c50d8ae3SPaolo Bonzini 
983c50d8ae3SPaolo Bonzini static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
984c50d8ae3SPaolo Bonzini 					   struct kvm_memory_slot *slot)
985c50d8ae3SPaolo Bonzini {
986c50d8ae3SPaolo Bonzini 	unsigned long idx;
987c50d8ae3SPaolo Bonzini 
988c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
9893bae0459SSean Christopherson 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
990c50d8ae3SPaolo Bonzini }
991c50d8ae3SPaolo Bonzini 
992c50d8ae3SPaolo Bonzini static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
993c50d8ae3SPaolo Bonzini 					 struct kvm_mmu_page *sp)
994c50d8ae3SPaolo Bonzini {
995c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
996c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
997c50d8ae3SPaolo Bonzini 
998c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
999c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
1000c50d8ae3SPaolo Bonzini 	return __gfn_to_rmap(gfn, sp->role.level, slot);
1001c50d8ae3SPaolo Bonzini }
1002c50d8ae3SPaolo Bonzini 
1003c50d8ae3SPaolo Bonzini static bool rmap_can_add(struct kvm_vcpu *vcpu)
1004c50d8ae3SPaolo Bonzini {
1005356ec69aSSean Christopherson 	struct kvm_mmu_memory_cache *mc;
1006c50d8ae3SPaolo Bonzini 
1007356ec69aSSean Christopherson 	mc = &vcpu->arch.mmu_pte_list_desc_cache;
100894ce87efSSean Christopherson 	return kvm_mmu_memory_cache_nr_free_objects(mc);
1009c50d8ae3SPaolo Bonzini }
1010c50d8ae3SPaolo Bonzini 
1011c50d8ae3SPaolo Bonzini static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1012c50d8ae3SPaolo Bonzini {
1013c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1014c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1015c50d8ae3SPaolo Bonzini 
101657354682SSean Christopherson 	sp = sptep_to_sp(spte);
1017c50d8ae3SPaolo Bonzini 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1018c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1019c50d8ae3SPaolo Bonzini 	return pte_list_add(vcpu, spte, rmap_head);
1020c50d8ae3SPaolo Bonzini }
1021c50d8ae3SPaolo Bonzini 
1022c50d8ae3SPaolo Bonzini static void rmap_remove(struct kvm *kvm, u64 *spte)
1023c50d8ae3SPaolo Bonzini {
1024c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1025c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1026c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1027c50d8ae3SPaolo Bonzini 
102857354682SSean Christopherson 	sp = sptep_to_sp(spte);
1029c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1030c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(kvm, gfn, sp);
1031c50d8ae3SPaolo Bonzini 	__pte_list_remove(spte, rmap_head);
1032c50d8ae3SPaolo Bonzini }
1033c50d8ae3SPaolo Bonzini 
1034c50d8ae3SPaolo Bonzini /*
1035c50d8ae3SPaolo Bonzini  * Used by the following functions to iterate through the sptes linked by a
1036c50d8ae3SPaolo Bonzini  * rmap.  All fields are private and not assumed to be used outside.
1037c50d8ae3SPaolo Bonzini  */
1038c50d8ae3SPaolo Bonzini struct rmap_iterator {
1039c50d8ae3SPaolo Bonzini 	/* private fields */
1040c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
1041c50d8ae3SPaolo Bonzini 	int pos;			/* index of the sptep */
1042c50d8ae3SPaolo Bonzini };
1043c50d8ae3SPaolo Bonzini 
1044c50d8ae3SPaolo Bonzini /*
1045c50d8ae3SPaolo Bonzini  * Iteration must be started by this function.  This should also be used after
1046c50d8ae3SPaolo Bonzini  * removing/dropping sptes from the rmap link because in such cases the
10470a03cbdaSMiaohe Lin  * information in the iterator may not be valid.
1048c50d8ae3SPaolo Bonzini  *
1049c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1050c50d8ae3SPaolo Bonzini  */
1051c50d8ae3SPaolo Bonzini static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1052c50d8ae3SPaolo Bonzini 			   struct rmap_iterator *iter)
1053c50d8ae3SPaolo Bonzini {
1054c50d8ae3SPaolo Bonzini 	u64 *sptep;
1055c50d8ae3SPaolo Bonzini 
1056c50d8ae3SPaolo Bonzini 	if (!rmap_head->val)
1057c50d8ae3SPaolo Bonzini 		return NULL;
1058c50d8ae3SPaolo Bonzini 
1059c50d8ae3SPaolo Bonzini 	if (!(rmap_head->val & 1)) {
1060c50d8ae3SPaolo Bonzini 		iter->desc = NULL;
1061c50d8ae3SPaolo Bonzini 		sptep = (u64 *)rmap_head->val;
1062c50d8ae3SPaolo Bonzini 		goto out;
1063c50d8ae3SPaolo Bonzini 	}
1064c50d8ae3SPaolo Bonzini 
1065c50d8ae3SPaolo Bonzini 	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1066c50d8ae3SPaolo Bonzini 	iter->pos = 0;
1067c50d8ae3SPaolo Bonzini 	sptep = iter->desc->sptes[iter->pos];
1068c50d8ae3SPaolo Bonzini out:
1069c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1070c50d8ae3SPaolo Bonzini 	return sptep;
1071c50d8ae3SPaolo Bonzini }
1072c50d8ae3SPaolo Bonzini 
1073c50d8ae3SPaolo Bonzini /*
1074c50d8ae3SPaolo Bonzini  * Must be used with a valid iterator: e.g. after rmap_get_first().
1075c50d8ae3SPaolo Bonzini  *
1076c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1077c50d8ae3SPaolo Bonzini  */
1078c50d8ae3SPaolo Bonzini static u64 *rmap_get_next(struct rmap_iterator *iter)
1079c50d8ae3SPaolo Bonzini {
1080c50d8ae3SPaolo Bonzini 	u64 *sptep;
1081c50d8ae3SPaolo Bonzini 
1082c50d8ae3SPaolo Bonzini 	if (iter->desc) {
1083c50d8ae3SPaolo Bonzini 		if (iter->pos < PTE_LIST_EXT - 1) {
1084c50d8ae3SPaolo Bonzini 			++iter->pos;
1085c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1086c50d8ae3SPaolo Bonzini 			if (sptep)
1087c50d8ae3SPaolo Bonzini 				goto out;
1088c50d8ae3SPaolo Bonzini 		}
1089c50d8ae3SPaolo Bonzini 
1090c50d8ae3SPaolo Bonzini 		iter->desc = iter->desc->more;
1091c50d8ae3SPaolo Bonzini 
1092c50d8ae3SPaolo Bonzini 		if (iter->desc) {
1093c50d8ae3SPaolo Bonzini 			iter->pos = 0;
1094c50d8ae3SPaolo Bonzini 			/* desc->sptes[0] cannot be NULL */
1095c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1096c50d8ae3SPaolo Bonzini 			goto out;
1097c50d8ae3SPaolo Bonzini 		}
1098c50d8ae3SPaolo Bonzini 	}
1099c50d8ae3SPaolo Bonzini 
1100c50d8ae3SPaolo Bonzini 	return NULL;
1101c50d8ae3SPaolo Bonzini out:
1102c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1103c50d8ae3SPaolo Bonzini 	return sptep;
1104c50d8ae3SPaolo Bonzini }
1105c50d8ae3SPaolo Bonzini 
1106c50d8ae3SPaolo Bonzini #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
1107c50d8ae3SPaolo Bonzini 	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
1108c50d8ae3SPaolo Bonzini 	     _spte_; _spte_ = rmap_get_next(_iter_))
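
/*
 * Usage sketch (illustrative; __rmap_write_protect() below is a real
 * caller):
 *
 *	u64 *sptep;
 *	struct rmap_iterator iter;
 *
 *	for_each_rmap_spte(rmap_head, &iter, sptep)
 *		flush |= spte_write_protect(sptep, pt_protect);
 *
 * The iterator must be restarted via rmap_get_first() whenever sptes
 * are removed mid-walk; see kvm_zap_rmapp(), which loops on
 * rmap_get_first() for exactly that reason.
 */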
1109c50d8ae3SPaolo Bonzini 
1110c50d8ae3SPaolo Bonzini static void drop_spte(struct kvm *kvm, u64 *sptep)
1111c50d8ae3SPaolo Bonzini {
1112c50d8ae3SPaolo Bonzini 	if (mmu_spte_clear_track_bits(sptep))
1113c50d8ae3SPaolo Bonzini 		rmap_remove(kvm, sptep);
1114c50d8ae3SPaolo Bonzini }
1115c50d8ae3SPaolo Bonzini 
1116c50d8ae3SPaolo Bonzini 
1117c50d8ae3SPaolo Bonzini static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1118c50d8ae3SPaolo Bonzini {
1119c50d8ae3SPaolo Bonzini 	if (is_large_pte(*sptep)) {
112057354682SSean Christopherson 		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1121c50d8ae3SPaolo Bonzini 		drop_spte(kvm, sptep);
1122c50d8ae3SPaolo Bonzini 		--kvm->stat.lpages;
1123c50d8ae3SPaolo Bonzini 		return true;
1124c50d8ae3SPaolo Bonzini 	}
1125c50d8ae3SPaolo Bonzini 
1126c50d8ae3SPaolo Bonzini 	return false;
1127c50d8ae3SPaolo Bonzini }
1128c50d8ae3SPaolo Bonzini 
1129c50d8ae3SPaolo Bonzini static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1130c50d8ae3SPaolo Bonzini {
1131c50d8ae3SPaolo Bonzini 	if (__drop_large_spte(vcpu->kvm, sptep)) {
113257354682SSean Christopherson 		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1133c50d8ae3SPaolo Bonzini 
1134c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1135c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1136c50d8ae3SPaolo Bonzini 	}
1137c50d8ae3SPaolo Bonzini }
1138c50d8ae3SPaolo Bonzini 
1139c50d8ae3SPaolo Bonzini /*
1140c50d8ae3SPaolo Bonzini  * Write-protect the specified @sptep; @pt_protect indicates whether the
1141c50d8ae3SPaolo Bonzini  * write-protection is done to protect a shadow page table.
1142c50d8ae3SPaolo Bonzini  *
1143c50d8ae3SPaolo Bonzini  * Note: write protection differs between dirty logging and spte
1144c50d8ae3SPaolo Bonzini  * protection:
1145c50d8ae3SPaolo Bonzini  * - for dirty logging, the spte can be made writable again at any time
1146c50d8ae3SPaolo Bonzini  *   if its dirty bitmap is properly set.
1147c50d8ae3SPaolo Bonzini  * - for spte protection, the spte can be made writable only after the
1148c50d8ae3SPaolo Bonzini  *   shadow page has been unsynced.
1149c50d8ae3SPaolo Bonzini  *
1150c50d8ae3SPaolo Bonzini  * Returns true if the TLB needs to be flushed.
1151c50d8ae3SPaolo Bonzini  */
1152c50d8ae3SPaolo Bonzini static bool spte_write_protect(u64 *sptep, bool pt_protect)
1153c50d8ae3SPaolo Bonzini {
1154c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1155c50d8ae3SPaolo Bonzini 
1156c50d8ae3SPaolo Bonzini 	if (!is_writable_pte(spte) &&
1157c50d8ae3SPaolo Bonzini 	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1158c50d8ae3SPaolo Bonzini 		return false;
1159c50d8ae3SPaolo Bonzini 
1160805a0f83SStephen Zhang 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1161c50d8ae3SPaolo Bonzini 
1162c50d8ae3SPaolo Bonzini 	if (pt_protect)
11635fc3424fSSean Christopherson 		spte &= ~shadow_mmu_writable_mask;
1164c50d8ae3SPaolo Bonzini 	spte = spte & ~PT_WRITABLE_MASK;
1165c50d8ae3SPaolo Bonzini 
1166c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1167c50d8ae3SPaolo Bonzini }
1168c50d8ae3SPaolo Bonzini 
1169c50d8ae3SPaolo Bonzini static bool __rmap_write_protect(struct kvm *kvm,
1170c50d8ae3SPaolo Bonzini 				 struct kvm_rmap_head *rmap_head,
1171c50d8ae3SPaolo Bonzini 				 bool pt_protect)
1172c50d8ae3SPaolo Bonzini {
1173c50d8ae3SPaolo Bonzini 	u64 *sptep;
1174c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1175c50d8ae3SPaolo Bonzini 	bool flush = false;
1176c50d8ae3SPaolo Bonzini 
1177c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1178c50d8ae3SPaolo Bonzini 		flush |= spte_write_protect(sptep, pt_protect);
1179c50d8ae3SPaolo Bonzini 
1180c50d8ae3SPaolo Bonzini 	return flush;
1181c50d8ae3SPaolo Bonzini }
1182c50d8ae3SPaolo Bonzini 
1183c50d8ae3SPaolo Bonzini static bool spte_clear_dirty(u64 *sptep)
1184c50d8ae3SPaolo Bonzini {
1185c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1186c50d8ae3SPaolo Bonzini 
1187805a0f83SStephen Zhang 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1188c50d8ae3SPaolo Bonzini 
1189c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!spte_ad_enabled(spte));
1190c50d8ae3SPaolo Bonzini 	spte &= ~shadow_dirty_mask;
1191c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1192c50d8ae3SPaolo Bonzini }
1193c50d8ae3SPaolo Bonzini 
1194c50d8ae3SPaolo Bonzini static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1195c50d8ae3SPaolo Bonzini {
1196c50d8ae3SPaolo Bonzini 	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1197c50d8ae3SPaolo Bonzini 					       (unsigned long *)sptep);
1198c50d8ae3SPaolo Bonzini 	if (was_writable && !spte_ad_enabled(*sptep))
1199c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1200c50d8ae3SPaolo Bonzini 
1201c50d8ae3SPaolo Bonzini 	return was_writable;
1202c50d8ae3SPaolo Bonzini }
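
/*
 * Editor's note: for ad-disabled SPTEs the W bit is the only record of
 * dirtiness, so spte_wrprot_for_clear_dirty() forwards that state to
 * the backing pfn via kvm_set_pfn_dirty() as it clears the bit; for
 * ad-enabled SPTEs the hardware D bit carries that information and is
 * cleared by spte_clear_dirty() instead.
 */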
1203c50d8ae3SPaolo Bonzini 
1204c50d8ae3SPaolo Bonzini /*
1205c50d8ae3SPaolo Bonzini  * Gets the GFN ready for another round of dirty logging by clearing the
1206c50d8ae3SPaolo Bonzini  *	- D bit on ad-enabled SPTEs, and
1207c50d8ae3SPaolo Bonzini  *	- W bit on ad-disabled SPTEs.
1208c50d8ae3SPaolo Bonzini  * Returns true iff any D or W bits were cleared.
1209c50d8ae3SPaolo Bonzini  */
12100a234f5dSSean Christopherson static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
12110a234f5dSSean Christopherson 			       struct kvm_memory_slot *slot)
1212c50d8ae3SPaolo Bonzini {
1213c50d8ae3SPaolo Bonzini 	u64 *sptep;
1214c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1215c50d8ae3SPaolo Bonzini 	bool flush = false;
1216c50d8ae3SPaolo Bonzini 
1217c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1218c50d8ae3SPaolo Bonzini 		if (spte_ad_need_write_protect(*sptep))
1219c50d8ae3SPaolo Bonzini 			flush |= spte_wrprot_for_clear_dirty(sptep);
1220c50d8ae3SPaolo Bonzini 		else
1221c50d8ae3SPaolo Bonzini 			flush |= spte_clear_dirty(sptep);
1222c50d8ae3SPaolo Bonzini 
1223c50d8ae3SPaolo Bonzini 	return flush;
1224c50d8ae3SPaolo Bonzini }
1225c50d8ae3SPaolo Bonzini 
1226c50d8ae3SPaolo Bonzini /**
1227c50d8ae3SPaolo Bonzini  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1228c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1229c50d8ae3SPaolo Bonzini  * @slot: slot to protect
1230c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1231c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should protect
1232c50d8ae3SPaolo Bonzini  *
123389212919SKeqian Zhu  * Used when we do not need to care about huge page mappings.
1234c50d8ae3SPaolo Bonzini  */
1235c50d8ae3SPaolo Bonzini static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1236c50d8ae3SPaolo Bonzini 				     struct kvm_memory_slot *slot,
1237c50d8ae3SPaolo Bonzini 				     gfn_t gfn_offset, unsigned long mask)
1238c50d8ae3SPaolo Bonzini {
1239c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1240c50d8ae3SPaolo Bonzini 
1241897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1242a6a0b05dSBen Gardon 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1243a6a0b05dSBen Gardon 				slot->base_gfn + gfn_offset, mask, true);
1244e2209710SBen Gardon 
1245e2209710SBen Gardon 	if (!kvm_memslots_have_rmaps(kvm))
1246e2209710SBen Gardon 		return;
1247e2209710SBen Gardon 
1248c50d8ae3SPaolo Bonzini 	while (mask) {
1249c50d8ae3SPaolo Bonzini 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
12503bae0459SSean Christopherson 					  PG_LEVEL_4K, slot);
1251c50d8ae3SPaolo Bonzini 		__rmap_write_protect(kvm, rmap_head, false);
1252c50d8ae3SPaolo Bonzini 
1253c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1254c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1255c50d8ae3SPaolo Bonzini 	}
1256c50d8ae3SPaolo Bonzini }
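
/*
 * Worked example (illustrative): for mask = 0b10110 the loop above
 * visits gfns base_gfn + gfn_offset + 1, + 2 and + 4 -- __ffs() finds
 * the lowest set bit and "mask &= mask - 1" clears it, so each set bit
 * in the dirty mask is write-protected exactly once.
 */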
1257c50d8ae3SPaolo Bonzini 
1258c50d8ae3SPaolo Bonzini /**
1259c50d8ae3SPaolo Bonzini  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1260c50d8ae3SPaolo Bonzini  * protect the page if the D-bit isn't supported.
1261c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1262c50d8ae3SPaolo Bonzini  * @slot: slot to clear D-bit
1263c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1264c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should clear D-bit
1265c50d8ae3SPaolo Bonzini  *
1266c50d8ae3SPaolo Bonzini  * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1267c50d8ae3SPaolo Bonzini  */
1268a018eba5SSean Christopherson static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1269c50d8ae3SPaolo Bonzini 					 struct kvm_memory_slot *slot,
1270c50d8ae3SPaolo Bonzini 					 gfn_t gfn_offset, unsigned long mask)
1271c50d8ae3SPaolo Bonzini {
1272c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1273c50d8ae3SPaolo Bonzini 
1274897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1275a6a0b05dSBen Gardon 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1276a6a0b05dSBen Gardon 				slot->base_gfn + gfn_offset, mask, false);
1277e2209710SBen Gardon 
1278e2209710SBen Gardon 	if (!kvm_memslots_have_rmaps(kvm))
1279e2209710SBen Gardon 		return;
1280e2209710SBen Gardon 
1281c50d8ae3SPaolo Bonzini 	while (mask) {
1282c50d8ae3SPaolo Bonzini 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
12833bae0459SSean Christopherson 					  PG_LEVEL_4K, slot);
12840a234f5dSSean Christopherson 		__rmap_clear_dirty(kvm, rmap_head, slot);
1285c50d8ae3SPaolo Bonzini 
1286c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1287c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1288c50d8ae3SPaolo Bonzini 	}
1289c50d8ae3SPaolo Bonzini }
1290c50d8ae3SPaolo Bonzini 
1291c50d8ae3SPaolo Bonzini /**
1292c50d8ae3SPaolo Bonzini  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1293c50d8ae3SPaolo Bonzini  * PT level pages.
1294c50d8ae3SPaolo Bonzini  *
1295c50d8ae3SPaolo Bonzini  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1296c50d8ae3SPaolo Bonzini  * enable dirty logging for them.
1297c50d8ae3SPaolo Bonzini  *
129889212919SKeqian Zhu  * We need to care about huge page mappings: e.g. during dirty logging we may
129989212919SKeqian Zhu  * have such mappings.
1300c50d8ae3SPaolo Bonzini  */
1301c50d8ae3SPaolo Bonzini void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1302c50d8ae3SPaolo Bonzini 				struct kvm_memory_slot *slot,
1303c50d8ae3SPaolo Bonzini 				gfn_t gfn_offset, unsigned long mask)
1304c50d8ae3SPaolo Bonzini {
130589212919SKeqian Zhu 	/*
130689212919SKeqian Zhu 	 * Huge pages are NOT write protected when we start dirty logging in
130789212919SKeqian Zhu 	 * initially-all-set mode; must write protect them here so that they
130889212919SKeqian Zhu 	 * are split to 4K on the first write.
130989212919SKeqian Zhu 	 *
131089212919SKeqian Zhu 	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
131189212919SKeqian Zhu 	 * of memslot has no such restriction, so the range can cross two large
131289212919SKeqian Zhu 	 * pages.
131389212919SKeqian Zhu 	 */
131489212919SKeqian Zhu 	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
131589212919SKeqian Zhu 		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
131689212919SKeqian Zhu 		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
131789212919SKeqian Zhu 
131889212919SKeqian Zhu 		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
131989212919SKeqian Zhu 
132089212919SKeqian Zhu 		/* Cross two large pages? */
132189212919SKeqian Zhu 		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
132289212919SKeqian Zhu 		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
132389212919SKeqian Zhu 			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
132489212919SKeqian Zhu 						       PG_LEVEL_2M);
132589212919SKeqian Zhu 	}
132689212919SKeqian Zhu 
132789212919SKeqian Zhu 	/* Now handle 4K PTEs.  */
1328a018eba5SSean Christopherson 	if (kvm_x86_ops.cpu_dirty_log_size)
1329a018eba5SSean Christopherson 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1330c50d8ae3SPaolo Bonzini 	else
1331c50d8ae3SPaolo Bonzini 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1332c50d8ae3SPaolo Bonzini }
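
/*
 * Worked example (illustrative): with slot->base_gfn = 0x1f0,
 * gfn_offset = 0 and mask = ~0ul, start = 0x1f0 and end = 0x22f, which
 * straddle the 2M boundary at gfn 0x200; the ALIGN() comparison above
 * catches this and write-protects both large pages before the 4K PTEs
 * are handled.
 */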
1333c50d8ae3SPaolo Bonzini 
1334fb04a1edSPeter Xu int kvm_cpu_dirty_log_size(void)
1335fb04a1edSPeter Xu {
13366dd03800SSean Christopherson 	return kvm_x86_ops.cpu_dirty_log_size;
1337fb04a1edSPeter Xu }
1338fb04a1edSPeter Xu 
1339c50d8ae3SPaolo Bonzini bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
13403ad93562SKeqian Zhu 				    struct kvm_memory_slot *slot, u64 gfn,
13413ad93562SKeqian Zhu 				    int min_level)
1342c50d8ae3SPaolo Bonzini {
1343c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1344c50d8ae3SPaolo Bonzini 	int i;
1345c50d8ae3SPaolo Bonzini 	bool write_protected = false;
1346c50d8ae3SPaolo Bonzini 
1347e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
13483ad93562SKeqian Zhu 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1349c50d8ae3SPaolo Bonzini 			rmap_head = __gfn_to_rmap(gfn, i, slot);
1350c50d8ae3SPaolo Bonzini 			write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1351c50d8ae3SPaolo Bonzini 		}
1352e2209710SBen Gardon 	}
1353c50d8ae3SPaolo Bonzini 
1354897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
135546044f72SBen Gardon 		write_protected |=
13563ad93562SKeqian Zhu 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
135746044f72SBen Gardon 
1358c50d8ae3SPaolo Bonzini 	return write_protected;
1359c50d8ae3SPaolo Bonzini }
1360c50d8ae3SPaolo Bonzini 
1361c50d8ae3SPaolo Bonzini static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1362c50d8ae3SPaolo Bonzini {
1363c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1364c50d8ae3SPaolo Bonzini 
1365c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
13663ad93562SKeqian Zhu 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1367c50d8ae3SPaolo Bonzini }
1368c50d8ae3SPaolo Bonzini 
13690a234f5dSSean Christopherson static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
13700a234f5dSSean Christopherson 			  struct kvm_memory_slot *slot)
1371c50d8ae3SPaolo Bonzini {
1372c50d8ae3SPaolo Bonzini 	u64 *sptep;
1373c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1374c50d8ae3SPaolo Bonzini 	bool flush = false;
1375c50d8ae3SPaolo Bonzini 
1376c50d8ae3SPaolo Bonzini 	while ((sptep = rmap_get_first(rmap_head, &iter))) {
1377805a0f83SStephen Zhang 		rmap_printk("spte %p %llx.\n", sptep, *sptep);
1378c50d8ae3SPaolo Bonzini 
1379c50d8ae3SPaolo Bonzini 		pte_list_remove(rmap_head, sptep);
1380c50d8ae3SPaolo Bonzini 		flush = true;
1381c50d8ae3SPaolo Bonzini 	}
1382c50d8ae3SPaolo Bonzini 
1383c50d8ae3SPaolo Bonzini 	return flush;
1384c50d8ae3SPaolo Bonzini }
1385c50d8ae3SPaolo Bonzini 
13863039bcc7SSean Christopherson static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1387c50d8ae3SPaolo Bonzini 			    struct kvm_memory_slot *slot, gfn_t gfn, int level,
13883039bcc7SSean Christopherson 			    pte_t unused)
1389c50d8ae3SPaolo Bonzini {
13900a234f5dSSean Christopherson 	return kvm_zap_rmapp(kvm, rmap_head, slot);
1391c50d8ae3SPaolo Bonzini }
1392c50d8ae3SPaolo Bonzini 
13933039bcc7SSean Christopherson static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1394c50d8ae3SPaolo Bonzini 			      struct kvm_memory_slot *slot, gfn_t gfn, int level,
13953039bcc7SSean Christopherson 			      pte_t pte)
1396c50d8ae3SPaolo Bonzini {
1397c50d8ae3SPaolo Bonzini 	u64 *sptep;
1398c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1399c50d8ae3SPaolo Bonzini 	int need_flush = 0;
1400c50d8ae3SPaolo Bonzini 	u64 new_spte;
1401c50d8ae3SPaolo Bonzini 	kvm_pfn_t new_pfn;
1402c50d8ae3SPaolo Bonzini 
14033039bcc7SSean Christopherson 	WARN_ON(pte_huge(pte));
14043039bcc7SSean Christopherson 	new_pfn = pte_pfn(pte);
1405c50d8ae3SPaolo Bonzini 
1406c50d8ae3SPaolo Bonzini restart:
1407c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
1408805a0f83SStephen Zhang 		rmap_printk("spte %p %llx gfn %llx (%d)\n",
1409c50d8ae3SPaolo Bonzini 			    sptep, *sptep, gfn, level);
1410c50d8ae3SPaolo Bonzini 
1411c50d8ae3SPaolo Bonzini 		need_flush = 1;
1412c50d8ae3SPaolo Bonzini 
14133039bcc7SSean Christopherson 		if (pte_write(pte)) {
1414c50d8ae3SPaolo Bonzini 			pte_list_remove(rmap_head, sptep);
1415c50d8ae3SPaolo Bonzini 			goto restart;
1416c50d8ae3SPaolo Bonzini 		} else {
1417cb3eedabSPaolo Bonzini 			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
1418cb3eedabSPaolo Bonzini 					*sptep, new_pfn);
1419c50d8ae3SPaolo Bonzini 
1420c50d8ae3SPaolo Bonzini 			mmu_spte_clear_track_bits(sptep);
1421c50d8ae3SPaolo Bonzini 			mmu_spte_set(sptep, new_spte);
1422c50d8ae3SPaolo Bonzini 		}
1423c50d8ae3SPaolo Bonzini 	}
1424c50d8ae3SPaolo Bonzini 
1425c50d8ae3SPaolo Bonzini 	if (need_flush && kvm_available_flush_tlb_with_range()) {
1426c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1427c50d8ae3SPaolo Bonzini 		return 0;
1428c50d8ae3SPaolo Bonzini 	}
1429c50d8ae3SPaolo Bonzini 
1430c50d8ae3SPaolo Bonzini 	return need_flush;
1431c50d8ae3SPaolo Bonzini }
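
/*
 * Editor's note: pte_list_remove() invalidates any live rmap_iterator,
 * which is why the pte_write() case above restarts the walk from
 * scratch (goto restart) instead of continuing with a stale iterator.
 */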
1432c50d8ae3SPaolo Bonzini 
1433c50d8ae3SPaolo Bonzini struct slot_rmap_walk_iterator {
1434c50d8ae3SPaolo Bonzini 	/* input fields. */
1435c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1436c50d8ae3SPaolo Bonzini 	gfn_t start_gfn;
1437c50d8ae3SPaolo Bonzini 	gfn_t end_gfn;
1438c50d8ae3SPaolo Bonzini 	int start_level;
1439c50d8ae3SPaolo Bonzini 	int end_level;
1440c50d8ae3SPaolo Bonzini 
1441c50d8ae3SPaolo Bonzini 	/* output fields. */
1442c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1443c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap;
1444c50d8ae3SPaolo Bonzini 	int level;
1445c50d8ae3SPaolo Bonzini 
1446c50d8ae3SPaolo Bonzini 	/* private field. */
1447c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *end_rmap;
1448c50d8ae3SPaolo Bonzini };
1449c50d8ae3SPaolo Bonzini 
1450c50d8ae3SPaolo Bonzini static void
1451c50d8ae3SPaolo Bonzini rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1452c50d8ae3SPaolo Bonzini {
1453c50d8ae3SPaolo Bonzini 	iterator->level = level;
1454c50d8ae3SPaolo Bonzini 	iterator->gfn = iterator->start_gfn;
1455c50d8ae3SPaolo Bonzini 	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
1456c50d8ae3SPaolo Bonzini 	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
1457c50d8ae3SPaolo Bonzini 					   iterator->slot);
1458c50d8ae3SPaolo Bonzini }
1459c50d8ae3SPaolo Bonzini 
1460c50d8ae3SPaolo Bonzini static void
1461c50d8ae3SPaolo Bonzini slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1462c50d8ae3SPaolo Bonzini 		    struct kvm_memory_slot *slot, int start_level,
1463c50d8ae3SPaolo Bonzini 		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
1464c50d8ae3SPaolo Bonzini {
1465c50d8ae3SPaolo Bonzini 	iterator->slot = slot;
1466c50d8ae3SPaolo Bonzini 	iterator->start_level = start_level;
1467c50d8ae3SPaolo Bonzini 	iterator->end_level = end_level;
1468c50d8ae3SPaolo Bonzini 	iterator->start_gfn = start_gfn;
1469c50d8ae3SPaolo Bonzini 	iterator->end_gfn = end_gfn;
1470c50d8ae3SPaolo Bonzini 
1471c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->start_level);
1472c50d8ae3SPaolo Bonzini }
1473c50d8ae3SPaolo Bonzini 
1474c50d8ae3SPaolo Bonzini static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1475c50d8ae3SPaolo Bonzini {
1476c50d8ae3SPaolo Bonzini 	return !!iterator->rmap;
1477c50d8ae3SPaolo Bonzini }
1478c50d8ae3SPaolo Bonzini 
1479c50d8ae3SPaolo Bonzini static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1480c50d8ae3SPaolo Bonzini {
1481c50d8ae3SPaolo Bonzini 	if (++iterator->rmap <= iterator->end_rmap) {
1482c50d8ae3SPaolo Bonzini 		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1483c50d8ae3SPaolo Bonzini 		return;
1484c50d8ae3SPaolo Bonzini 	}
1485c50d8ae3SPaolo Bonzini 
1486c50d8ae3SPaolo Bonzini 	if (++iterator->level > iterator->end_level) {
1487c50d8ae3SPaolo Bonzini 		iterator->rmap = NULL;
1488c50d8ae3SPaolo Bonzini 		return;
1489c50d8ae3SPaolo Bonzini 	}
1490c50d8ae3SPaolo Bonzini 
1491c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->level);
1492c50d8ae3SPaolo Bonzini }
1493c50d8ae3SPaolo Bonzini 
1494c50d8ae3SPaolo Bonzini #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
1495c50d8ae3SPaolo Bonzini 	   _start_gfn, _end_gfn, _iter_)				\
1496c50d8ae3SPaolo Bonzini 	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
1497c50d8ae3SPaolo Bonzini 				 _end_level_, _start_gfn, _end_gfn);	\
1498c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_okay(_iter_);				\
1499c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_next(_iter_))
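
/*
 * Usage sketch (illustrative; kvm_handle_gfn_range() below is the real
 * caller, and end_gfn is inclusive -- it passes range->end - 1):
 *
 *	struct slot_rmap_walk_iterator iterator;
 *
 *	for_each_slot_rmap_range(slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
 *				 start_gfn, end_gfn, &iterator)
 *		flush |= handler(kvm, iterator.rmap, slot, iterator.gfn,
 *				 iterator.level, pte);
 *
 * The walk visits every rmap head for every page size in the range,
 * finishing one level before moving on to the next.
 */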
1500c50d8ae3SPaolo Bonzini 
15013039bcc7SSean Christopherson typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1502c1b91493SSean Christopherson 			       struct kvm_memory_slot *slot, gfn_t gfn,
15033039bcc7SSean Christopherson 			       int level, pte_t pte);
1504c1b91493SSean Christopherson 
15053039bcc7SSean Christopherson static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
15063039bcc7SSean Christopherson 						 struct kvm_gfn_range *range,
1507c1b91493SSean Christopherson 						 rmap_handler_t handler)
1508c50d8ae3SPaolo Bonzini {
1509c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
15103039bcc7SSean Christopherson 	bool ret = false;
1511c50d8ae3SPaolo Bonzini 
15123039bcc7SSean Christopherson 	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
15133039bcc7SSean Christopherson 				 range->start, range->end - 1, &iterator)
15143039bcc7SSean Christopherson 		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
15153039bcc7SSean Christopherson 			       iterator.level, range->pte);
1516c50d8ae3SPaolo Bonzini 
1517c50d8ae3SPaolo Bonzini 	return ret;
1518c50d8ae3SPaolo Bonzini }
1519c50d8ae3SPaolo Bonzini 
15203039bcc7SSean Christopherson bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1521c50d8ae3SPaolo Bonzini {
1522e2209710SBen Gardon 	bool flush = false;
1523c50d8ae3SPaolo Bonzini 
1524e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
15253039bcc7SSean Christopherson 		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
1526063afacdSBen Gardon 
1527897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
15283039bcc7SSean Christopherson 		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1529063afacdSBen Gardon 
15303039bcc7SSean Christopherson 	return flush;
1531c50d8ae3SPaolo Bonzini }
1532c50d8ae3SPaolo Bonzini 
15333039bcc7SSean Christopherson bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1534c50d8ae3SPaolo Bonzini {
1535e2209710SBen Gardon 	bool flush = false;
15361d8dd6b3SBen Gardon 
1537e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
15383039bcc7SSean Christopherson 		flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
15391d8dd6b3SBen Gardon 
1540897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
15413039bcc7SSean Christopherson 		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
15421d8dd6b3SBen Gardon 
15433039bcc7SSean Christopherson 	return flush;
1544c50d8ae3SPaolo Bonzini }
1545c50d8ae3SPaolo Bonzini 
15463039bcc7SSean Christopherson static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1547c50d8ae3SPaolo Bonzini 			  struct kvm_memory_slot *slot, gfn_t gfn, int level,
15483039bcc7SSean Christopherson 			  pte_t unused)
1549c50d8ae3SPaolo Bonzini {
1550c50d8ae3SPaolo Bonzini 	u64 *sptep;
15513f649ab7SKees Cook 	struct rmap_iterator iter;
1552c50d8ae3SPaolo Bonzini 	int young = 0;
1553c50d8ae3SPaolo Bonzini 
1554c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1555c50d8ae3SPaolo Bonzini 		young |= mmu_spte_age(sptep);
1556c50d8ae3SPaolo Bonzini 
1557c50d8ae3SPaolo Bonzini 	return young;
1558c50d8ae3SPaolo Bonzini }
1559c50d8ae3SPaolo Bonzini 
15603039bcc7SSean Christopherson static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1561c50d8ae3SPaolo Bonzini 			       struct kvm_memory_slot *slot, gfn_t gfn,
15623039bcc7SSean Christopherson 			       int level, pte_t unused)
1563c50d8ae3SPaolo Bonzini {
1564c50d8ae3SPaolo Bonzini 	u64 *sptep;
1565c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1566c50d8ae3SPaolo Bonzini 
1567c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1568c50d8ae3SPaolo Bonzini 		if (is_accessed_spte(*sptep))
1569c50d8ae3SPaolo Bonzini 			return 1;
1570c50d8ae3SPaolo Bonzini 	return 0;
1571c50d8ae3SPaolo Bonzini }
1572c50d8ae3SPaolo Bonzini 
1573c50d8ae3SPaolo Bonzini #define RMAP_RECYCLE_THRESHOLD 1000
1574c50d8ae3SPaolo Bonzini 
1575c50d8ae3SPaolo Bonzini static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1576c50d8ae3SPaolo Bonzini {
1577c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1578c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1579c50d8ae3SPaolo Bonzini 
158057354682SSean Christopherson 	sp = sptep_to_sp(spte);
1581c50d8ae3SPaolo Bonzini 
1582c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1583c50d8ae3SPaolo Bonzini 
15843039bcc7SSean Christopherson 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
1585c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1586c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1587c50d8ae3SPaolo Bonzini }
1588c50d8ae3SPaolo Bonzini 
15893039bcc7SSean Christopherson bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1590c50d8ae3SPaolo Bonzini {
1591e2209710SBen Gardon 	bool young = false;
1592f8e14497SBen Gardon 
1593e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
15943039bcc7SSean Christopherson 		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
15953039bcc7SSean Christopherson 
1596897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
15973039bcc7SSean Christopherson 		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1598f8e14497SBen Gardon 
1599f8e14497SBen Gardon 	return young;
1600c50d8ae3SPaolo Bonzini }
1601c50d8ae3SPaolo Bonzini 
16023039bcc7SSean Christopherson bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1603c50d8ae3SPaolo Bonzini {
1604e2209710SBen Gardon 	bool young = false;
1605f8e14497SBen Gardon 
1606e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
16073039bcc7SSean Christopherson 		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
16083039bcc7SSean Christopherson 
1609897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
16103039bcc7SSean Christopherson 		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1611f8e14497SBen Gardon 
1612f8e14497SBen Gardon 	return young;
1613c50d8ae3SPaolo Bonzini }
1614c50d8ae3SPaolo Bonzini 
1615c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1616c50d8ae3SPaolo Bonzini static int is_empty_shadow_page(u64 *spt)
1617c50d8ae3SPaolo Bonzini {
1618c50d8ae3SPaolo Bonzini 	u64 *pos;
1619c50d8ae3SPaolo Bonzini 	u64 *end;
1620c50d8ae3SPaolo Bonzini 
1621c50d8ae3SPaolo Bonzini 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1622c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*pos)) {
1623c50d8ae3SPaolo Bonzini 			printk(KERN_ERR "%s: %p %llx\n", __func__,
1624c50d8ae3SPaolo Bonzini 			       pos, *pos);
1625c50d8ae3SPaolo Bonzini 			return 0;
1626c50d8ae3SPaolo Bonzini 		}
1627c50d8ae3SPaolo Bonzini 	return 1;
1628c50d8ae3SPaolo Bonzini }
1629c50d8ae3SPaolo Bonzini #endif
1630c50d8ae3SPaolo Bonzini 
1631c50d8ae3SPaolo Bonzini /*
1632c50d8ae3SPaolo Bonzini  * This value is the sum of all of the kvm instances'
1633c50d8ae3SPaolo Bonzini  * kvm->arch.n_used_mmu_pages values.  We need a global,
1634c50d8ae3SPaolo Bonzini  * aggregate version in order to make the slab shrinker
1635c50d8ae3SPaolo Bonzini  * faster.
1636c50d8ae3SPaolo Bonzini  */
1637c50d8ae3SPaolo Bonzini static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
1638c50d8ae3SPaolo Bonzini {
1639c50d8ae3SPaolo Bonzini 	kvm->arch.n_used_mmu_pages += nr;
1640c50d8ae3SPaolo Bonzini 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1641c50d8ae3SPaolo Bonzini }
1642c50d8ae3SPaolo Bonzini 
1643c50d8ae3SPaolo Bonzini static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1644c50d8ae3SPaolo Bonzini {
1645c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1646c50d8ae3SPaolo Bonzini 	hlist_del(&sp->hash_link);
1647c50d8ae3SPaolo Bonzini 	list_del(&sp->link);
1648c50d8ae3SPaolo Bonzini 	free_page((unsigned long)sp->spt);
1649c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
1650c50d8ae3SPaolo Bonzini 		free_page((unsigned long)sp->gfns);
1651c50d8ae3SPaolo Bonzini 	kmem_cache_free(mmu_page_header_cache, sp);
1652c50d8ae3SPaolo Bonzini }
1653c50d8ae3SPaolo Bonzini 
1654c50d8ae3SPaolo Bonzini static unsigned kvm_page_table_hashfn(gfn_t gfn)
1655c50d8ae3SPaolo Bonzini {
1656c50d8ae3SPaolo Bonzini 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1657c50d8ae3SPaolo Bonzini }
1658c50d8ae3SPaolo Bonzini 
1659c50d8ae3SPaolo Bonzini static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1660c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp, u64 *parent_pte)
1661c50d8ae3SPaolo Bonzini {
1662c50d8ae3SPaolo Bonzini 	if (!parent_pte)
1663c50d8ae3SPaolo Bonzini 		return;
1664c50d8ae3SPaolo Bonzini 
1665c50d8ae3SPaolo Bonzini 	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1666c50d8ae3SPaolo Bonzini }
1667c50d8ae3SPaolo Bonzini 
1668c50d8ae3SPaolo Bonzini static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1669c50d8ae3SPaolo Bonzini 				       u64 *parent_pte)
1670c50d8ae3SPaolo Bonzini {
1671c50d8ae3SPaolo Bonzini 	__pte_list_remove(parent_pte, &sp->parent_ptes);
1672c50d8ae3SPaolo Bonzini }
1673c50d8ae3SPaolo Bonzini 
1674c50d8ae3SPaolo Bonzini static void drop_parent_pte(struct kvm_mmu_page *sp,
1675c50d8ae3SPaolo Bonzini 			    u64 *parent_pte)
1676c50d8ae3SPaolo Bonzini {
1677c50d8ae3SPaolo Bonzini 	mmu_page_remove_parent_pte(sp, parent_pte);
1678c50d8ae3SPaolo Bonzini 	mmu_spte_clear_no_track(parent_pte);
1679c50d8ae3SPaolo Bonzini }
1680c50d8ae3SPaolo Bonzini 
1681c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1682c50d8ae3SPaolo Bonzini {
1683c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1684c50d8ae3SPaolo Bonzini 
168594ce87efSSean Christopherson 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
168694ce87efSSean Christopherson 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1687c50d8ae3SPaolo Bonzini 	if (!direct)
168894ce87efSSean Christopherson 		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1689c50d8ae3SPaolo Bonzini 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1690c50d8ae3SPaolo Bonzini 
1691c50d8ae3SPaolo Bonzini 	/*
1692c50d8ae3SPaolo Bonzini 	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
1693c50d8ae3SPaolo Bonzini 	 * depends on valid pages being added to the head of the list.  See
1694c50d8ae3SPaolo Bonzini 	 * comments in kvm_zap_obsolete_pages().
1695c50d8ae3SPaolo Bonzini 	 */
1696c50d8ae3SPaolo Bonzini 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1697c50d8ae3SPaolo Bonzini 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1698c50d8ae3SPaolo Bonzini 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1699c50d8ae3SPaolo Bonzini 	return sp;
1700c50d8ae3SPaolo Bonzini }
1701c50d8ae3SPaolo Bonzini 
1702c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte);
1703c50d8ae3SPaolo Bonzini static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1704c50d8ae3SPaolo Bonzini {
1705c50d8ae3SPaolo Bonzini 	u64 *sptep;
1706c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1707c50d8ae3SPaolo Bonzini 
1708c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1709c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
1710c50d8ae3SPaolo Bonzini 	}
1711c50d8ae3SPaolo Bonzini }
1712c50d8ae3SPaolo Bonzini 
1713c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte)
1714c50d8ae3SPaolo Bonzini {
1715c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1716c50d8ae3SPaolo Bonzini 	unsigned int index;
1717c50d8ae3SPaolo Bonzini 
171857354682SSean Christopherson 	sp = sptep_to_sp(spte);
1719c50d8ae3SPaolo Bonzini 	index = spte - sp->spt;
1720c50d8ae3SPaolo Bonzini 	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1721c50d8ae3SPaolo Bonzini 		return;
1722c50d8ae3SPaolo Bonzini 	if (sp->unsync_children++)
1723c50d8ae3SPaolo Bonzini 		return;
1724c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
1725c50d8ae3SPaolo Bonzini }
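
/*
 * Editor's note: mark_unsync() propagates upward lazily -- it sets the
 * child's bit in the parent's unsync_child_bitmap, but only recurses
 * to the grandparents (via kvm_mmu_mark_parents_unsync) on the 0 -> 1
 * transition of unsync_children, since a non-zero count means the
 * ancestors were already marked.
 */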
1726c50d8ae3SPaolo Bonzini 
1727c50d8ae3SPaolo Bonzini static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1728c50d8ae3SPaolo Bonzini 			       struct kvm_mmu_page *sp)
1729c50d8ae3SPaolo Bonzini {
1730c50d8ae3SPaolo Bonzini 	return 0;
1731c50d8ae3SPaolo Bonzini }
1732c50d8ae3SPaolo Bonzini 
1733c50d8ae3SPaolo Bonzini #define KVM_PAGE_ARRAY_NR 16
1734c50d8ae3SPaolo Bonzini 
1735c50d8ae3SPaolo Bonzini struct kvm_mmu_pages {
1736c50d8ae3SPaolo Bonzini 	struct mmu_page_and_offset {
1737c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
1738c50d8ae3SPaolo Bonzini 		unsigned int idx;
1739c50d8ae3SPaolo Bonzini 	} page[KVM_PAGE_ARRAY_NR];
1740c50d8ae3SPaolo Bonzini 	unsigned int nr;
1741c50d8ae3SPaolo Bonzini };
1742c50d8ae3SPaolo Bonzini 
1743c50d8ae3SPaolo Bonzini static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1744c50d8ae3SPaolo Bonzini 			 int idx)
1745c50d8ae3SPaolo Bonzini {
1746c50d8ae3SPaolo Bonzini 	int i;
1747c50d8ae3SPaolo Bonzini 
1748c50d8ae3SPaolo Bonzini 	if (sp->unsync)
1749c50d8ae3SPaolo Bonzini 		for (i = 0; i < pvec->nr; i++)
1750c50d8ae3SPaolo Bonzini 			if (pvec->page[i].sp == sp)
1751c50d8ae3SPaolo Bonzini 				return 0;
1752c50d8ae3SPaolo Bonzini 
1753c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].sp = sp;
1754c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].idx = idx;
1755c50d8ae3SPaolo Bonzini 	pvec->nr++;
1756c50d8ae3SPaolo Bonzini 	return (pvec->nr == KVM_PAGE_ARRAY_NR);
1757c50d8ae3SPaolo Bonzini }
1758c50d8ae3SPaolo Bonzini 
1759c50d8ae3SPaolo Bonzini static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1760c50d8ae3SPaolo Bonzini {
1761c50d8ae3SPaolo Bonzini 	--sp->unsync_children;
1762c50d8ae3SPaolo Bonzini 	WARN_ON((int)sp->unsync_children < 0);
1763c50d8ae3SPaolo Bonzini 	__clear_bit(idx, sp->unsync_child_bitmap);
1764c50d8ae3SPaolo Bonzini }
1765c50d8ae3SPaolo Bonzini 
1766c50d8ae3SPaolo Bonzini static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1767c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
1768c50d8ae3SPaolo Bonzini {
1769c50d8ae3SPaolo Bonzini 	int i, ret, nr_unsync_leaf = 0;
1770c50d8ae3SPaolo Bonzini 
1771c50d8ae3SPaolo Bonzini 	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1772c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
1773c50d8ae3SPaolo Bonzini 		u64 ent = sp->spt[i];
1774c50d8ae3SPaolo Bonzini 
1775c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1776c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
1777c50d8ae3SPaolo Bonzini 			continue;
1778c50d8ae3SPaolo Bonzini 		}
1779c50d8ae3SPaolo Bonzini 
1780e47c4aeeSSean Christopherson 		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1781c50d8ae3SPaolo Bonzini 
1782c50d8ae3SPaolo Bonzini 		if (child->unsync_children) {
1783c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
1784c50d8ae3SPaolo Bonzini 				return -ENOSPC;
1785c50d8ae3SPaolo Bonzini 
1786c50d8ae3SPaolo Bonzini 			ret = __mmu_unsync_walk(child, pvec);
1787c50d8ae3SPaolo Bonzini 			if (!ret) {
1788c50d8ae3SPaolo Bonzini 				clear_unsync_child_bit(sp, i);
1789c50d8ae3SPaolo Bonzini 				continue;
1790c50d8ae3SPaolo Bonzini 			} else if (ret > 0) {
1791c50d8ae3SPaolo Bonzini 				nr_unsync_leaf += ret;
1792c50d8ae3SPaolo Bonzini 			} else
1793c50d8ae3SPaolo Bonzini 				return ret;
1794c50d8ae3SPaolo Bonzini 		} else if (child->unsync) {
1795c50d8ae3SPaolo Bonzini 			nr_unsync_leaf++;
1796c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
1797c50d8ae3SPaolo Bonzini 				return -ENOSPC;
1798c50d8ae3SPaolo Bonzini 		} else
1799c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
1800c50d8ae3SPaolo Bonzini 	}
1801c50d8ae3SPaolo Bonzini 
1802c50d8ae3SPaolo Bonzini 	return nr_unsync_leaf;
1803c50d8ae3SPaolo Bonzini }
1804c50d8ae3SPaolo Bonzini 
1805c50d8ae3SPaolo Bonzini #define INVALID_INDEX (-1)
1806c50d8ae3SPaolo Bonzini 
1807c50d8ae3SPaolo Bonzini static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1808c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
1809c50d8ae3SPaolo Bonzini {
1810c50d8ae3SPaolo Bonzini 	pvec->nr = 0;
1811c50d8ae3SPaolo Bonzini 	if (!sp->unsync_children)
1812c50d8ae3SPaolo Bonzini 		return 0;
1813c50d8ae3SPaolo Bonzini 
1814c50d8ae3SPaolo Bonzini 	mmu_pages_add(pvec, sp, INVALID_INDEX);
1815c50d8ae3SPaolo Bonzini 	return __mmu_unsync_walk(sp, pvec);
1816c50d8ae3SPaolo Bonzini }
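
/*
 * Usage sketch (illustrative; mmu_sync_children() below is the real
 * consumer):
 *
 *	while (mmu_unsync_walk(parent, &pages)) {
 *		for_each_sp(pages, sp, parents, i)
 *			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
 *	}
 *
 * A full pvec makes __mmu_unsync_walk() bail out with -ENOSPC; the
 * outer loop simply walks again, and already-synced children are
 * skipped because their unsync_child_bitmap bits have been cleared.
 */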
1817c50d8ae3SPaolo Bonzini 
1818c50d8ae3SPaolo Bonzini static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1819c50d8ae3SPaolo Bonzini {
1820c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->unsync);
1821c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_sync_page(sp);
1822c50d8ae3SPaolo Bonzini 	sp->unsync = 0;
1823c50d8ae3SPaolo Bonzini 	--kvm->stat.mmu_unsync;
1824c50d8ae3SPaolo Bonzini }
1825c50d8ae3SPaolo Bonzini 
1826c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1827c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list);
1828c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1829c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list);
1830c50d8ae3SPaolo Bonzini 
1831ac101b7cSSean Christopherson #define for_each_valid_sp(_kvm, _sp, _list)				\
1832ac101b7cSSean Christopherson 	hlist_for_each_entry(_sp, _list, hash_link)			\
1833c50d8ae3SPaolo Bonzini 		if (is_obsolete_sp((_kvm), (_sp))) {			\
1834c50d8ae3SPaolo Bonzini 		} else
1835c50d8ae3SPaolo Bonzini 
1836c50d8ae3SPaolo Bonzini #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
1837ac101b7cSSean Christopherson 	for_each_valid_sp(_kvm, _sp,					\
1838ac101b7cSSean Christopherson 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
1839c50d8ae3SPaolo Bonzini 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
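
/*
 * Usage sketch (illustrative; mirrors callers such as
 * kvm_mmu_unprotect_page() elsewhere in this file):
 *
 *	for_each_gfn_indirect_valid_sp(kvm, sp, gfn)
 *		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 *
 * The hash is keyed on the gfn alone, so the walk must filter out
 * bucket collisions (a different gfn in the same bucket) in addition
 * to obsolete and direct pages.
 */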
1840c50d8ae3SPaolo Bonzini 
1841479a1efcSSean Christopherson static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1842c50d8ae3SPaolo Bonzini 			 struct list_head *invalid_list)
1843c50d8ae3SPaolo Bonzini {
18442640b086SSean Christopherson 	if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
1845c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1846c50d8ae3SPaolo Bonzini 		return false;
1847c50d8ae3SPaolo Bonzini 	}
1848c50d8ae3SPaolo Bonzini 
1849c50d8ae3SPaolo Bonzini 	return true;
1850c50d8ae3SPaolo Bonzini }
1851c50d8ae3SPaolo Bonzini 
1852c50d8ae3SPaolo Bonzini static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1853c50d8ae3SPaolo Bonzini 					struct list_head *invalid_list,
1854c50d8ae3SPaolo Bonzini 					bool remote_flush)
1855c50d8ae3SPaolo Bonzini {
1856c50d8ae3SPaolo Bonzini 	if (!remote_flush && list_empty(invalid_list))
1857c50d8ae3SPaolo Bonzini 		return false;
1858c50d8ae3SPaolo Bonzini 
1859c50d8ae3SPaolo Bonzini 	if (!list_empty(invalid_list))
1860c50d8ae3SPaolo Bonzini 		kvm_mmu_commit_zap_page(kvm, invalid_list);
1861c50d8ae3SPaolo Bonzini 	else
1862c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
1863c50d8ae3SPaolo Bonzini 	return true;
1864c50d8ae3SPaolo Bonzini }
1865c50d8ae3SPaolo Bonzini 
1866c50d8ae3SPaolo Bonzini static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
1867c50d8ae3SPaolo Bonzini 				 struct list_head *invalid_list,
1868c50d8ae3SPaolo Bonzini 				 bool remote_flush, bool local_flush)
1869c50d8ae3SPaolo Bonzini {
1870c50d8ae3SPaolo Bonzini 	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
1871c50d8ae3SPaolo Bonzini 		return;
1872c50d8ae3SPaolo Bonzini 
1873c50d8ae3SPaolo Bonzini 	if (local_flush)
18748c8560b8SSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1875c50d8ae3SPaolo Bonzini }
1876c50d8ae3SPaolo Bonzini 
1877c50d8ae3SPaolo Bonzini #ifdef CONFIG_KVM_MMU_AUDIT
1878c50d8ae3SPaolo Bonzini #include "mmu_audit.c"
1879c50d8ae3SPaolo Bonzini #else
1880c50d8ae3SPaolo Bonzini static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
1881c50d8ae3SPaolo Bonzini static void mmu_audit_disable(void) { }
1882c50d8ae3SPaolo Bonzini #endif
1883c50d8ae3SPaolo Bonzini 
1884c50d8ae3SPaolo Bonzini static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1885c50d8ae3SPaolo Bonzini {
1886c50d8ae3SPaolo Bonzini 	return sp->role.invalid ||
1887c50d8ae3SPaolo Bonzini 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1888c50d8ae3SPaolo Bonzini }
1889c50d8ae3SPaolo Bonzini 
1890c50d8ae3SPaolo Bonzini struct mmu_page_path {
1891c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1892c50d8ae3SPaolo Bonzini 	unsigned int idx[PT64_ROOT_MAX_LEVEL];
1893c50d8ae3SPaolo Bonzini };
1894c50d8ae3SPaolo Bonzini 
1895c50d8ae3SPaolo Bonzini #define for_each_sp(pvec, sp, parents, i)			\
1896c50d8ae3SPaolo Bonzini 		for (i = mmu_pages_first(&pvec, &parents);	\
1897c50d8ae3SPaolo Bonzini 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
1898c50d8ae3SPaolo Bonzini 			i = mmu_pages_next(&pvec, &parents, i))
1899c50d8ae3SPaolo Bonzini 
1900c50d8ae3SPaolo Bonzini static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1901c50d8ae3SPaolo Bonzini 			  struct mmu_page_path *parents,
1902c50d8ae3SPaolo Bonzini 			  int i)
1903c50d8ae3SPaolo Bonzini {
1904c50d8ae3SPaolo Bonzini 	int n;
1905c50d8ae3SPaolo Bonzini 
1906c50d8ae3SPaolo Bonzini 	for (n = i+1; n < pvec->nr; n++) {
1907c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp = pvec->page[n].sp;
1908c50d8ae3SPaolo Bonzini 		unsigned idx = pvec->page[n].idx;
1909c50d8ae3SPaolo Bonzini 		int level = sp->role.level;
1910c50d8ae3SPaolo Bonzini 
1911c50d8ae3SPaolo Bonzini 		parents->idx[level-1] = idx;
19123bae0459SSean Christopherson 		if (level == PG_LEVEL_4K)
1913c50d8ae3SPaolo Bonzini 			break;
1914c50d8ae3SPaolo Bonzini 
1915c50d8ae3SPaolo Bonzini 		parents->parent[level-2] = sp;
1916c50d8ae3SPaolo Bonzini 	}
1917c50d8ae3SPaolo Bonzini 
1918c50d8ae3SPaolo Bonzini 	return n;
1919c50d8ae3SPaolo Bonzini }
1920c50d8ae3SPaolo Bonzini 
1921c50d8ae3SPaolo Bonzini static int mmu_pages_first(struct kvm_mmu_pages *pvec,
1922c50d8ae3SPaolo Bonzini 			   struct mmu_page_path *parents)
1923c50d8ae3SPaolo Bonzini {
1924c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1925c50d8ae3SPaolo Bonzini 	int level;
1926c50d8ae3SPaolo Bonzini 
1927c50d8ae3SPaolo Bonzini 	if (pvec->nr == 0)
1928c50d8ae3SPaolo Bonzini 		return 0;
1929c50d8ae3SPaolo Bonzini 
1930c50d8ae3SPaolo Bonzini 	WARN_ON(pvec->page[0].idx != INVALID_INDEX);
1931c50d8ae3SPaolo Bonzini 
1932c50d8ae3SPaolo Bonzini 	sp = pvec->page[0].sp;
1933c50d8ae3SPaolo Bonzini 	level = sp->role.level;
19343bae0459SSean Christopherson 	WARN_ON(level == PG_LEVEL_4K);
1935c50d8ae3SPaolo Bonzini 
1936c50d8ae3SPaolo Bonzini 	parents->parent[level-2] = sp;
1937c50d8ae3SPaolo Bonzini 
1938c50d8ae3SPaolo Bonzini 	/* Also set up a sentinel.  Further entries in pvec are all
1939c50d8ae3SPaolo Bonzini 	 * children of sp, so this element is never overwritten.
1940c50d8ae3SPaolo Bonzini 	 */
1941c50d8ae3SPaolo Bonzini 	parents->parent[level-1] = NULL;
1942c50d8ae3SPaolo Bonzini 	return mmu_pages_next(pvec, parents, 0);
1943c50d8ae3SPaolo Bonzini }
1944c50d8ae3SPaolo Bonzini 
1945c50d8ae3SPaolo Bonzini static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1946c50d8ae3SPaolo Bonzini {
1947c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1948c50d8ae3SPaolo Bonzini 	unsigned int level = 0;
1949c50d8ae3SPaolo Bonzini 
1950c50d8ae3SPaolo Bonzini 	do {
1951c50d8ae3SPaolo Bonzini 		unsigned int idx = parents->idx[level];
1952c50d8ae3SPaolo Bonzini 		sp = parents->parent[level];
1953c50d8ae3SPaolo Bonzini 		if (!sp)
1954c50d8ae3SPaolo Bonzini 			return;
1955c50d8ae3SPaolo Bonzini 
1956c50d8ae3SPaolo Bonzini 		WARN_ON(idx == INVALID_INDEX);
1957c50d8ae3SPaolo Bonzini 		clear_unsync_child_bit(sp, idx);
1958c50d8ae3SPaolo Bonzini 		level++;
1959c50d8ae3SPaolo Bonzini 	} while (!sp->unsync_children);
1960c50d8ae3SPaolo Bonzini }
1961c50d8ae3SPaolo Bonzini 
1962c50d8ae3SPaolo Bonzini static void mmu_sync_children(struct kvm_vcpu *vcpu,
1963c50d8ae3SPaolo Bonzini 			      struct kvm_mmu_page *parent)
1964c50d8ae3SPaolo Bonzini {
1965c50d8ae3SPaolo Bonzini 	int i;
1966c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1967c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
1968c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
1969c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
1970c50d8ae3SPaolo Bonzini 	bool flush = false;
1971c50d8ae3SPaolo Bonzini 
1972c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
1973c50d8ae3SPaolo Bonzini 		bool protected = false;
1974c50d8ae3SPaolo Bonzini 
1975c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i)
1976c50d8ae3SPaolo Bonzini 			protected |= rmap_write_protect(vcpu, sp->gfn);
1977c50d8ae3SPaolo Bonzini 
1978c50d8ae3SPaolo Bonzini 		if (protected) {
1979c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs(vcpu->kvm);
1980c50d8ae3SPaolo Bonzini 			flush = false;
1981c50d8ae3SPaolo Bonzini 		}
1982c50d8ae3SPaolo Bonzini 
1983c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
1984479a1efcSSean Christopherson 			kvm_unlink_unsync_page(vcpu->kvm, sp);
1985c50d8ae3SPaolo Bonzini 			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
1986c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
1987c50d8ae3SPaolo Bonzini 		}
1988531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
1989c50d8ae3SPaolo Bonzini 			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
1990531810caSBen Gardon 			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
1991c50d8ae3SPaolo Bonzini 			flush = false;
1992c50d8ae3SPaolo Bonzini 		}
1993c50d8ae3SPaolo Bonzini 	}
1994c50d8ae3SPaolo Bonzini 
1995c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
1996c50d8ae3SPaolo Bonzini }
1997c50d8ae3SPaolo Bonzini 
1998c50d8ae3SPaolo Bonzini static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
1999c50d8ae3SPaolo Bonzini {
2000c50d8ae3SPaolo Bonzini 	atomic_set(&sp->write_flooding_count, 0);
2001c50d8ae3SPaolo Bonzini }
2002c50d8ae3SPaolo Bonzini 
2003c50d8ae3SPaolo Bonzini static void clear_sp_write_flooding_count(u64 *spte)
2004c50d8ae3SPaolo Bonzini {
200557354682SSean Christopherson 	__clear_sp_write_flooding_count(sptep_to_sp(spte));
2006c50d8ae3SPaolo Bonzini }
2007c50d8ae3SPaolo Bonzini 
2008c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2009c50d8ae3SPaolo Bonzini 					     gfn_t gfn,
2010c50d8ae3SPaolo Bonzini 					     gva_t gaddr,
2011c50d8ae3SPaolo Bonzini 					     unsigned level,
2012c50d8ae3SPaolo Bonzini 					     int direct,
20130a2b64c5SBen Gardon 					     unsigned int access)
2014c50d8ae3SPaolo Bonzini {
2015fb58a9c3SSean Christopherson 	bool direct_mmu = vcpu->arch.mmu->direct_map;
2016c50d8ae3SPaolo Bonzini 	union kvm_mmu_page_role role;
2017ac101b7cSSean Christopherson 	struct hlist_head *sp_list;
2018c50d8ae3SPaolo Bonzini 	unsigned quadrant;
2019c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2020c50d8ae3SPaolo Bonzini 	int collisions = 0;
2021c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2022c50d8ae3SPaolo Bonzini 
2023c50d8ae3SPaolo Bonzini 	role = vcpu->arch.mmu->mmu_role.base;
2024c50d8ae3SPaolo Bonzini 	role.level = level;
2025c50d8ae3SPaolo Bonzini 	role.direct = direct;
2026c50d8ae3SPaolo Bonzini 	if (role.direct)
2027c50d8ae3SPaolo Bonzini 		role.gpte_is_8_bytes = true;
2028c50d8ae3SPaolo Bonzini 	role.access = access;
2029fb58a9c3SSean Christopherson 	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2030c50d8ae3SPaolo Bonzini 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2031c50d8ae3SPaolo Bonzini 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2032c50d8ae3SPaolo Bonzini 		role.quadrant = quadrant;
2033c50d8ae3SPaolo Bonzini 	}
2034ac101b7cSSean Christopherson 
2035ac101b7cSSean Christopherson 	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2036ac101b7cSSean Christopherson 	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
2037c50d8ae3SPaolo Bonzini 		if (sp->gfn != gfn) {
2038c50d8ae3SPaolo Bonzini 			collisions++;
2039c50d8ae3SPaolo Bonzini 			continue;
2040c50d8ae3SPaolo Bonzini 		}
2041c50d8ae3SPaolo Bonzini 
2042ddc16abbSSean Christopherson 		if (sp->role.word != role.word) {
2043ddc16abbSSean Christopherson 			/*
2044ddc16abbSSean Christopherson 			 * If the guest is creating an upper-level page, zap
2045ddc16abbSSean Christopherson 			 * unsync pages for the same gfn.  While it's possible
2046ddc16abbSSean Christopherson 			 * the guest is using recursive page tables, in all
2047ddc16abbSSean Christopherson 			 * likelihood the guest has stopped using the unsync
2048ddc16abbSSean Christopherson 			 * page and is installing a completely unrelated page.
2049ddc16abbSSean Christopherson 			 * Unsync pages must not be left as is, because the new
2050ddc16abbSSean Christopherson 			 * upper-level page will be write-protected.
2051ddc16abbSSean Christopherson 			 */
2052ddc16abbSSean Christopherson 			if (level > PG_LEVEL_4K && sp->unsync)
2053ddc16abbSSean Christopherson 				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2054ddc16abbSSean Christopherson 							 &invalid_list);
2055c50d8ae3SPaolo Bonzini 			continue;
2056ddc16abbSSean Christopherson 		}
2057c50d8ae3SPaolo Bonzini 
2058fb58a9c3SSean Christopherson 		if (direct_mmu)
2059fb58a9c3SSean Christopherson 			goto trace_get_page;
2060fb58a9c3SSean Christopherson 
2061c50d8ae3SPaolo Bonzini 		if (sp->unsync) {
206207dc4f35SSean Christopherson 			/*
2063479a1efcSSean Christopherson 			 * The page is good, but is stale.  kvm_sync_page does
206407dc4f35SSean Christopherson 			 * get the latest guest state, but (unlike mmu_unsync_children)
206507dc4f35SSean Christopherson 			 * it doesn't write-protect the page or mark it synchronized!
206607dc4f35SSean Christopherson 			 * This way the validity of the mapping is ensured, but the
206707dc4f35SSean Christopherson 			 * overhead of write protection is not incurred until the
206807dc4f35SSean Christopherson 			 * guest invalidates the TLB mapping.  This allows multiple
206907dc4f35SSean Christopherson 			 * SPs for a single gfn to be unsync.
207007dc4f35SSean Christopherson 			 *
207107dc4f35SSean Christopherson 			 * If the sync fails, the page is zapped.  If so, break
207207dc4f35SSean Christopherson 			 * in order to rebuild it.
2073c50d8ae3SPaolo Bonzini 			 */
2074479a1efcSSean Christopherson 			if (!kvm_sync_page(vcpu, sp, &invalid_list))
2075c50d8ae3SPaolo Bonzini 				break;
2076c50d8ae3SPaolo Bonzini 
2077c50d8ae3SPaolo Bonzini 			WARN_ON(!list_empty(&invalid_list));
20788c8560b8SSean Christopherson 			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2079c50d8ae3SPaolo Bonzini 		}
2080c50d8ae3SPaolo Bonzini 
2081c50d8ae3SPaolo Bonzini 		if (sp->unsync_children)
2082f6f6195bSLai Jiangshan 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2083c50d8ae3SPaolo Bonzini 
2084c50d8ae3SPaolo Bonzini 		__clear_sp_write_flooding_count(sp);
2085fb58a9c3SSean Christopherson 
2086fb58a9c3SSean Christopherson trace_get_page:
2087c50d8ae3SPaolo Bonzini 		trace_kvm_mmu_get_page(sp, false);
2088c50d8ae3SPaolo Bonzini 		goto out;
2089c50d8ae3SPaolo Bonzini 	}
2090c50d8ae3SPaolo Bonzini 
2091c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_cache_miss;
2092c50d8ae3SPaolo Bonzini 
2093c50d8ae3SPaolo Bonzini 	sp = kvm_mmu_alloc_page(vcpu, direct);
2094c50d8ae3SPaolo Bonzini 
2095c50d8ae3SPaolo Bonzini 	sp->gfn = gfn;
2096c50d8ae3SPaolo Bonzini 	sp->role = role;
2097ac101b7cSSean Christopherson 	hlist_add_head(&sp->hash_link, sp_list);
2098c50d8ae3SPaolo Bonzini 	if (!direct) {
2099c50d8ae3SPaolo Bonzini 		account_shadowed(vcpu->kvm, sp);
21003bae0459SSean Christopherson 		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
2101c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2102c50d8ae3SPaolo Bonzini 	}
2103c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_get_page(sp, true);
2104c50d8ae3SPaolo Bonzini out:
2105ddc16abbSSean Christopherson 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2106ddc16abbSSean Christopherson 
2107c50d8ae3SPaolo Bonzini 	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2108c50d8ae3SPaolo Bonzini 		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2109c50d8ae3SPaolo Bonzini 	return sp;
2110c50d8ae3SPaolo Bonzini }
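
/*
 * Worked example of the quadrant computation above (illustrative): a
 * 32-bit guest page table holds 1024 entries but a shadow page only
 * 512 (PT32_PT_BITS = 10 vs PT64_PT_BITS = 9), so one guest table is
 * shadowed by several pages told apart by role.quadrant.  For
 * level == 1, bit 21 of gaddr selects which half of the guest page
 * table this shadow page covers; for level == 2, bits 31:30 select
 * one of four quadrants of the guest page directory.
 */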
2111c50d8ae3SPaolo Bonzini 
2112c50d8ae3SPaolo Bonzini static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2113c50d8ae3SPaolo Bonzini 					struct kvm_vcpu *vcpu, hpa_t root,
2114c50d8ae3SPaolo Bonzini 					u64 addr)
2115c50d8ae3SPaolo Bonzini {
2116c50d8ae3SPaolo Bonzini 	iterator->addr = addr;
2117c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = root;
2118c50d8ae3SPaolo Bonzini 	iterator->level = vcpu->arch.mmu->shadow_root_level;
2119c50d8ae3SPaolo Bonzini 
2120c50d8ae3SPaolo Bonzini 	if (iterator->level == PT64_ROOT_4LEVEL &&
2121c50d8ae3SPaolo Bonzini 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2122c50d8ae3SPaolo Bonzini 	    !vcpu->arch.mmu->direct_map)
2123c50d8ae3SPaolo Bonzini 		--iterator->level;
2124c50d8ae3SPaolo Bonzini 
2125c50d8ae3SPaolo Bonzini 	if (iterator->level == PT32E_ROOT_LEVEL) {
2126c50d8ae3SPaolo Bonzini 		/*
2127c50d8ae3SPaolo Bonzini 		 * prev_root is currently only used for 64-bit hosts. So only
2128c50d8ae3SPaolo Bonzini 		 * the active root_hpa is valid here.
2129c50d8ae3SPaolo Bonzini 		 */
2130c50d8ae3SPaolo Bonzini 		BUG_ON(root != vcpu->arch.mmu->root_hpa);
2131c50d8ae3SPaolo Bonzini 
2132c50d8ae3SPaolo Bonzini 		iterator->shadow_addr
2133c50d8ae3SPaolo Bonzini 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2134c50d8ae3SPaolo Bonzini 		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2135c50d8ae3SPaolo Bonzini 		--iterator->level;
2136c50d8ae3SPaolo Bonzini 		if (!iterator->shadow_addr)
2137c50d8ae3SPaolo Bonzini 			iterator->level = 0;
2138c50d8ae3SPaolo Bonzini 	}
2139c50d8ae3SPaolo Bonzini }
2140c50d8ae3SPaolo Bonzini 
2141c50d8ae3SPaolo Bonzini static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2142c50d8ae3SPaolo Bonzini 			     struct kvm_vcpu *vcpu, u64 addr)
2143c50d8ae3SPaolo Bonzini {
2144c50d8ae3SPaolo Bonzini 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2145c50d8ae3SPaolo Bonzini 				    addr);
2146c50d8ae3SPaolo Bonzini }
2147c50d8ae3SPaolo Bonzini 
2148c50d8ae3SPaolo Bonzini static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2149c50d8ae3SPaolo Bonzini {
21503bae0459SSean Christopherson 	if (iterator->level < PG_LEVEL_4K)
2151c50d8ae3SPaolo Bonzini 		return false;
2152c50d8ae3SPaolo Bonzini 
2153c50d8ae3SPaolo Bonzini 	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2154c50d8ae3SPaolo Bonzini 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2155c50d8ae3SPaolo Bonzini 	return true;
2156c50d8ae3SPaolo Bonzini }
2157c50d8ae3SPaolo Bonzini 
2158c50d8ae3SPaolo Bonzini static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2159c50d8ae3SPaolo Bonzini 			       u64 spte)
2160c50d8ae3SPaolo Bonzini {
2161c50d8ae3SPaolo Bonzini 	if (is_last_spte(spte, iterator->level)) {
2162c50d8ae3SPaolo Bonzini 		iterator->level = 0;
2163c50d8ae3SPaolo Bonzini 		return;
2164c50d8ae3SPaolo Bonzini 	}
2165c50d8ae3SPaolo Bonzini 
2166c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2167c50d8ae3SPaolo Bonzini 	--iterator->level;
2168c50d8ae3SPaolo Bonzini }
2169c50d8ae3SPaolo Bonzini 
2170c50d8ae3SPaolo Bonzini static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2171c50d8ae3SPaolo Bonzini {
2172c50d8ae3SPaolo Bonzini 	__shadow_walk_next(iterator, *iterator->sptep);
2173c50d8ae3SPaolo Bonzini }
2174c50d8ae3SPaolo Bonzini 
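/*
 * Install a non-leaf SPTE pointing at the child shadow page @sp, record
 * @sptep in @sp's parent rmap, and propagate @sp's unsync state up through
 * the parents so the unsync child can be found when syncing roots.
 */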
2175c50d8ae3SPaolo Bonzini static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2176c50d8ae3SPaolo Bonzini 			     struct kvm_mmu_page *sp)
2177c50d8ae3SPaolo Bonzini {
2178c50d8ae3SPaolo Bonzini 	u64 spte;
2179c50d8ae3SPaolo Bonzini 
2180c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2181c50d8ae3SPaolo Bonzini 
2182cc4674d0SBen Gardon 	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2183c50d8ae3SPaolo Bonzini 
2184c50d8ae3SPaolo Bonzini 	mmu_spte_set(sptep, spte);
2185c50d8ae3SPaolo Bonzini 
2186c50d8ae3SPaolo Bonzini 	mmu_page_add_parent_pte(vcpu, sp, sptep);
2187c50d8ae3SPaolo Bonzini 
2188c50d8ae3SPaolo Bonzini 	if (sp->unsync_children || sp->unsync)
2189c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
2190c50d8ae3SPaolo Bonzini }
2191c50d8ae3SPaolo Bonzini 
2192c50d8ae3SPaolo Bonzini static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2193c50d8ae3SPaolo Bonzini 				   unsigned direct_access)
2194c50d8ae3SPaolo Bonzini {
2195c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2196c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
2197c50d8ae3SPaolo Bonzini 
2198c50d8ae3SPaolo Bonzini 		/*
2199c50d8ae3SPaolo Bonzini 		 * For a direct sp, if the guest pte's dirty bit
2200c50d8ae3SPaolo Bonzini 		 * changed from clean to dirty, it would corrupt the
2201c50d8ae3SPaolo Bonzini 		 * sp's access: writes would be allowed through the
2202c50d8ae3SPaolo Bonzini 		 * read-only sp, so we should update the spte at this
2203c50d8ae3SPaolo Bonzini 		 * point to get a new sp with the correct access.
2204c50d8ae3SPaolo Bonzini 		 */
2205e47c4aeeSSean Christopherson 		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2206c50d8ae3SPaolo Bonzini 		if (child->role.access == direct_access)
2207c50d8ae3SPaolo Bonzini 			return;
2208c50d8ae3SPaolo Bonzini 
2209c50d8ae3SPaolo Bonzini 		drop_parent_pte(child, sptep);
2210c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2211c50d8ae3SPaolo Bonzini 	}
2212c50d8ae3SPaolo Bonzini }
2213c50d8ae3SPaolo Bonzini 
22142de4085cSBen Gardon /* Returns the number of zapped non-leaf child shadow pages. */
22152de4085cSBen Gardon static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
22162de4085cSBen Gardon 			    u64 *spte, struct list_head *invalid_list)
2217c50d8ae3SPaolo Bonzini {
2218c50d8ae3SPaolo Bonzini 	u64 pte;
2219c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *child;
2220c50d8ae3SPaolo Bonzini 
2221c50d8ae3SPaolo Bonzini 	pte = *spte;
2222c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(pte)) {
2223c50d8ae3SPaolo Bonzini 		if (is_last_spte(pte, sp->role.level)) {
2224c50d8ae3SPaolo Bonzini 			drop_spte(kvm, spte);
2225c50d8ae3SPaolo Bonzini 			if (is_large_pte(pte))
2226c50d8ae3SPaolo Bonzini 				--kvm->stat.lpages;
2227c50d8ae3SPaolo Bonzini 		} else {
2228e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2229c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, spte);
22302de4085cSBen Gardon 
22312de4085cSBen Gardon 			/*
22322de4085cSBen Gardon 			 * Recursively zap nested TDP SPs, parentless SPs are
22332de4085cSBen Gardon 			 * unlikely to be used again in the near future.  This
22342de4085cSBen Gardon 			 * avoids retaining a large number of stale nested SPs.
22352de4085cSBen Gardon 			 */
22362de4085cSBen Gardon 			if (tdp_enabled && invalid_list &&
22372de4085cSBen Gardon 			    child->role.guest_mode && !child->parent_ptes.val)
22382de4085cSBen Gardon 				return kvm_mmu_prepare_zap_page(kvm, child,
22392de4085cSBen Gardon 								invalid_list);
2240c50d8ae3SPaolo Bonzini 		}
2241ace569e0SSean Christopherson 	} else if (is_mmio_spte(pte)) {
2242c50d8ae3SPaolo Bonzini 		mmu_spte_clear_no_track(spte);
2243ace569e0SSean Christopherson 	}
22442de4085cSBen Gardon 	return 0;
2245c50d8ae3SPaolo Bonzini }
2246c50d8ae3SPaolo Bonzini 
22472de4085cSBen Gardon static int kvm_mmu_page_unlink_children(struct kvm *kvm,
22482de4085cSBen Gardon 					struct kvm_mmu_page *sp,
22492de4085cSBen Gardon 					struct list_head *invalid_list)
2250c50d8ae3SPaolo Bonzini {
22512de4085cSBen Gardon 	int zapped = 0;
2252c50d8ae3SPaolo Bonzini 	unsigned i;
2253c50d8ae3SPaolo Bonzini 
2254c50d8ae3SPaolo Bonzini 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
22552de4085cSBen Gardon 		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
22562de4085cSBen Gardon 
22572de4085cSBen Gardon 	return zapped;
2258c50d8ae3SPaolo Bonzini }
2259c50d8ae3SPaolo Bonzini 
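/* Drop all parent SPTEs pointing at @sp, disconnecting it from its parents. */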
2260c50d8ae3SPaolo Bonzini static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2261c50d8ae3SPaolo Bonzini {
2262c50d8ae3SPaolo Bonzini 	u64 *sptep;
2263c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
2264c50d8ae3SPaolo Bonzini 
2265c50d8ae3SPaolo Bonzini 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2266c50d8ae3SPaolo Bonzini 		drop_parent_pte(sp, sptep);
2267c50d8ae3SPaolo Bonzini }
2268c50d8ae3SPaolo Bonzini 
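/*
 * Recursively zap all unsync shadow pages reachable from @parent, queueing
 * them on @invalid_list.  Returns the number of unsync pages zapped.
 */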
2269c50d8ae3SPaolo Bonzini static int mmu_zap_unsync_children(struct kvm *kvm,
2270c50d8ae3SPaolo Bonzini 				   struct kvm_mmu_page *parent,
2271c50d8ae3SPaolo Bonzini 				   struct list_head *invalid_list)
2272c50d8ae3SPaolo Bonzini {
2273c50d8ae3SPaolo Bonzini 	int i, zapped = 0;
2274c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
2275c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
2276c50d8ae3SPaolo Bonzini 
22773bae0459SSean Christopherson 	if (parent->role.level == PG_LEVEL_4K)
2278c50d8ae3SPaolo Bonzini 		return 0;
2279c50d8ae3SPaolo Bonzini 
2280c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
2281c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
2282c50d8ae3SPaolo Bonzini 
2283c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
2284c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2285c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
2286c50d8ae3SPaolo Bonzini 			zapped++;
2287c50d8ae3SPaolo Bonzini 		}
2288c50d8ae3SPaolo Bonzini 	}
2289c50d8ae3SPaolo Bonzini 
2290c50d8ae3SPaolo Bonzini 	return zapped;
2291c50d8ae3SPaolo Bonzini }
2292c50d8ae3SPaolo Bonzini 
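/*
 * Disconnect @sp from the paging structures: zap its children and parent
 * PTEs and, unless @sp is an in-use root, queue it on @invalid_list.
 * *nr_zapped is set to the number of shadow pages zapped, including @sp
 * itself when it has no remaining root references.  Returns true if
 * active_mmu_pages became unstable, i.e. if a list walk must be restarted.
 */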
2293c50d8ae3SPaolo Bonzini static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2294c50d8ae3SPaolo Bonzini 				       struct kvm_mmu_page *sp,
2295c50d8ae3SPaolo Bonzini 				       struct list_head *invalid_list,
2296c50d8ae3SPaolo Bonzini 				       int *nr_zapped)
2297c50d8ae3SPaolo Bonzini {
2298c50d8ae3SPaolo Bonzini 	bool list_unstable;
2299c50d8ae3SPaolo Bonzini 
2300c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_prepare_zap_page(sp);
2301c50d8ae3SPaolo Bonzini 	++kvm->stat.mmu_shadow_zapped;
2302c50d8ae3SPaolo Bonzini 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
23032de4085cSBen Gardon 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2304c50d8ae3SPaolo Bonzini 	kvm_mmu_unlink_parents(kvm, sp);
2305c50d8ae3SPaolo Bonzini 
2306c50d8ae3SPaolo Bonzini 	/* Zapping children means active_mmu_pages has become unstable. */
2307c50d8ae3SPaolo Bonzini 	list_unstable = *nr_zapped;
2308c50d8ae3SPaolo Bonzini 
2309c50d8ae3SPaolo Bonzini 	if (!sp->role.invalid && !sp->role.direct)
2310c50d8ae3SPaolo Bonzini 		unaccount_shadowed(kvm, sp);
2311c50d8ae3SPaolo Bonzini 
2312c50d8ae3SPaolo Bonzini 	if (sp->unsync)
2313c50d8ae3SPaolo Bonzini 		kvm_unlink_unsync_page(kvm, sp);
2314c50d8ae3SPaolo Bonzini 	if (!sp->root_count) {
2315c50d8ae3SPaolo Bonzini 		/* Count self */
2316c50d8ae3SPaolo Bonzini 		(*nr_zapped)++;
2317f95eec9bSSean Christopherson 
2318f95eec9bSSean Christopherson 		/*
2319f95eec9bSSean Christopherson 		 * Already invalid pages (previously active roots) are not on
2320f95eec9bSSean Christopherson 		 * the active page list.  See list_del() in the "else" case of
2321f95eec9bSSean Christopherson 		 * !sp->root_count.
2322f95eec9bSSean Christopherson 		 */
2323f95eec9bSSean Christopherson 		if (sp->role.invalid)
2324f95eec9bSSean Christopherson 			list_add(&sp->link, invalid_list);
2325f95eec9bSSean Christopherson 		else
2326c50d8ae3SPaolo Bonzini 			list_move(&sp->link, invalid_list);
2327c50d8ae3SPaolo Bonzini 		kvm_mod_used_mmu_pages(kvm, -1);
2328c50d8ae3SPaolo Bonzini 	} else {
2329f95eec9bSSean Christopherson 		/*
2330f95eec9bSSean Christopherson 		 * Remove the active root from the active page list, the root
2331f95eec9bSSean Christopherson 		 * will be explicitly freed when the root_count hits zero.
2332f95eec9bSSean Christopherson 		 */
2333f95eec9bSSean Christopherson 		list_del(&sp->link);
2334c50d8ae3SPaolo Bonzini 
2335c50d8ae3SPaolo Bonzini 		/*
2336c50d8ae3SPaolo Bonzini 		 * Obsolete pages cannot be used on any vCPUs, see the comment
2337c50d8ae3SPaolo Bonzini 		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2338c50d8ae3SPaolo Bonzini 		 * treats invalid shadow pages as being obsolete.
2339c50d8ae3SPaolo Bonzini 		 */
2340c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
2341c50d8ae3SPaolo Bonzini 			kvm_reload_remote_mmus(kvm);
2342c50d8ae3SPaolo Bonzini 	}
2343c50d8ae3SPaolo Bonzini 
2344c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
2345c50d8ae3SPaolo Bonzini 		unaccount_huge_nx_page(kvm, sp);
2346c50d8ae3SPaolo Bonzini 
2347c50d8ae3SPaolo Bonzini 	sp->role.invalid = 1;
2348c50d8ae3SPaolo Bonzini 	return list_unstable;
2349c50d8ae3SPaolo Bonzini }
2350c50d8ae3SPaolo Bonzini 
2351c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2352c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list)
2353c50d8ae3SPaolo Bonzini {
2354c50d8ae3SPaolo Bonzini 	int nr_zapped;
2355c50d8ae3SPaolo Bonzini 
2356c50d8ae3SPaolo Bonzini 	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2357c50d8ae3SPaolo Bonzini 	return nr_zapped;
2358c50d8ae3SPaolo Bonzini }
2359c50d8ae3SPaolo Bonzini 
2360c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2361c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list)
2362c50d8ae3SPaolo Bonzini {
2363c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *nsp;
2364c50d8ae3SPaolo Bonzini 
2365c50d8ae3SPaolo Bonzini 	if (list_empty(invalid_list))
2366c50d8ae3SPaolo Bonzini 		return;
2367c50d8ae3SPaolo Bonzini 
2368c50d8ae3SPaolo Bonzini 	/*
2369c50d8ae3SPaolo Bonzini 	 * We need to make sure everyone sees our modifications to
2370c50d8ae3SPaolo Bonzini 	 * the page tables and sees changes to vcpu->mode here. The barrier
2371c50d8ae3SPaolo Bonzini 	 * in kvm_flush_remote_tlbs() achieves this. This pairs
2372c50d8ae3SPaolo Bonzini 	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2373c50d8ae3SPaolo Bonzini 	 *
2374c50d8ae3SPaolo Bonzini 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2375c50d8ae3SPaolo Bonzini 	 * guest mode and/or lockless shadow page table walks.
2376c50d8ae3SPaolo Bonzini 	 */
2377c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs(kvm);
2378c50d8ae3SPaolo Bonzini 
2379c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2380c50d8ae3SPaolo Bonzini 		WARN_ON(!sp->role.invalid || sp->root_count);
2381c50d8ae3SPaolo Bonzini 		kvm_mmu_free_page(sp);
2382c50d8ae3SPaolo Bonzini 	}
2383c50d8ae3SPaolo Bonzini }
2384c50d8ae3SPaolo Bonzini 
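/*
 * Zap shadow pages until @nr_to_zap pages have been zapped, walking
 * active_mmu_pages in reverse so that the oldest pages are reclaimed first.
 * Active roots are skipped.  Returns the number of pages zapped.
 */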
23856b82ef2cSSean Christopherson static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
23866b82ef2cSSean Christopherson 						  unsigned long nr_to_zap)
2387c50d8ae3SPaolo Bonzini {
23886b82ef2cSSean Christopherson 	unsigned long total_zapped = 0;
23896b82ef2cSSean Christopherson 	struct kvm_mmu_page *sp, *tmp;
2390ba7888ddSSean Christopherson 	LIST_HEAD(invalid_list);
23916b82ef2cSSean Christopherson 	bool unstable;
23926b82ef2cSSean Christopherson 	int nr_zapped;
2393c50d8ae3SPaolo Bonzini 
2394c50d8ae3SPaolo Bonzini 	if (list_empty(&kvm->arch.active_mmu_pages))
2395ba7888ddSSean Christopherson 		return 0;
2396c50d8ae3SPaolo Bonzini 
23976b82ef2cSSean Christopherson restart:
23988fc51726SSean Christopherson 	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
23996b82ef2cSSean Christopherson 		/*
24006b82ef2cSSean Christopherson 		 * Don't zap active root pages; the page itself can't be freed
24016b82ef2cSSean Christopherson 		 * and zapping it will just force vCPUs to realloc and reload.
24026b82ef2cSSean Christopherson 		 */
24036b82ef2cSSean Christopherson 		if (sp->root_count)
24046b82ef2cSSean Christopherson 			continue;
24056b82ef2cSSean Christopherson 
24066b82ef2cSSean Christopherson 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
24076b82ef2cSSean Christopherson 						      &nr_zapped);
24086b82ef2cSSean Christopherson 		total_zapped += nr_zapped;
24096b82ef2cSSean Christopherson 		if (total_zapped >= nr_to_zap)
2410ba7888ddSSean Christopherson 			break;
2411ba7888ddSSean Christopherson 
24126b82ef2cSSean Christopherson 		if (unstable)
24136b82ef2cSSean Christopherson 			goto restart;
2414ba7888ddSSean Christopherson 	}
24156b82ef2cSSean Christopherson 
24166b82ef2cSSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
24176b82ef2cSSean Christopherson 
24186b82ef2cSSean Christopherson 	kvm->stat.mmu_recycled += total_zapped;
24196b82ef2cSSean Christopherson 	return total_zapped;
24206b82ef2cSSean Christopherson }
24216b82ef2cSSean Christopherson 
2422afe8d7e6SSean Christopherson static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2423afe8d7e6SSean Christopherson {
2424afe8d7e6SSean Christopherson 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2425afe8d7e6SSean Christopherson 		return kvm->arch.n_max_mmu_pages -
2426afe8d7e6SSean Christopherson 			kvm->arch.n_used_mmu_pages;
2427afe8d7e6SSean Christopherson 
2428afe8d7e6SSean Christopherson 	return 0;
2429c50d8ae3SPaolo Bonzini }
2430c50d8ae3SPaolo Bonzini 
2431ba7888ddSSean Christopherson static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2432ba7888ddSSean Christopherson {
24336b82ef2cSSean Christopherson 	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2434ba7888ddSSean Christopherson 
24356b82ef2cSSean Christopherson 	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2436ba7888ddSSean Christopherson 		return 0;
2437ba7888ddSSean Christopherson 
24386b82ef2cSSean Christopherson 	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2439ba7888ddSSean Christopherson 
24406e6ec584SSean Christopherson 	/*
24416e6ec584SSean Christopherson 	 * Note, this check is intentionally soft; it only guarantees that one
24426e6ec584SSean Christopherson 	 * page is available, while the caller may end up allocating as many as
24436e6ec584SSean Christopherson 	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
24446e6ec584SSean Christopherson 	 * exceeding the (arbitrary by default) limit will not harm the host,
24456e6ec584SSean Christopherson 	 * being too aggressive may unnecessarily kill the guest, and getting an
24466e6ec584SSean Christopherson 	 * exact count is far more trouble than it's worth, especially in the
24476e6ec584SSean Christopherson 	 * page fault paths.
24486e6ec584SSean Christopherson 	 */
2449ba7888ddSSean Christopherson 	if (!kvm_mmu_available_pages(vcpu->kvm))
2450ba7888ddSSean Christopherson 		return -ENOSPC;
2451ba7888ddSSean Christopherson 	return 0;
2452ba7888ddSSean Christopherson }
2453ba7888ddSSean Christopherson 
2454c50d8ae3SPaolo Bonzini /*
2455c50d8ae3SPaolo Bonzini  * Change the number of mmu pages allocated to the vm.
2456c50d8ae3SPaolo Bonzini  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2457c50d8ae3SPaolo Bonzini  */
2458c50d8ae3SPaolo Bonzini void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2459c50d8ae3SPaolo Bonzini {
2460531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
2461c50d8ae3SPaolo Bonzini 
2462c50d8ae3SPaolo Bonzini 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
24636b82ef2cSSean Christopherson 		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
24646b82ef2cSSean Christopherson 						  goal_nr_mmu_pages);
2465c50d8ae3SPaolo Bonzini 
2466c50d8ae3SPaolo Bonzini 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2467c50d8ae3SPaolo Bonzini 	}
2468c50d8ae3SPaolo Bonzini 
2469c50d8ae3SPaolo Bonzini 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2470c50d8ae3SPaolo Bonzini 
2471531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
2472c50d8ae3SPaolo Bonzini }
2473c50d8ae3SPaolo Bonzini 
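/*
 * Zap all valid indirect shadow pages for @gfn, e.g. so the guest can write
 * the gfn without triggering write-protection faults.  Returns 1 if at least
 * one shadow page was zapped, 0 otherwise.
 */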
2474c50d8ae3SPaolo Bonzini int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2475c50d8ae3SPaolo Bonzini {
2476c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2477c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2478c50d8ae3SPaolo Bonzini 	int r;
2479c50d8ae3SPaolo Bonzini 
2480c50d8ae3SPaolo Bonzini 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2481c50d8ae3SPaolo Bonzini 	r = 0;
2482531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
2483c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2484c50d8ae3SPaolo Bonzini 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2485c50d8ae3SPaolo Bonzini 			 sp->role.word);
2486c50d8ae3SPaolo Bonzini 		r = 1;
2487c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2488c50d8ae3SPaolo Bonzini 	}
2489c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2490531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
2491c50d8ae3SPaolo Bonzini 
2492c50d8ae3SPaolo Bonzini 	return r;
2493c50d8ae3SPaolo Bonzini }
249496ad91aeSSean Christopherson 
249596ad91aeSSean Christopherson static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
249696ad91aeSSean Christopherson {
249796ad91aeSSean Christopherson 	gpa_t gpa;
249896ad91aeSSean Christopherson 	int r;
249996ad91aeSSean Christopherson 
250096ad91aeSSean Christopherson 	if (vcpu->arch.mmu->direct_map)
250196ad91aeSSean Christopherson 		return 0;
250296ad91aeSSean Christopherson 
250396ad91aeSSean Christopherson 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
250496ad91aeSSean Christopherson 
250596ad91aeSSean Christopherson 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
250696ad91aeSSean Christopherson 
250796ad91aeSSean Christopherson 	return r;
250896ad91aeSSean Christopherson }
2509c50d8ae3SPaolo Bonzini 
2510c50d8ae3SPaolo Bonzini static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2511c50d8ae3SPaolo Bonzini {
2512c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_unsync_page(sp);
2513c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_unsync;
2514c50d8ae3SPaolo Bonzini 	sp->unsync = 1;
2515c50d8ae3SPaolo Bonzini 
2516c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
2517c50d8ae3SPaolo Bonzini }
2518c50d8ae3SPaolo Bonzini 
25190337f585SSean Christopherson /*
25200337f585SSean Christopherson  * Attempt to unsync any shadow pages that can be reached by the specified gfn,
25210337f585SSean Christopherson  * for which KVM is creating a writable mapping.  Returns 0 if all pages
25220337f585SSean Christopherson  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
25230337f585SSean Christopherson  * be write-protected.
25240337f585SSean Christopherson  */
25250337f585SSean Christopherson int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
2526c50d8ae3SPaolo Bonzini {
2527c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2528c50d8ae3SPaolo Bonzini 
25290337f585SSean Christopherson 	/*
25300337f585SSean Christopherson 	 * Force write-protection if the page is being tracked.  Note, the page
25310337f585SSean Christopherson 	 * track machinery is used to write-protect upper-level shadow pages,
25320337f585SSean Christopherson 	 * i.e. this guards the role.level == 4K assertion below!
25330337f585SSean Christopherson 	 */
2534c50d8ae3SPaolo Bonzini 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
25350337f585SSean Christopherson 		return -EPERM;
2536c50d8ae3SPaolo Bonzini 
25370337f585SSean Christopherson 	/*
25380337f585SSean Christopherson 	 * The page is not write-tracked, mark existing shadow pages unsync
25390337f585SSean Christopherson 	 * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
25400337f585SSean Christopherson 	 * that case, KVM must complete emulation of the guest TLB flush before
25410337f585SSean Christopherson 	 * allowing shadow pages to become unsync (writable by the guest).
25420337f585SSean Christopherson 	 */
2543c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2544c50d8ae3SPaolo Bonzini 		if (!can_unsync)
25450337f585SSean Christopherson 			return -EPERM;
2546c50d8ae3SPaolo Bonzini 
2547c50d8ae3SPaolo Bonzini 		if (sp->unsync)
2548c50d8ae3SPaolo Bonzini 			continue;
2549c50d8ae3SPaolo Bonzini 
25503bae0459SSean Christopherson 		WARN_ON(sp->role.level != PG_LEVEL_4K);
2551c50d8ae3SPaolo Bonzini 		kvm_unsync_page(vcpu, sp);
2552c50d8ae3SPaolo Bonzini 	}
2553c50d8ae3SPaolo Bonzini 
2554c50d8ae3SPaolo Bonzini 	/*
2555c50d8ae3SPaolo Bonzini 	 * We need to ensure that the marking of unsync pages is visible
2556c50d8ae3SPaolo Bonzini 	 * before the SPTE is updated to allow writes because
2557c50d8ae3SPaolo Bonzini 	 * kvm_mmu_sync_roots() checks the unsync flags without holding
2558c50d8ae3SPaolo Bonzini 	 * the MMU lock and so can race with this. If the SPTE was updated
2559c50d8ae3SPaolo Bonzini 	 * before the page had been marked as unsync-ed, something like the
2560c50d8ae3SPaolo Bonzini 	 * following could happen:
2561c50d8ae3SPaolo Bonzini 	 *
2562c50d8ae3SPaolo Bonzini 	 * CPU 1                    CPU 2
2563c50d8ae3SPaolo Bonzini 	 * ---------------------------------------------------------------------
2564c50d8ae3SPaolo Bonzini 	 * 1.2 Host updates SPTE
2565c50d8ae3SPaolo Bonzini 	 *     to be writable
2566c50d8ae3SPaolo Bonzini 	 *                      2.1 Guest writes a GPTE for GVA X.
2567c50d8ae3SPaolo Bonzini 	 *                          (GPTE being in the guest page table shadowed
2568c50d8ae3SPaolo Bonzini 	 *                           by the SP from CPU 1.)
2569c50d8ae3SPaolo Bonzini 	 *                          This reads SPTE during the page table walk.
2570c50d8ae3SPaolo Bonzini 	 *                          Since SPTE.W is read as 1, there is no
2571c50d8ae3SPaolo Bonzini 	 *                          fault.
2572c50d8ae3SPaolo Bonzini 	 *
2573c50d8ae3SPaolo Bonzini 	 *                      2.2 Guest issues TLB flush.
2574c50d8ae3SPaolo Bonzini 	 *                          That causes a VM Exit.
2575c50d8ae3SPaolo Bonzini 	 *
25760337f585SSean Christopherson 	 *                      2.3 Walking of unsync pages sees sp->unsync is
25770337f585SSean Christopherson 	 *                          false and skips the page.
2578c50d8ae3SPaolo Bonzini 	 *
2579c50d8ae3SPaolo Bonzini 	 *                      2.4 Guest accesses GVA X.
2580c50d8ae3SPaolo Bonzini 	 *                          Since the mapping in the SP was not updated,
2581c50d8ae3SPaolo Bonzini 	 *                          the old mapping for GVA X incorrectly
2582c50d8ae3SPaolo Bonzini 	 *                          gets used.
2583c50d8ae3SPaolo Bonzini 	 * 1.1 Host marks SP
2584c50d8ae3SPaolo Bonzini 	 *     as unsync
2585c50d8ae3SPaolo Bonzini 	 *     (sp->unsync = true)
2586c50d8ae3SPaolo Bonzini 	 *
2587c50d8ae3SPaolo Bonzini 	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
2588c50d8ae3SPaolo Bonzini 	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
2589c50d8ae3SPaolo Bonzini 	 * pairs with this write barrier.
2590c50d8ae3SPaolo Bonzini 	 */
2591c50d8ae3SPaolo Bonzini 	smp_wmb();
2592c50d8ae3SPaolo Bonzini 
25930337f585SSean Christopherson 	return 0;
2594c50d8ae3SPaolo Bonzini }
2595c50d8ae3SPaolo Bonzini 
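/*
 * Create a leaf SPTE via make_spte() and install it at @sptep.  Returns a
 * mask of SET_SPTE_* flags, e.g. indicating whether a remote TLB flush is
 * needed or whether the new SPTE is identical to the old one (spurious).
 */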
2596799a4190SBen Gardon static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2597799a4190SBen Gardon 		    unsigned int pte_access, int level,
2598799a4190SBen Gardon 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2599799a4190SBen Gardon 		    bool can_unsync, bool host_writable)
2600799a4190SBen Gardon {
2601799a4190SBen Gardon 	u64 spte;
2602799a4190SBen Gardon 	struct kvm_mmu_page *sp;
2603799a4190SBen Gardon 	int ret;
2604799a4190SBen Gardon 
2605799a4190SBen Gardon 	sp = sptep_to_sp(sptep);
2606799a4190SBen Gardon 
2607799a4190SBen Gardon 	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
2608799a4190SBen Gardon 			can_unsync, host_writable, sp_ad_disabled(sp), &spte);
2609799a4190SBen Gardon 
2610799a4190SBen Gardon 	if (spte & PT_WRITABLE_MASK)
2611799a4190SBen Gardon 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
2612799a4190SBen Gardon 
261312703759SSean Christopherson 	if (*sptep == spte)
261412703759SSean Christopherson 		ret |= SET_SPTE_SPURIOUS;
261512703759SSean Christopherson 	else if (mmu_spte_update(sptep, spte))
2616c50d8ae3SPaolo Bonzini 		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
2617c50d8ae3SPaolo Bonzini 	return ret;
2618c50d8ae3SPaolo Bonzini }
2619c50d8ae3SPaolo Bonzini 
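/*
 * Install a leaf SPTE, handling overwrites of existing SPTEs, rmaps and
 * stats.  Returns a RET_PF_* value: RET_PF_FIXED on success, RET_PF_EMULATE
 * if the access must be emulated (MMIO or a write to a write-protected page
 * table), or RET_PF_SPURIOUS if the SPTE was already up-to-date.
 */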
26200a2b64c5SBen Gardon static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2621e88b8093SSean Christopherson 			unsigned int pte_access, bool write_fault, int level,
26220a2b64c5SBen Gardon 			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
26230a2b64c5SBen Gardon 			bool host_writable)
2624c50d8ae3SPaolo Bonzini {
2625c50d8ae3SPaolo Bonzini 	int was_rmapped = 0;
2626c50d8ae3SPaolo Bonzini 	int rmap_count;
2627c50d8ae3SPaolo Bonzini 	int set_spte_ret;
2628c4371c2aSSean Christopherson 	int ret = RET_PF_FIXED;
2629c50d8ae3SPaolo Bonzini 	bool flush = false;
2630c50d8ae3SPaolo Bonzini 
2631c50d8ae3SPaolo Bonzini 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2632c50d8ae3SPaolo Bonzini 		 *sptep, write_fault, gfn);
2633c50d8ae3SPaolo Bonzini 
2634a54aa15cSSean Christopherson 	if (unlikely(is_noslot_pfn(pfn))) {
2635a54aa15cSSean Christopherson 		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2636a54aa15cSSean Christopherson 		return RET_PF_EMULATE;
2637a54aa15cSSean Christopherson 	}
2638a54aa15cSSean Christopherson 
2639c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep)) {
2640c50d8ae3SPaolo Bonzini 		/*
2641c50d8ae3SPaolo Bonzini 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2642c50d8ae3SPaolo Bonzini 		 * the parent of the now unreachable PTE.
2643c50d8ae3SPaolo Bonzini 		 */
26443bae0459SSean Christopherson 		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2645c50d8ae3SPaolo Bonzini 			struct kvm_mmu_page *child;
2646c50d8ae3SPaolo Bonzini 			u64 pte = *sptep;
2647c50d8ae3SPaolo Bonzini 
2648e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2649c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, sptep);
2650c50d8ae3SPaolo Bonzini 			flush = true;
2651c50d8ae3SPaolo Bonzini 		} else if (pfn != spte_to_pfn(*sptep)) {
2652c50d8ae3SPaolo Bonzini 			pgprintk("hfn old %llx new %llx\n",
2653c50d8ae3SPaolo Bonzini 				 spte_to_pfn(*sptep), pfn);
2654c50d8ae3SPaolo Bonzini 			drop_spte(vcpu->kvm, sptep);
2655c50d8ae3SPaolo Bonzini 			flush = true;
2656c50d8ae3SPaolo Bonzini 		} else
2657c50d8ae3SPaolo Bonzini 			was_rmapped = 1;
2658c50d8ae3SPaolo Bonzini 	}
2659c50d8ae3SPaolo Bonzini 
2660c50d8ae3SPaolo Bonzini 	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
2661c50d8ae3SPaolo Bonzini 				speculative, true, host_writable);
2662c50d8ae3SPaolo Bonzini 	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
2663c50d8ae3SPaolo Bonzini 		if (write_fault)
2664c50d8ae3SPaolo Bonzini 			ret = RET_PF_EMULATE;
26658c8560b8SSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2666c50d8ae3SPaolo Bonzini 	}
2667c50d8ae3SPaolo Bonzini 
2668c50d8ae3SPaolo Bonzini 	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
2669c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
2670c50d8ae3SPaolo Bonzini 				KVM_PAGES_PER_HPAGE(level));
2671c50d8ae3SPaolo Bonzini 
267212703759SSean Christopherson 	/*
267312703759SSean Christopherson 	 * The fault is fully spurious if and only if the new SPTE and old SPTE
267412703759SSean Christopherson 	 * are identical, and emulation is not required.
267512703759SSean Christopherson 	 */
267612703759SSean Christopherson 	if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
267712703759SSean Christopherson 		WARN_ON_ONCE(!was_rmapped);
267812703759SSean Christopherson 		return RET_PF_SPURIOUS;
267912703759SSean Christopherson 	}
268012703759SSean Christopherson 
2681c50d8ae3SPaolo Bonzini 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2682c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_set_spte(level, gfn, sptep);
2683c50d8ae3SPaolo Bonzini 	if (!was_rmapped && is_large_pte(*sptep))
2684c50d8ae3SPaolo Bonzini 		++vcpu->kvm->stat.lpages;
2685c50d8ae3SPaolo Bonzini 
2686c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep)) {
2687c50d8ae3SPaolo Bonzini 		if (!was_rmapped) {
2688c50d8ae3SPaolo Bonzini 			rmap_count = rmap_add(vcpu, sptep, gfn);
2689c50d8ae3SPaolo Bonzini 			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2690c50d8ae3SPaolo Bonzini 				rmap_recycle(vcpu, sptep, gfn);
2691c50d8ae3SPaolo Bonzini 		}
2692c50d8ae3SPaolo Bonzini 	}
2693c50d8ae3SPaolo Bonzini 
2694c50d8ae3SPaolo Bonzini 	return ret;
2695c50d8ae3SPaolo Bonzini }
2696c50d8ae3SPaolo Bonzini 
2697c50d8ae3SPaolo Bonzini static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2698c50d8ae3SPaolo Bonzini 				     bool no_dirty_log)
2699c50d8ae3SPaolo Bonzini {
2700c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
2701c50d8ae3SPaolo Bonzini 
2702c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2703c50d8ae3SPaolo Bonzini 	if (!slot)
2704c50d8ae3SPaolo Bonzini 		return KVM_PFN_ERR_FAULT;
2705c50d8ae3SPaolo Bonzini 
2706c50d8ae3SPaolo Bonzini 	return gfn_to_pfn_memslot_atomic(slot, gfn);
2707c50d8ae3SPaolo Bonzini }
2708c50d8ae3SPaolo Bonzini 
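/*
 * Speculatively map the [start, end) range of SPTEs in @sp, grabbing the
 * backing pages in a single atomic batch.  Returns 0 on success, -1 if the
 * gfns can't be mapped, e.g. no usable memslot or the atomic lookup failed.
 */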
2709c50d8ae3SPaolo Bonzini static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2710c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp,
2711c50d8ae3SPaolo Bonzini 				    u64 *start, u64 *end)
2712c50d8ae3SPaolo Bonzini {
2713c50d8ae3SPaolo Bonzini 	struct page *pages[PTE_PREFETCH_NUM];
2714c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
27150a2b64c5SBen Gardon 	unsigned int access = sp->role.access;
2716c50d8ae3SPaolo Bonzini 	int i, ret;
2717c50d8ae3SPaolo Bonzini 	gfn_t gfn;
2718c50d8ae3SPaolo Bonzini 
2719c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2720c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2721c50d8ae3SPaolo Bonzini 	if (!slot)
2722c50d8ae3SPaolo Bonzini 		return -1;
2723c50d8ae3SPaolo Bonzini 
2724c50d8ae3SPaolo Bonzini 	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2725c50d8ae3SPaolo Bonzini 	if (ret <= 0)
2726c50d8ae3SPaolo Bonzini 		return -1;
2727c50d8ae3SPaolo Bonzini 
2728c50d8ae3SPaolo Bonzini 	for (i = 0; i < ret; i++, gfn++, start++) {
2729e88b8093SSean Christopherson 		mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
2730c50d8ae3SPaolo Bonzini 			     page_to_pfn(pages[i]), true, true);
2731c50d8ae3SPaolo Bonzini 		put_page(pages[i]);
2732c50d8ae3SPaolo Bonzini 	}
2733c50d8ae3SPaolo Bonzini 
2734c50d8ae3SPaolo Bonzini 	return 0;
2735c50d8ae3SPaolo Bonzini }
2736c50d8ae3SPaolo Bonzini 
2737c50d8ae3SPaolo Bonzini static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2738c50d8ae3SPaolo Bonzini 				  struct kvm_mmu_page *sp, u64 *sptep)
2739c50d8ae3SPaolo Bonzini {
2740c50d8ae3SPaolo Bonzini 	u64 *spte, *start = NULL;
2741c50d8ae3SPaolo Bonzini 	int i;
2742c50d8ae3SPaolo Bonzini 
2743c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
2744c50d8ae3SPaolo Bonzini 
2745c50d8ae3SPaolo Bonzini 	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2746c50d8ae3SPaolo Bonzini 	spte = sp->spt + i;
2747c50d8ae3SPaolo Bonzini 
2748c50d8ae3SPaolo Bonzini 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2749c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*spte) || spte == sptep) {
2750c50d8ae3SPaolo Bonzini 			if (!start)
2751c50d8ae3SPaolo Bonzini 				continue;
2752c50d8ae3SPaolo Bonzini 			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2753c50d8ae3SPaolo Bonzini 				break;
2754c50d8ae3SPaolo Bonzini 			start = NULL;
2755c50d8ae3SPaolo Bonzini 		} else if (!start)
2756c50d8ae3SPaolo Bonzini 			start = spte;
2757c50d8ae3SPaolo Bonzini 	}
2758c50d8ae3SPaolo Bonzini }
2759c50d8ae3SPaolo Bonzini 
2760c50d8ae3SPaolo Bonzini static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2761c50d8ae3SPaolo Bonzini {
2762c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2763c50d8ae3SPaolo Bonzini 
276457354682SSean Christopherson 	sp = sptep_to_sp(sptep);
2765c50d8ae3SPaolo Bonzini 
2766c50d8ae3SPaolo Bonzini 	/*
2767c50d8ae3SPaolo Bonzini 	 * Without accessed bits, there's no way to distinguish between
2768c50d8ae3SPaolo Bonzini 	 * actually accessed translations and prefetched ones, so disable pte
2769c50d8ae3SPaolo Bonzini 	 * prefetch if accessed bits aren't available.
2770c50d8ae3SPaolo Bonzini 	 */
2771c50d8ae3SPaolo Bonzini 	if (sp_ad_disabled(sp))
2772c50d8ae3SPaolo Bonzini 		return;
2773c50d8ae3SPaolo Bonzini 
27743bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
2775c50d8ae3SPaolo Bonzini 		return;
2776c50d8ae3SPaolo Bonzini 
27774a42d848SDavid Stevens 	/*
27784a42d848SDavid Stevens 	 * If addresses are being invalidated, skip prefetching to avoid
27794a42d848SDavid Stevens 	 * accidentally prefetching those addresses.
27804a42d848SDavid Stevens 	 */
27814a42d848SDavid Stevens 	if (unlikely(vcpu->kvm->mmu_notifier_count))
27824a42d848SDavid Stevens 		return;
27834a42d848SDavid Stevens 
2784c50d8ae3SPaolo Bonzini 	__direct_pte_prefetch(vcpu, sp, sptep);
2785c50d8ae3SPaolo Bonzini }
2786c50d8ae3SPaolo Bonzini 
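/*
 * Returns the level at which the host maps @pfn, determined by walking the
 * host page tables for the gfn's userspace address; pages that can't possibly
 * be huge (neither compound nor zone-device) short-circuit to PG_LEVEL_4K.
 */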
27871b6d9d9eSSean Christopherson static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
27888ca6f063SBen Gardon 				  const struct kvm_memory_slot *slot)
2789db543216SSean Christopherson {
2790db543216SSean Christopherson 	unsigned long hva;
2791db543216SSean Christopherson 	pte_t *pte;
2792db543216SSean Christopherson 	int level;
2793db543216SSean Christopherson 
2794e851265aSSean Christopherson 	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
27953bae0459SSean Christopherson 		return PG_LEVEL_4K;
2796db543216SSean Christopherson 
2797293e306eSSean Christopherson 	/*
2798293e306eSSean Christopherson 	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
2799293e306eSSean Christopherson 	 * is not solely for performance, it's also necessary to avoid the
2800293e306eSSean Christopherson 	 * "writable" check in __gfn_to_hva_many(), which will always fail on
2801293e306eSSean Christopherson 	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
2802293e306eSSean Christopherson 	 * page fault steps have already verified the guest isn't writing a
2803293e306eSSean Christopherson 	 * read-only memslot.
2804293e306eSSean Christopherson 	 */
2805db543216SSean Christopherson 	hva = __gfn_to_hva_memslot(slot, gfn);
2806db543216SSean Christopherson 
28071b6d9d9eSSean Christopherson 	pte = lookup_address_in_mm(kvm->mm, hva, &level);
2808db543216SSean Christopherson 	if (unlikely(!pte))
28093bae0459SSean Christopherson 		return PG_LEVEL_4K;
2810db543216SSean Christopherson 
2811db543216SSean Christopherson 	return level;
2812db543216SSean Christopherson }
2813db543216SSean Christopherson 
28148ca6f063SBen Gardon int kvm_mmu_max_mapping_level(struct kvm *kvm,
28158ca6f063SBen Gardon 			      const struct kvm_memory_slot *slot, gfn_t gfn,
28168ca6f063SBen Gardon 			      kvm_pfn_t pfn, int max_level)
28171b6d9d9eSSean Christopherson {
28181b6d9d9eSSean Christopherson 	struct kvm_lpage_info *linfo;
28191b6d9d9eSSean Christopherson 
28201b6d9d9eSSean Christopherson 	max_level = min(max_level, max_huge_page_level);
28211b6d9d9eSSean Christopherson 	for ( ; max_level > PG_LEVEL_4K; max_level--) {
28221b6d9d9eSSean Christopherson 		linfo = lpage_info_slot(gfn, slot, max_level);
28231b6d9d9eSSean Christopherson 		if (!linfo->disallow_lpage)
28241b6d9d9eSSean Christopherson 			break;
28251b6d9d9eSSean Christopherson 	}
28261b6d9d9eSSean Christopherson 
28271b6d9d9eSSean Christopherson 	if (max_level == PG_LEVEL_4K)
28281b6d9d9eSSean Christopherson 		return PG_LEVEL_4K;
28291b6d9d9eSSean Christopherson 
28301b6d9d9eSSean Christopherson 	return host_pfn_mapping_level(kvm, gfn, pfn, slot);
28311b6d9d9eSSean Christopherson }
28321b6d9d9eSSean Christopherson 
2833bb18842eSBen Gardon int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
28343cf06612SSean Christopherson 			    int max_level, kvm_pfn_t *pfnp,
28353cf06612SSean Christopherson 			    bool huge_page_disallowed, int *req_level)
28360885904dSSean Christopherson {
2837293e306eSSean Christopherson 	struct kvm_memory_slot *slot;
28380885904dSSean Christopherson 	kvm_pfn_t pfn = *pfnp;
283917eff019SSean Christopherson 	kvm_pfn_t mask;
284083f06fa7SSean Christopherson 	int level;
28410885904dSSean Christopherson 
28423cf06612SSean Christopherson 	*req_level = PG_LEVEL_4K;
28433cf06612SSean Christopherson 
28443bae0459SSean Christopherson 	if (unlikely(max_level == PG_LEVEL_4K))
28453bae0459SSean Christopherson 		return PG_LEVEL_4K;
284617eff019SSean Christopherson 
2847e851265aSSean Christopherson 	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
28483bae0459SSean Christopherson 		return PG_LEVEL_4K;
284917eff019SSean Christopherson 
2850293e306eSSean Christopherson 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
2851293e306eSSean Christopherson 	if (!slot)
28523bae0459SSean Christopherson 		return PG_LEVEL_4K;
2853293e306eSSean Christopherson 
28541b6d9d9eSSean Christopherson 	level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
28553bae0459SSean Christopherson 	if (level == PG_LEVEL_4K)
285683f06fa7SSean Christopherson 		return level;
285717eff019SSean Christopherson 
28583cf06612SSean Christopherson 	*req_level = level = min(level, max_level);
28593cf06612SSean Christopherson 
28603cf06612SSean Christopherson 	/*
28613cf06612SSean Christopherson 	 * Enforce the iTLB multihit workaround after capturing the requested
28623cf06612SSean Christopherson 	 * level, which will be used to do precise, accurate accounting.
28633cf06612SSean Christopherson 	 */
28643cf06612SSean Christopherson 	if (huge_page_disallowed)
28653cf06612SSean Christopherson 		return PG_LEVEL_4K;
28664cd071d1SSean Christopherson 
28670885904dSSean Christopherson 	/*
28684cd071d1SSean Christopherson 	 * mmu_notifier_retry() was successful and mmu_lock is held, so
28694cd071d1SSean Christopherson 	 * the pmd can't be split from under us.
28700885904dSSean Christopherson 	 */
28710885904dSSean Christopherson 	mask = KVM_PAGES_PER_HPAGE(level) - 1;
28720885904dSSean Christopherson 	VM_BUG_ON((gfn & mask) != (pfn & mask));
28734cd071d1SSean Christopherson 	*pfnp = pfn & ~mask;
287483f06fa7SSean Christopherson 
287583f06fa7SSean Christopherson 	return level;
28760885904dSSean Christopherson }
28770885904dSSean Christopherson 
2878bb18842eSBen Gardon void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
2879bb18842eSBen Gardon 				kvm_pfn_t *pfnp, int *goal_levelp)
2880c50d8ae3SPaolo Bonzini {
2881bb18842eSBen Gardon 	int level = *goal_levelp;
2882c50d8ae3SPaolo Bonzini 
28837d945312SBen Gardon 	if (cur_level == level && level > PG_LEVEL_4K &&
2884c50d8ae3SPaolo Bonzini 	    is_shadow_present_pte(spte) &&
2885c50d8ae3SPaolo Bonzini 	    !is_large_pte(spte)) {
2886c50d8ae3SPaolo Bonzini 		/*
2887c50d8ae3SPaolo Bonzini 		 * A small SPTE exists for this pfn, but FNAME(fetch)
2888c50d8ae3SPaolo Bonzini 		 * and __direct_map would like to create a large PTE
2889c50d8ae3SPaolo Bonzini 		 * instead: just force them to go down another level,
2890c50d8ae3SPaolo Bonzini 		 * patching the next 9 bits of the address back into
2891c50d8ae3SPaolo Bonzini 		 * pfn for them.
2892c50d8ae3SPaolo Bonzini 		 */
28937d945312SBen Gardon 		u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
28947d945312SBen Gardon 				KVM_PAGES_PER_HPAGE(level - 1);
2895c50d8ae3SPaolo Bonzini 		*pfnp |= gfn & page_mask;
2896bb18842eSBen Gardon 		(*goal_levelp)--;
2897c50d8ae3SPaolo Bonzini 	}
2898c50d8ae3SPaolo Bonzini }
2899c50d8ae3SPaolo Bonzini 
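/*
 * Handle a direct-map fault: walk the shadow page table down to the target
 * level, allocating and linking intermediate shadow pages as needed, then
 * install the final leaf SPTE via mmu_set_spte().
 */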
29006c2fd34fSSean Christopherson static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
290183f06fa7SSean Christopherson 			int map_writable, int max_level, kvm_pfn_t pfn,
29026c2fd34fSSean Christopherson 			bool prefault, bool is_tdp)
2903c50d8ae3SPaolo Bonzini {
29046c2fd34fSSean Christopherson 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
29056c2fd34fSSean Christopherson 	bool write = error_code & PFERR_WRITE_MASK;
29066c2fd34fSSean Christopherson 	bool exec = error_code & PFERR_FETCH_MASK;
29076c2fd34fSSean Christopherson 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
2908c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator it;
2909c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
29103cf06612SSean Christopherson 	int level, req_level, ret;
2911c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
2912c50d8ae3SPaolo Bonzini 	gfn_t base_gfn = gfn;
2913c50d8ae3SPaolo Bonzini 
29143cf06612SSean Christopherson 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
29153cf06612SSean Christopherson 					huge_page_disallowed, &req_level);
29164cd071d1SSean Christopherson 
2917c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
2918c50d8ae3SPaolo Bonzini 	for_each_shadow_entry(vcpu, gpa, it) {
2919c50d8ae3SPaolo Bonzini 		/*
2920c50d8ae3SPaolo Bonzini 		 * We cannot overwrite existing page tables with an NX
2921c50d8ae3SPaolo Bonzini 		 * large page, as the leaf could be executable.
2922c50d8ae3SPaolo Bonzini 		 */
2923dcc70651SSean Christopherson 		if (nx_huge_page_workaround_enabled)
29247d945312SBen Gardon 			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
29257d945312SBen Gardon 						   &pfn, &level);
2926c50d8ae3SPaolo Bonzini 
2927c50d8ae3SPaolo Bonzini 		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
2928c50d8ae3SPaolo Bonzini 		if (it.level == level)
2929c50d8ae3SPaolo Bonzini 			break;
2930c50d8ae3SPaolo Bonzini 
2931c50d8ae3SPaolo Bonzini 		drop_large_spte(vcpu, it.sptep);
2932c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(*it.sptep)) {
2933c50d8ae3SPaolo Bonzini 			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
2934c50d8ae3SPaolo Bonzini 					      it.level - 1, true, ACC_ALL);
2935c50d8ae3SPaolo Bonzini 
2936c50d8ae3SPaolo Bonzini 			link_shadow_page(vcpu, it.sptep, sp);
29375bcaf3e1SSean Christopherson 			if (is_tdp && huge_page_disallowed &&
29385bcaf3e1SSean Christopherson 			    req_level >= it.level)
2939c50d8ae3SPaolo Bonzini 				account_huge_nx_page(vcpu->kvm, sp);
2940c50d8ae3SPaolo Bonzini 		}
2941c50d8ae3SPaolo Bonzini 	}
2942c50d8ae3SPaolo Bonzini 
2943c50d8ae3SPaolo Bonzini 	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
2944c50d8ae3SPaolo Bonzini 			   write, level, base_gfn, pfn, prefault,
2945c50d8ae3SPaolo Bonzini 			   map_writable);
294612703759SSean Christopherson 	if (ret == RET_PF_SPURIOUS)
294712703759SSean Christopherson 		return ret;
294812703759SSean Christopherson 
2949c50d8ae3SPaolo Bonzini 	direct_pte_prefetch(vcpu, it.sptep);
2950c50d8ae3SPaolo Bonzini 	++vcpu->stat.pf_fixed;
2951c50d8ae3SPaolo Bonzini 	return ret;
2952c50d8ae3SPaolo Bonzini }
2953c50d8ae3SPaolo Bonzini 
2954c50d8ae3SPaolo Bonzini static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2955c50d8ae3SPaolo Bonzini {
2956c50d8ae3SPaolo Bonzini 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
2957c50d8ae3SPaolo Bonzini }
2958c50d8ae3SPaolo Bonzini 
2959c50d8ae3SPaolo Bonzini static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2960c50d8ae3SPaolo Bonzini {
2961c50d8ae3SPaolo Bonzini 	/*
2962c50d8ae3SPaolo Bonzini 	 * Do not cache the mmio info caused by writing the readonly gfn
2963c50d8ae3SPaolo Bonzini 	 * into the spte, otherwise a read access on the readonly gfn could
2964c50d8ae3SPaolo Bonzini 	 * also cause an mmio page fault and be treated as mmio access.
2965c50d8ae3SPaolo Bonzini 	 */
2966c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_RO_FAULT)
2967c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
2968c50d8ae3SPaolo Bonzini 
2969c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_HWPOISON) {
2970c50d8ae3SPaolo Bonzini 		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
2971c50d8ae3SPaolo Bonzini 		return RET_PF_RETRY;
2972c50d8ae3SPaolo Bonzini 	}
2973c50d8ae3SPaolo Bonzini 
2974c50d8ae3SPaolo Bonzini 	return -EFAULT;
2975c50d8ae3SPaolo Bonzini }
2976c50d8ae3SPaolo Bonzini 
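/*
 * Handle error and no-slot pfns.  Returns true if the fault was consumed,
 * with *ret_val holding the resulting RET_PF_* value or -EFAULT; returns
 * false if the fault needs further handling.
 */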
2977c50d8ae3SPaolo Bonzini static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
29780a2b64c5SBen Gardon 				kvm_pfn_t pfn, unsigned int access,
29790a2b64c5SBen Gardon 				int *ret_val)
2980c50d8ae3SPaolo Bonzini {
2981c50d8ae3SPaolo Bonzini 	/* The pfn is invalid, report the error! */
2982c50d8ae3SPaolo Bonzini 	if (unlikely(is_error_pfn(pfn))) {
2983c50d8ae3SPaolo Bonzini 		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
2984c50d8ae3SPaolo Bonzini 		return true;
2985c50d8ae3SPaolo Bonzini 	}
2986c50d8ae3SPaolo Bonzini 
298730ab5901SSean Christopherson 	if (unlikely(is_noslot_pfn(pfn))) {
2988c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, gva, gfn,
2989c50d8ae3SPaolo Bonzini 				     access & shadow_mmio_access_mask);
299030ab5901SSean Christopherson 		/*
299130ab5901SSean Christopherson 		 * If MMIO caching is disabled, emulate immediately without
299230ab5901SSean Christopherson 		 * touching the shadow page tables as attempting to install an
299330ab5901SSean Christopherson 		 * MMIO SPTE will just be an expensive nop.
299430ab5901SSean Christopherson 		 */
299530ab5901SSean Christopherson 		if (unlikely(!shadow_mmio_value)) {
299630ab5901SSean Christopherson 			*ret_val = RET_PF_EMULATE;
299730ab5901SSean Christopherson 			return true;
299830ab5901SSean Christopherson 		}
299930ab5901SSean Christopherson 	}
3000c50d8ae3SPaolo Bonzini 
3001c50d8ae3SPaolo Bonzini 	return false;
3002c50d8ae3SPaolo Bonzini }
3003c50d8ae3SPaolo Bonzini 
3004c50d8ae3SPaolo Bonzini static bool page_fault_can_be_fast(u32 error_code)
3005c50d8ae3SPaolo Bonzini {
3006c50d8ae3SPaolo Bonzini 	/*
3007c50d8ae3SPaolo Bonzini 	 * Do not fix the mmio spte with invalid generation number which
3008c50d8ae3SPaolo Bonzini 	 * need to be updated by slow page fault path.
3009c50d8ae3SPaolo Bonzini 	 */
3010c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK))
3011c50d8ae3SPaolo Bonzini 		return false;
3012c50d8ae3SPaolo Bonzini 
3013c50d8ae3SPaolo Bonzini 	/* See if the page fault is due to an NX violation */
3014c50d8ae3SPaolo Bonzini 	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
3015c50d8ae3SPaolo Bonzini 		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
3016c50d8ae3SPaolo Bonzini 		return false;
3017c50d8ae3SPaolo Bonzini 
3018c50d8ae3SPaolo Bonzini 	/*
3019c50d8ae3SPaolo Bonzini 	 * #PF can be fast if:
3020c50d8ae3SPaolo Bonzini 	 * 1. The shadow page table entry is not present, which could mean that
3021c50d8ae3SPaolo Bonzini 	 *    the fault is potentially caused by access tracking (if enabled).
3022c50d8ae3SPaolo Bonzini 	 * 2. The shadow page table entry is present and the fault
3023c50d8ae3SPaolo Bonzini 	 *    is caused by write-protection, which means we just need to change
3024c50d8ae3SPaolo Bonzini 	 *    the W bit of the spte, which can be done out of mmu-lock.
3025c50d8ae3SPaolo Bonzini 	 *
3026c50d8ae3SPaolo Bonzini 	 * However, if access tracking is disabled we know that a non-present
3027c50d8ae3SPaolo Bonzini 	 * page must be a genuine page fault where we have to create a new SPTE.
3028c50d8ae3SPaolo Bonzini 	 * So, if access tracking is disabled, we return true only for write
3029c50d8ae3SPaolo Bonzini 	 * accesses to a present page.
3030c50d8ae3SPaolo Bonzini 	 */
3031c50d8ae3SPaolo Bonzini 
3032c50d8ae3SPaolo Bonzini 	return shadow_acc_track_mask != 0 ||
3033c50d8ae3SPaolo Bonzini 	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
3034c50d8ae3SPaolo Bonzini 		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3035c50d8ae3SPaolo Bonzini }
3036c50d8ae3SPaolo Bonzini 
3037c50d8ae3SPaolo Bonzini /*
3038c50d8ae3SPaolo Bonzini  * Returns true if the SPTE was fixed successfully. Otherwise,
3039c50d8ae3SPaolo Bonzini  * someone else modified the SPTE from its original value.
3040c50d8ae3SPaolo Bonzini  */
3041c50d8ae3SPaolo Bonzini static bool
3042c50d8ae3SPaolo Bonzini fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3043c50d8ae3SPaolo Bonzini 			u64 *sptep, u64 old_spte, u64 new_spte)
3044c50d8ae3SPaolo Bonzini {
3045c50d8ae3SPaolo Bonzini 	gfn_t gfn;
3046c50d8ae3SPaolo Bonzini 
3047c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
3048c50d8ae3SPaolo Bonzini 
3049c50d8ae3SPaolo Bonzini 	/*
3050c50d8ae3SPaolo Bonzini 	 * Theoretically we could also set dirty bit (and flush TLB) here in
3051c50d8ae3SPaolo Bonzini 	 * order to eliminate unnecessary PML logging. See comments in
3052c50d8ae3SPaolo Bonzini 	 * set_spte. But fast_page_fault is very unlikely to happen with PML
3053c50d8ae3SPaolo Bonzini 	 * enabled, so we do not do this. This might result in the same GPA
3054c50d8ae3SPaolo Bonzini 	 * being logged in the PML buffer again when the write really happens, and
3055c50d8ae3SPaolo Bonzini 	 * in mark_page_dirty eventually being called twice for it. But that does no
3056c50d8ae3SPaolo Bonzini 	 * harm. This also avoids the TLB flush needed after setting the dirty bit
3057c50d8ae3SPaolo Bonzini 	 * so non-PML cases won't be impacted.
3058c50d8ae3SPaolo Bonzini 	 *
3059c50d8ae3SPaolo Bonzini 	 * Compare with set_spte where instead shadow_dirty_mask is set.
3060c50d8ae3SPaolo Bonzini 	 */
3061c50d8ae3SPaolo Bonzini 	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3062c50d8ae3SPaolo Bonzini 		return false;
3063c50d8ae3SPaolo Bonzini 
3064c50d8ae3SPaolo Bonzini 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3065c50d8ae3SPaolo Bonzini 		/*
3066c50d8ae3SPaolo Bonzini 		 * The gfn of direct spte is stable since it is
3067c50d8ae3SPaolo Bonzini 		 * calculated from sp->gfn.
3068c50d8ae3SPaolo Bonzini 		 */
3069c50d8ae3SPaolo Bonzini 		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
3070c50d8ae3SPaolo Bonzini 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
3071c50d8ae3SPaolo Bonzini 	}
3072c50d8ae3SPaolo Bonzini 
3073c50d8ae3SPaolo Bonzini 	return true;
3074c50d8ae3SPaolo Bonzini }
3075c50d8ae3SPaolo Bonzini 
3076c50d8ae3SPaolo Bonzini static bool is_access_allowed(u32 fault_err_code, u64 spte)
3077c50d8ae3SPaolo Bonzini {
3078c50d8ae3SPaolo Bonzini 	if (fault_err_code & PFERR_FETCH_MASK)
3079c50d8ae3SPaolo Bonzini 		return is_executable_pte(spte);
3080c50d8ae3SPaolo Bonzini 
3081c50d8ae3SPaolo Bonzini 	if (fault_err_code & PFERR_WRITE_MASK)
3082c50d8ae3SPaolo Bonzini 		return is_writable_pte(spte);
3083c50d8ae3SPaolo Bonzini 
3084c50d8ae3SPaolo Bonzini 	/* Fault was on Read access */
3085c50d8ae3SPaolo Bonzini 	return spte & PT_PRESENT_MASK;
3086c50d8ae3SPaolo Bonzini }
3087c50d8ae3SPaolo Bonzini 
3088c50d8ae3SPaolo Bonzini /*
3089c4371c2aSSean Christopherson  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3090c50d8ae3SPaolo Bonzini  */
3091c4371c2aSSean Christopherson static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3092c50d8ae3SPaolo Bonzini 			   u32 error_code)
3093c50d8ae3SPaolo Bonzini {
3094c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
3095c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3096c4371c2aSSean Christopherson 	int ret = RET_PF_INVALID;
3097c50d8ae3SPaolo Bonzini 	u64 spte = 0ull;
3098c50d8ae3SPaolo Bonzini 	uint retry_count = 0;
3099c50d8ae3SPaolo Bonzini 
3100c50d8ae3SPaolo Bonzini 	if (!page_fault_can_be_fast(error_code))
3101c4371c2aSSean Christopherson 		return ret;
3102c50d8ae3SPaolo Bonzini 
3103c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3104c50d8ae3SPaolo Bonzini 
3105c50d8ae3SPaolo Bonzini 	do {
3106c50d8ae3SPaolo Bonzini 		u64 new_spte;
3107c50d8ae3SPaolo Bonzini 
3108736c291cSSean Christopherson 		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
3109f9fa2509SSean Christopherson 			if (!is_shadow_present_pte(spte))
3110c50d8ae3SPaolo Bonzini 				break;
3111c50d8ae3SPaolo Bonzini 
3112ec89e643SSean Christopherson 		if (!is_shadow_present_pte(spte))
3113ec89e643SSean Christopherson 			break;
3114ec89e643SSean Christopherson 
311557354682SSean Christopherson 		sp = sptep_to_sp(iterator.sptep);
3116c50d8ae3SPaolo Bonzini 		if (!is_last_spte(spte, sp->role.level))
3117c50d8ae3SPaolo Bonzini 			break;
3118c50d8ae3SPaolo Bonzini 
3119c50d8ae3SPaolo Bonzini 		/*
3120c50d8ae3SPaolo Bonzini 		 * Check whether the memory access that caused the fault would
3121c50d8ae3SPaolo Bonzini 		 * still cause it if it were to be performed right now. If not,
3122c50d8ae3SPaolo Bonzini 		 * then this is a spurious fault caused by the TLB being lazily flushed,
3123c50d8ae3SPaolo Bonzini 		 * or some other CPU has already fixed the PTE after the
3124c50d8ae3SPaolo Bonzini 		 * current CPU took the fault.
3125c50d8ae3SPaolo Bonzini 		 *
3126c50d8ae3SPaolo Bonzini 		 * Need not check the access of upper level table entries since
3127c50d8ae3SPaolo Bonzini 		 * they are always ACC_ALL.
3128c50d8ae3SPaolo Bonzini 		 */
3129c50d8ae3SPaolo Bonzini 		if (is_access_allowed(error_code, spte)) {
3130c4371c2aSSean Christopherson 			ret = RET_PF_SPURIOUS;
3131c50d8ae3SPaolo Bonzini 			break;
3132c50d8ae3SPaolo Bonzini 		}
3133c50d8ae3SPaolo Bonzini 
3134c50d8ae3SPaolo Bonzini 		new_spte = spte;
3135c50d8ae3SPaolo Bonzini 
3136c50d8ae3SPaolo Bonzini 		if (is_access_track_spte(spte))
3137c50d8ae3SPaolo Bonzini 			new_spte = restore_acc_track_spte(new_spte);
3138c50d8ae3SPaolo Bonzini 
3139c50d8ae3SPaolo Bonzini 		/*
3140c50d8ae3SPaolo Bonzini 		 * Currently, to simplify the code, write-protection can
3141c50d8ae3SPaolo Bonzini 		 * be removed in the fast path only if the SPTE was
3142c50d8ae3SPaolo Bonzini 		 * write-protected for dirty-logging or access tracking.
3143c50d8ae3SPaolo Bonzini 		 */
3144c50d8ae3SPaolo Bonzini 		if ((error_code & PFERR_WRITE_MASK) &&
3145e6302698SMiaohe Lin 		    spte_can_locklessly_be_made_writable(spte)) {
3146c50d8ae3SPaolo Bonzini 			new_spte |= PT_WRITABLE_MASK;
3147c50d8ae3SPaolo Bonzini 
3148c50d8ae3SPaolo Bonzini 			/*
3149c50d8ae3SPaolo Bonzini 			 * Do not fix write-permission on the large spte.  Since
3150c50d8ae3SPaolo Bonzini 			 * we only dirty the first page into the dirty-bitmap in
3151c50d8ae3SPaolo Bonzini 			 * fast_pf_fix_direct_spte(), other pages are missed
3152c50d8ae3SPaolo Bonzini 			 * if its slot has dirty logging enabled.
3153c50d8ae3SPaolo Bonzini 			 *
3154c50d8ae3SPaolo Bonzini 			 * Instead, we let the slow page fault path create a
3155c50d8ae3SPaolo Bonzini 			 * normal spte to fix the access.
3156c50d8ae3SPaolo Bonzini 			 *
3157c50d8ae3SPaolo Bonzini 			 * See the comments in kvm_arch_commit_memory_region().
3158c50d8ae3SPaolo Bonzini 			 */
31593bae0459SSean Christopherson 			if (sp->role.level > PG_LEVEL_4K)
3160c50d8ae3SPaolo Bonzini 				break;
3161c50d8ae3SPaolo Bonzini 		}
3162c50d8ae3SPaolo Bonzini 
3163c50d8ae3SPaolo Bonzini 		/* Verify that the fault can be handled in the fast path */
3164c50d8ae3SPaolo Bonzini 		if (new_spte == spte ||
3165c50d8ae3SPaolo Bonzini 		    !is_access_allowed(error_code, new_spte))
3166c50d8ae3SPaolo Bonzini 			break;
3167c50d8ae3SPaolo Bonzini 
3168c50d8ae3SPaolo Bonzini 		/*
3169c50d8ae3SPaolo Bonzini 		 * Currently, fast page fault only works for direct mapping
3170c50d8ae3SPaolo Bonzini 		 * since the gfn is not stable for indirect shadow page. See
31713ecad8c2SMauro Carvalho Chehab 		 * Documentation/virt/kvm/locking.rst to get more detail.
3172c50d8ae3SPaolo Bonzini 		 */
3173c4371c2aSSean Christopherson 		if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
3174c4371c2aSSean Christopherson 					    new_spte)) {
3175c4371c2aSSean Christopherson 			ret = RET_PF_FIXED;
3176c50d8ae3SPaolo Bonzini 			break;
3177c4371c2aSSean Christopherson 		}
3178c50d8ae3SPaolo Bonzini 
3179c50d8ae3SPaolo Bonzini 		if (++retry_count > 4) {
3180c50d8ae3SPaolo Bonzini 			printk_once(KERN_WARNING
3181c50d8ae3SPaolo Bonzini 				"kvm: Fast #PF retrying more than 4 times.\n");
3182c50d8ae3SPaolo Bonzini 			break;
3183c50d8ae3SPaolo Bonzini 		}
3184c50d8ae3SPaolo Bonzini 
3185c50d8ae3SPaolo Bonzini 	} while (true);
3186c50d8ae3SPaolo Bonzini 
3187736c291cSSean Christopherson 	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
3188c4371c2aSSean Christopherson 			      spte, ret);
3189c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3190c50d8ae3SPaolo Bonzini 
3191c4371c2aSSean Christopherson 	return ret;
3192c50d8ae3SPaolo Bonzini }
3193c50d8ae3SPaolo Bonzini 
3194c50d8ae3SPaolo Bonzini static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3195c50d8ae3SPaolo Bonzini 			       struct list_head *invalid_list)
3196c50d8ae3SPaolo Bonzini {
3197c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3198c50d8ae3SPaolo Bonzini 
3199c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(*root_hpa))
3200c50d8ae3SPaolo Bonzini 		return;
3201c50d8ae3SPaolo Bonzini 
3202e47c4aeeSSean Christopherson 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
320302c00b3aSBen Gardon 
3204897218ffSPaolo Bonzini 	if (is_tdp_mmu_page(sp))
32056103bc07SBen Gardon 		kvm_tdp_mmu_put_root(kvm, sp, false);
320676eb54e7SBen Gardon 	else if (!--sp->root_count && sp->role.invalid)
3207c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3208c50d8ae3SPaolo Bonzini 
3209c50d8ae3SPaolo Bonzini 	*root_hpa = INVALID_PAGE;
3210c50d8ae3SPaolo Bonzini }
3211c50d8ae3SPaolo Bonzini 
3212c50d8ae3SPaolo Bonzini /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3213c50d8ae3SPaolo Bonzini void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3214c50d8ae3SPaolo Bonzini 			ulong roots_to_free)
3215c50d8ae3SPaolo Bonzini {
32164d710de9SSean Christopherson 	struct kvm *kvm = vcpu->kvm;
3217c50d8ae3SPaolo Bonzini 	int i;
3218c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
3219c50d8ae3SPaolo Bonzini 	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3220c50d8ae3SPaolo Bonzini 
3221c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3222c50d8ae3SPaolo Bonzini 
3223c50d8ae3SPaolo Bonzini 	/* Before acquiring the MMU lock, see if we need to do any real work. */
3224c50d8ae3SPaolo Bonzini 	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
3225c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3226c50d8ae3SPaolo Bonzini 			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3227c50d8ae3SPaolo Bonzini 			    VALID_PAGE(mmu->prev_roots[i].hpa))
3228c50d8ae3SPaolo Bonzini 				break;
3229c50d8ae3SPaolo Bonzini 
3230c50d8ae3SPaolo Bonzini 		if (i == KVM_MMU_NUM_PREV_ROOTS)
3231c50d8ae3SPaolo Bonzini 			return;
3232c50d8ae3SPaolo Bonzini 	}
3233c50d8ae3SPaolo Bonzini 
3234531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
3235c50d8ae3SPaolo Bonzini 
3236c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3237c50d8ae3SPaolo Bonzini 		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
32384d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3239c50d8ae3SPaolo Bonzini 					   &invalid_list);
3240c50d8ae3SPaolo Bonzini 
3241c50d8ae3SPaolo Bonzini 	if (free_active_root) {
3242c50d8ae3SPaolo Bonzini 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3243c50d8ae3SPaolo Bonzini 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
32444d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
324504d45551SSean Christopherson 		} else if (mmu->pae_root) {
3246c834e5e4SSean Christopherson 			for (i = 0; i < 4; ++i) {
3247c834e5e4SSean Christopherson 				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3248c834e5e4SSean Christopherson 					continue;
3249c834e5e4SSean Christopherson 
3250c834e5e4SSean Christopherson 				mmu_free_root_page(kvm, &mmu->pae_root[i],
3251c50d8ae3SPaolo Bonzini 						   &invalid_list);
3252c834e5e4SSean Christopherson 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3253c50d8ae3SPaolo Bonzini 			}
3254c50d8ae3SPaolo Bonzini 		}
325504d45551SSean Christopherson 		mmu->root_hpa = INVALID_PAGE;
3256be01e8e2SSean Christopherson 		mmu->root_pgd = 0;
3257c50d8ae3SPaolo Bonzini 	}
3258c50d8ae3SPaolo Bonzini 
32594d710de9SSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
3260531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
3261c50d8ae3SPaolo Bonzini }
3262c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
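/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * dropping the current root together with every cached previous root means
 * OR-ing the KVM_MMU_ROOT_* flags together before the call.
 *
 *	ulong roots_to_free = KVM_MMU_ROOT_CURRENT;
 *	uint i;
 *
 *	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
 *		roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
 *	kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
 */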
3263c50d8ae3SPaolo Bonzini 
326425b62c62SSean Christopherson void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
326525b62c62SSean Christopherson {
326625b62c62SSean Christopherson 	unsigned long roots_to_free = 0;
326725b62c62SSean Christopherson 	hpa_t root_hpa;
326825b62c62SSean Christopherson 	int i;
326925b62c62SSean Christopherson 
327025b62c62SSean Christopherson 	/*
327125b62c62SSean Christopherson 	 * This should not be called while L2 is active; L2 can't invalidate
327225b62c62SSean Christopherson 	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
327325b62c62SSean Christopherson 	 */
327425b62c62SSean Christopherson 	WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
327525b62c62SSean Christopherson 
327625b62c62SSean Christopherson 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
327725b62c62SSean Christopherson 		root_hpa = mmu->prev_roots[i].hpa;
327825b62c62SSean Christopherson 		if (!VALID_PAGE(root_hpa))
327925b62c62SSean Christopherson 			continue;
328025b62c62SSean Christopherson 
328125b62c62SSean Christopherson 		if (!to_shadow_page(root_hpa) ||
328225b62c62SSean Christopherson 			to_shadow_page(root_hpa)->role.guest_mode)
328325b62c62SSean Christopherson 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
328425b62c62SSean Christopherson 	}
328525b62c62SSean Christopherson 
328625b62c62SSean Christopherson 	kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
328725b62c62SSean Christopherson }
328825b62c62SSean Christopherson EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
328925b62c62SSean Christopherson 
329025b62c62SSean Christopherson 
3291c50d8ae3SPaolo Bonzini static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3292c50d8ae3SPaolo Bonzini {
3293c50d8ae3SPaolo Bonzini 	int ret = 0;
3294c50d8ae3SPaolo Bonzini 
3295995decb6SVitaly Kuznetsov 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3296c50d8ae3SPaolo Bonzini 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3297c50d8ae3SPaolo Bonzini 		ret = 1;
3298c50d8ae3SPaolo Bonzini 	}
3299c50d8ae3SPaolo Bonzini 
3300c50d8ae3SPaolo Bonzini 	return ret;
3301c50d8ae3SPaolo Bonzini }
3302c50d8ae3SPaolo Bonzini 
33038123f265SSean Christopherson static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
33048123f265SSean Christopherson 			    u8 level, bool direct)
3305c50d8ae3SPaolo Bonzini {
3306c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
33078123f265SSean Christopherson 
33088123f265SSean Christopherson 	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
33098123f265SSean Christopherson 	++sp->root_count;
33108123f265SSean Christopherson 
33118123f265SSean Christopherson 	return __pa(sp->spt);
33128123f265SSean Christopherson }
33138123f265SSean Christopherson 
33148123f265SSean Christopherson static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
33158123f265SSean Christopherson {
3316b37233c9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3317b37233c9SSean Christopherson 	u8 shadow_root_level = mmu->shadow_root_level;
33188123f265SSean Christopherson 	hpa_t root;
3319c50d8ae3SPaolo Bonzini 	unsigned i;
33204a38162eSPaolo Bonzini 	int r;
33214a38162eSPaolo Bonzini 
33224a38162eSPaolo Bonzini 	write_lock(&vcpu->kvm->mmu_lock);
33234a38162eSPaolo Bonzini 	r = make_mmu_pages_available(vcpu);
33244a38162eSPaolo Bonzini 	if (r < 0)
33254a38162eSPaolo Bonzini 		goto out_unlock;
3326c50d8ae3SPaolo Bonzini 
3327897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(vcpu->kvm)) {
332802c00b3aSBen Gardon 		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3329b37233c9SSean Christopherson 		mmu->root_hpa = root;
333002c00b3aSBen Gardon 	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
33316e6ec584SSean Christopherson 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3332b37233c9SSean Christopherson 		mmu->root_hpa = root;
33338123f265SSean Christopherson 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
33344a38162eSPaolo Bonzini 		if (WARN_ON_ONCE(!mmu->pae_root)) {
33354a38162eSPaolo Bonzini 			r = -EIO;
33364a38162eSPaolo Bonzini 			goto out_unlock;
33374a38162eSPaolo Bonzini 		}
333873ad1606SSean Christopherson 
3339c50d8ae3SPaolo Bonzini 		for (i = 0; i < 4; ++i) {
3340c834e5e4SSean Christopherson 			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3341c50d8ae3SPaolo Bonzini 
33428123f265SSean Christopherson 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
33438123f265SSean Christopherson 					      i << 30, PT32_ROOT_LEVEL, true);
334417e368d9SSean Christopherson 			mmu->pae_root[i] = root | PT_PRESENT_MASK |
334517e368d9SSean Christopherson 					   shadow_me_mask;
3346c50d8ae3SPaolo Bonzini 		}
3347b37233c9SSean Christopherson 		mmu->root_hpa = __pa(mmu->pae_root);
334873ad1606SSean Christopherson 	} else {
334973ad1606SSean Christopherson 		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
33504a38162eSPaolo Bonzini 		r = -EIO;
33514a38162eSPaolo Bonzini 		goto out_unlock;
335273ad1606SSean Christopherson 	}
33533651c7fcSSean Christopherson 
3354be01e8e2SSean Christopherson 	/* root_pgd is ignored for direct MMUs. */
3355b37233c9SSean Christopherson 	mmu->root_pgd = 0;
33564a38162eSPaolo Bonzini out_unlock:
33574a38162eSPaolo Bonzini 	write_unlock(&vcpu->kvm->mmu_lock);
33584a38162eSPaolo Bonzini 	return r;
3359c50d8ae3SPaolo Bonzini }
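/*
 * A worked example of the PAE root encoding above (illustrative values,
 * assuming shadow_me_mask == 0): if the shadow table for entry i lands at
 * physical address 0x123456000, then
 *
 *	mmu->pae_root[i] == 0x123456000 | PT_PRESENT_MASK == 0x123456001
 *
 * i.e. each entry is a ready-to-use PDPTE pointing at a PT32_ROOT_LEVEL
 * shadow table.
 */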
3360c50d8ae3SPaolo Bonzini 
3361c50d8ae3SPaolo Bonzini static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3362c50d8ae3SPaolo Bonzini {
3363b37233c9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
33646e0918aeSSean Christopherson 	u64 pdptrs[4], pm_mask;
3365be01e8e2SSean Christopherson 	gfn_t root_gfn, root_pgd;
33668123f265SSean Christopherson 	hpa_t root;
33674a38162eSPaolo Bonzini 	unsigned i;
33684a38162eSPaolo Bonzini 	int r;
3369c50d8ae3SPaolo Bonzini 
3370b37233c9SSean Christopherson 	root_pgd = mmu->get_guest_pgd(vcpu);
3371be01e8e2SSean Christopherson 	root_gfn = root_pgd >> PAGE_SHIFT;
3372c50d8ae3SPaolo Bonzini 
3373c50d8ae3SPaolo Bonzini 	if (mmu_check_root(vcpu, root_gfn))
3374c50d8ae3SPaolo Bonzini 		return 1;
3375c50d8ae3SPaolo Bonzini 
3376c50d8ae3SPaolo Bonzini 	/*
33774a38162eSPaolo Bonzini 	 * On SVM, reading PDPTRs might access guest memory, which might fault
33784a38162eSPaolo Bonzini 	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
33794a38162eSPaolo Bonzini 	 */
33806e0918aeSSean Christopherson 	if (mmu->root_level == PT32E_ROOT_LEVEL) {
33816e0918aeSSean Christopherson 		for (i = 0; i < 4; ++i) {
33826e0918aeSSean Christopherson 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
33836e0918aeSSean Christopherson 			if (!(pdptrs[i] & PT_PRESENT_MASK))
33846e0918aeSSean Christopherson 				continue;
33856e0918aeSSean Christopherson 
33866e0918aeSSean Christopherson 			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
33876e0918aeSSean Christopherson 				return 1;
33886e0918aeSSean Christopherson 		}
33896e0918aeSSean Christopherson 	}
33906e0918aeSSean Christopherson 
3391d501f747SBen Gardon 	r = alloc_all_memslots_rmaps(vcpu->kvm);
3392d501f747SBen Gardon 	if (r)
3393d501f747SBen Gardon 		return r;
3394d501f747SBen Gardon 
33954a38162eSPaolo Bonzini 	write_lock(&vcpu->kvm->mmu_lock);
33964a38162eSPaolo Bonzini 	r = make_mmu_pages_available(vcpu);
33974a38162eSPaolo Bonzini 	if (r < 0)
33984a38162eSPaolo Bonzini 		goto out_unlock;
33994a38162eSPaolo Bonzini 
3400c50d8ae3SPaolo Bonzini 	/*
3401c50d8ae3SPaolo Bonzini 	 * Do we shadow a long mode page table? If so, we need to
3402c50d8ae3SPaolo Bonzini 	 * write-protect the guest's page table root.
3403c50d8ae3SPaolo Bonzini 	 */
3404b37233c9SSean Christopherson 	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
34058123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, 0,
3406b37233c9SSean Christopherson 				      mmu->shadow_root_level, false);
3407b37233c9SSean Christopherson 		mmu->root_hpa = root;
3408be01e8e2SSean Christopherson 		goto set_root_pgd;
3409c50d8ae3SPaolo Bonzini 	}
3410c50d8ae3SPaolo Bonzini 
34114a38162eSPaolo Bonzini 	if (WARN_ON_ONCE(!mmu->pae_root)) {
34124a38162eSPaolo Bonzini 		r = -EIO;
34134a38162eSPaolo Bonzini 		goto out_unlock;
34144a38162eSPaolo Bonzini 	}
341573ad1606SSean Christopherson 
3416c50d8ae3SPaolo Bonzini 	/*
3417c50d8ae3SPaolo Bonzini 	 * We shadow a 32-bit page table. This may be a legacy 2-level
3418c50d8ae3SPaolo Bonzini 	 * or a PAE 3-level page table. In either case we need to be aware that
3419c50d8ae3SPaolo Bonzini 	 * the shadow page table may be a PAE or a long mode page table.
3420c50d8ae3SPaolo Bonzini 	 */
342117e368d9SSean Christopherson 	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3422748e52b9SSean Christopherson 	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
3423c50d8ae3SPaolo Bonzini 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3424c50d8ae3SPaolo Bonzini 
342503ca4589SSean Christopherson 		if (WARN_ON_ONCE(!mmu->pml4_root)) {
34264a38162eSPaolo Bonzini 			r = -EIO;
34274a38162eSPaolo Bonzini 			goto out_unlock;
34284a38162eSPaolo Bonzini 		}
342973ad1606SSean Christopherson 
343003ca4589SSean Christopherson 		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
343104d45551SSean Christopherson 	}
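	/*
	 * At this point, for a 4-level shadow MMU, the hierarchy being built
	 * looks like (sketch):
	 *
	 *	pml4_root[0] --> pae_root[0..3] --> per-PDPTE shadow tables
	 *
	 * i.e. the special pml4_root is a one-entry top level whose only job
	 * is to point at the four PAE roots filled in below.
	 */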
343204d45551SSean Christopherson 
3433c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3434c834e5e4SSean Christopherson 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
34356e6ec584SSean Christopherson 
3436b37233c9SSean Christopherson 		if (mmu->root_level == PT32E_ROOT_LEVEL) {
34376e0918aeSSean Christopherson 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3438c834e5e4SSean Christopherson 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3439c50d8ae3SPaolo Bonzini 				continue;
3440c50d8ae3SPaolo Bonzini 			}
34416e0918aeSSean Christopherson 			root_gfn = pdptrs[i] >> PAGE_SHIFT;
3442c50d8ae3SPaolo Bonzini 		}
3443c50d8ae3SPaolo Bonzini 
34448123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
34458123f265SSean Christopherson 				      PT32_ROOT_LEVEL, false);
3446b37233c9SSean Christopherson 		mmu->pae_root[i] = root | pm_mask;
3447c50d8ae3SPaolo Bonzini 	}
3448c50d8ae3SPaolo Bonzini 
3449ba0a194fSSean Christopherson 	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
345003ca4589SSean Christopherson 		mmu->root_hpa = __pa(mmu->pml4_root);
3451ba0a194fSSean Christopherson 	else
3452ba0a194fSSean Christopherson 		mmu->root_hpa = __pa(mmu->pae_root);
3453c50d8ae3SPaolo Bonzini 
3454be01e8e2SSean Christopherson set_root_pgd:
3455b37233c9SSean Christopherson 	mmu->root_pgd = root_pgd;
34564a38162eSPaolo Bonzini out_unlock:
34574a38162eSPaolo Bonzini 	write_unlock(&vcpu->kvm->mmu_lock);
3458c50d8ae3SPaolo Bonzini 
3459c50d8ae3SPaolo Bonzini 	return 0;
3460c50d8ae3SPaolo Bonzini }
3461c50d8ae3SPaolo Bonzini 
3462748e52b9SSean Christopherson static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3463c50d8ae3SPaolo Bonzini {
3464748e52b9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
346503ca4589SSean Christopherson 	u64 *pml4_root, *pae_root;
3466748e52b9SSean Christopherson 
3467748e52b9SSean Christopherson 	/*
3468748e52b9SSean Christopherson 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3469748e52b9SSean Christopherson 	 * tables are allocated and initialized at root creation as there is no
3470748e52b9SSean Christopherson 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
3471748e52b9SSean Christopherson 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3472748e52b9SSean Christopherson 	 */
3473748e52b9SSean Christopherson 	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
3474748e52b9SSean Christopherson 	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
3475748e52b9SSean Christopherson 		return 0;
3476748e52b9SSean Christopherson 
3477748e52b9SSean Christopherson 	/*
3478748e52b9SSean Christopherson 	 * This mess only works with 4-level paging and needs to be updated to
3479748e52b9SSean Christopherson 	 * work with 5-level paging.
3480748e52b9SSean Christopherson 	 */
3481748e52b9SSean Christopherson 	if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
3482748e52b9SSean Christopherson 		return -EIO;
3483748e52b9SSean Christopherson 
348403ca4589SSean Christopherson 	if (mmu->pae_root && mmu->pml4_root)
3485748e52b9SSean Christopherson 		return 0;
3486748e52b9SSean Christopherson 
3487748e52b9SSean Christopherson 	/*
3488748e52b9SSean Christopherson 	 * The special roots should always be allocated in concert.  Yell and
3489748e52b9SSean Christopherson 	 * bail if KVM ends up in a state where only one of the roots is valid.
3490748e52b9SSean Christopherson 	 */
349103ca4589SSean Christopherson 	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root))
3492748e52b9SSean Christopherson 		return -EIO;
3493748e52b9SSean Christopherson 
34944a98623dSSean Christopherson 	/*
34954a98623dSSean Christopherson 	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
34964a98623dSSean Christopherson 	 * doesn't need to be decrypted.
34974a98623dSSean Christopherson 	 */
3498748e52b9SSean Christopherson 	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3499748e52b9SSean Christopherson 	if (!pae_root)
3500748e52b9SSean Christopherson 		return -ENOMEM;
3501748e52b9SSean Christopherson 
350203ca4589SSean Christopherson 	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
350303ca4589SSean Christopherson 	if (!pml4_root) {
3504748e52b9SSean Christopherson 		free_page((unsigned long)pae_root);
3505748e52b9SSean Christopherson 		return -ENOMEM;
3506748e52b9SSean Christopherson 	}
3507748e52b9SSean Christopherson 
3508748e52b9SSean Christopherson 	mmu->pae_root = pae_root;
350903ca4589SSean Christopherson 	mmu->pml4_root = pml4_root;
3510748e52b9SSean Christopherson 
3511748e52b9SSean Christopherson 	return 0;
3512c50d8ae3SPaolo Bonzini }
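/*
 * Example of when the special roots come into play (illustrative): a
 * 64-bit host with shadow_root_level == PT64_ROOT_4LEVEL running a 32-bit
 * PAE L1 VMM (root_level == PT32E_ROOT_LEVEL) has no guest PML4 to shadow,
 * so pml4_root and pae_root are synthesized above and stitched together in
 * mmu_alloc_shadow_roots().
 */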
3513c50d8ae3SPaolo Bonzini 
3514c50d8ae3SPaolo Bonzini void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3515c50d8ae3SPaolo Bonzini {
3516c50d8ae3SPaolo Bonzini 	int i;
3517c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3518c50d8ae3SPaolo Bonzini 
3519c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
3520c50d8ae3SPaolo Bonzini 		return;
3521c50d8ae3SPaolo Bonzini 
3522c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3523c50d8ae3SPaolo Bonzini 		return;
3524c50d8ae3SPaolo Bonzini 
3525c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3526c50d8ae3SPaolo Bonzini 
3527c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3528c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->root_hpa;
3529e47c4aeeSSean Christopherson 		sp = to_shadow_page(root);
3530c50d8ae3SPaolo Bonzini 
3531c50d8ae3SPaolo Bonzini 		/*
3532c50d8ae3SPaolo Bonzini 		 * Even if another CPU was marking the SP as unsync-ed
3533c50d8ae3SPaolo Bonzini 		 * simultaneously, any guest page table changes are not
3534c50d8ae3SPaolo Bonzini 		 * guaranteed to be visible anyway until this VCPU issues a TLB
3535c50d8ae3SPaolo Bonzini 		 * flush strictly after those changes are made. We only need to
3536c50d8ae3SPaolo Bonzini 		 * ensure that the other CPU sets these flags before any actual
3537c50d8ae3SPaolo Bonzini 		 * changes to the page tables are made. The comments in
35380337f585SSean Christopherson 		 * mmu_try_to_unsync_pages() describe what could go wrong if
35390337f585SSean Christopherson 		 * this requirement isn't satisfied.
3540c50d8ae3SPaolo Bonzini 		 */
3541c50d8ae3SPaolo Bonzini 		if (!smp_load_acquire(&sp->unsync) &&
3542c50d8ae3SPaolo Bonzini 		    !smp_load_acquire(&sp->unsync_children))
3543c50d8ae3SPaolo Bonzini 			return;
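		/*
		 * Sketch of the pairing (per the comment above, assuming the
		 * ordering in mmu_try_to_unsync_pages()): the unsyncer does
		 * roughly
		 *
		 *	sp->unsync = true;
		 *	smp_wmb();
		 *	<make SPTEs writable>
		 *
		 * so the acquire loads above are guaranteed to observe the
		 * flags once any guest PTE modification can be observed.
		 */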
3544c50d8ae3SPaolo Bonzini 
3545531810caSBen Gardon 		write_lock(&vcpu->kvm->mmu_lock);
3546c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3547c50d8ae3SPaolo Bonzini 
3548c50d8ae3SPaolo Bonzini 		mmu_sync_children(vcpu, sp);
3549c50d8ae3SPaolo Bonzini 
3550c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3551531810caSBen Gardon 		write_unlock(&vcpu->kvm->mmu_lock);
3552c50d8ae3SPaolo Bonzini 		return;
3553c50d8ae3SPaolo Bonzini 	}
3554c50d8ae3SPaolo Bonzini 
3555531810caSBen Gardon 	write_lock(&vcpu->kvm->mmu_lock);
3556c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3557c50d8ae3SPaolo Bonzini 
3558c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3559c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->pae_root[i];
3560c50d8ae3SPaolo Bonzini 
3561c834e5e4SSean Christopherson 		if (IS_VALID_PAE_ROOT(root)) {
3562c50d8ae3SPaolo Bonzini 			root &= PT64_BASE_ADDR_MASK;
3563e47c4aeeSSean Christopherson 			sp = to_shadow_page(root);
3564c50d8ae3SPaolo Bonzini 			mmu_sync_children(vcpu, sp);
3565c50d8ae3SPaolo Bonzini 		}
3566c50d8ae3SPaolo Bonzini 	}
3567c50d8ae3SPaolo Bonzini 
3568c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3569531810caSBen Gardon 	write_unlock(&vcpu->kvm->mmu_lock);
3570c50d8ae3SPaolo Bonzini }
3571c50d8ae3SPaolo Bonzini 
3572736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3573c50d8ae3SPaolo Bonzini 				  u32 access, struct x86_exception *exception)
3574c50d8ae3SPaolo Bonzini {
3575c50d8ae3SPaolo Bonzini 	if (exception)
3576c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3577c50d8ae3SPaolo Bonzini 	return vaddr;
3578c50d8ae3SPaolo Bonzini }
3579c50d8ae3SPaolo Bonzini 
3580736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3581c50d8ae3SPaolo Bonzini 					 u32 access,
3582c50d8ae3SPaolo Bonzini 					 struct x86_exception *exception)
3583c50d8ae3SPaolo Bonzini {
3584c50d8ae3SPaolo Bonzini 	if (exception)
3585c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3586c50d8ae3SPaolo Bonzini 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3587c50d8ae3SPaolo Bonzini }
3588c50d8ae3SPaolo Bonzini 
3589c50d8ae3SPaolo Bonzini static bool
3590c50d8ae3SPaolo Bonzini __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
3591c50d8ae3SPaolo Bonzini {
3592b5c3c1b3SSean Christopherson 	int bit7 = (pte >> 7) & 1;
3593c50d8ae3SPaolo Bonzini 
3594b5c3c1b3SSean Christopherson 	return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
3595c50d8ae3SPaolo Bonzini }
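/*
 * For example, checking a level-2 PTE with bit 7 (PS) set consults
 * rsvd_bits_mask[1][1], the large-page mask; the same PTE with PS clear
 * consults rsvd_bits_mask[0][1] instead.
 */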
3596c50d8ae3SPaolo Bonzini 
3597b5c3c1b3SSean Christopherson static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
3598c50d8ae3SPaolo Bonzini {
3599b5c3c1b3SSean Christopherson 	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
3600c50d8ae3SPaolo Bonzini }
3601c50d8ae3SPaolo Bonzini 
3602c50d8ae3SPaolo Bonzini static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3603c50d8ae3SPaolo Bonzini {
3604c50d8ae3SPaolo Bonzini 	/*
3605c50d8ae3SPaolo Bonzini 	 * A nested guest cannot use the MMIO cache if it is using nested
3606c50d8ae3SPaolo Bonzini 	 * page tables, because cr2 is an nGPA while the cache stores GPAs.
3607c50d8ae3SPaolo Bonzini 	 */
3608c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
3609c50d8ae3SPaolo Bonzini 		return false;
3610c50d8ae3SPaolo Bonzini 
3611c50d8ae3SPaolo Bonzini 	if (direct)
3612c50d8ae3SPaolo Bonzini 		return vcpu_match_mmio_gpa(vcpu, addr);
3613c50d8ae3SPaolo Bonzini 
3614c50d8ae3SPaolo Bonzini 	return vcpu_match_mmio_gva(vcpu, addr);
3615c50d8ae3SPaolo Bonzini }
3616c50d8ae3SPaolo Bonzini 
361795fb5b02SBen Gardon /*
361895fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
361995fb5b02SBen Gardon  * That SPTE may be non-present.
362095fb5b02SBen Gardon  */
362139b4d43eSSean Christopherson static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3622c50d8ae3SPaolo Bonzini {
3623c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
36242aa07893SSean Christopherson 	int leaf = -1;
362595fb5b02SBen Gardon 	u64 spte;
3626c50d8ae3SPaolo Bonzini 
3627c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3628c50d8ae3SPaolo Bonzini 
362939b4d43eSSean Christopherson 	for (shadow_walk_init(&iterator, vcpu, addr),
363039b4d43eSSean Christopherson 	     *root_level = iterator.level;
3631c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&iterator);
3632c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&iterator, spte)) {
363395fb5b02SBen Gardon 		leaf = iterator.level;
3634c50d8ae3SPaolo Bonzini 		spte = mmu_spte_get_lockless(iterator.sptep);
3635c50d8ae3SPaolo Bonzini 
3636dde81f94SSean Christopherson 		sptes[leaf] = spte;
3637c50d8ae3SPaolo Bonzini 
3638c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(spte))
3639c50d8ae3SPaolo Bonzini 			break;
364095fb5b02SBen Gardon 	}
364195fb5b02SBen Gardon 
364295fb5b02SBen Gardon 	walk_shadow_page_lockless_end(vcpu);
364395fb5b02SBen Gardon 
364495fb5b02SBen Gardon 	return leaf;
364595fb5b02SBen Gardon }
364695fb5b02SBen Gardon 
36479aa41879SSean Christopherson /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
364895fb5b02SBen Gardon static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
364995fb5b02SBen Gardon {
3650dde81f94SSean Christopherson 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
365195fb5b02SBen Gardon 	struct rsvd_bits_validate *rsvd_check;
365239b4d43eSSean Christopherson 	int root, leaf, level;
365395fb5b02SBen Gardon 	bool reserved = false;
365495fb5b02SBen Gardon 
365563c0cac9SDavid Matlack 	if (is_tdp_mmu(vcpu->arch.mmu))
365639b4d43eSSean Christopherson 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
365795fb5b02SBen Gardon 	else
365839b4d43eSSean Christopherson 		leaf = get_walk(vcpu, addr, sptes, &root);
365995fb5b02SBen Gardon 
36602aa07893SSean Christopherson 	if (unlikely(leaf < 0)) {
36612aa07893SSean Christopherson 		*sptep = 0ull;
36622aa07893SSean Christopherson 		return reserved;
36632aa07893SSean Christopherson 	}
36642aa07893SSean Christopherson 
36659aa41879SSean Christopherson 	*sptep = sptes[leaf];
36669aa41879SSean Christopherson 
36679aa41879SSean Christopherson 	/*
36689aa41879SSean Christopherson 	 * Skip reserved bits checks on the terminal leaf if it's not a valid
36699aa41879SSean Christopherson 	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
36709aa41879SSean Christopherson 	 * design, always have reserved bits set.  The purpose of the checks is
36719aa41879SSean Christopherson 	 * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
36729aa41879SSean Christopherson 	 */
36739aa41879SSean Christopherson 	if (!is_shadow_present_pte(sptes[leaf]))
36749aa41879SSean Christopherson 		leaf++;
367595fb5b02SBen Gardon 
367695fb5b02SBen Gardon 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
367795fb5b02SBen Gardon 
36789aa41879SSean Christopherson 	for (level = root; level >= leaf; level--)
3679b5c3c1b3SSean Christopherson 		/*
3680b5c3c1b3SSean Christopherson 		 * Use a bitwise-OR instead of a logical-OR to aggregate the
3681b5c3c1b3SSean Christopherson 		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
3682b5c3c1b3SSean Christopherson 		 * adding a Jcc in the loop.
3683b5c3c1b3SSean Christopherson 		 */
3684dde81f94SSean Christopherson 		reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
3685dde81f94SSean Christopherson 			    __is_rsvd_bits_set(rsvd_check, sptes[level], level);
3686c50d8ae3SPaolo Bonzini 
3687c50d8ae3SPaolo Bonzini 	if (reserved) {
3688bb4cdf3aSSean Christopherson 		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3689c50d8ae3SPaolo Bonzini 		       __func__, addr);
369095fb5b02SBen Gardon 		for (level = root; level >= leaf; level--)
3691bb4cdf3aSSean Christopherson 			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
3692bb4cdf3aSSean Christopherson 			       sptes[level], level,
3693bb4cdf3aSSean Christopherson 			       rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]);
3694c50d8ae3SPaolo Bonzini 	}
3695ddce6208SSean Christopherson 
3696c50d8ae3SPaolo Bonzini 	return reserved;
3697c50d8ae3SPaolo Bonzini }
3698c50d8ae3SPaolo Bonzini 
3699c50d8ae3SPaolo Bonzini static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3700c50d8ae3SPaolo Bonzini {
3701c50d8ae3SPaolo Bonzini 	u64 spte;
3702c50d8ae3SPaolo Bonzini 	bool reserved;
3703c50d8ae3SPaolo Bonzini 
3704c50d8ae3SPaolo Bonzini 	if (mmio_info_in_cache(vcpu, addr, direct))
3705c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3706c50d8ae3SPaolo Bonzini 
370795fb5b02SBen Gardon 	reserved = get_mmio_spte(vcpu, addr, &spte);
3708c50d8ae3SPaolo Bonzini 	if (WARN_ON(reserved))
3709c50d8ae3SPaolo Bonzini 		return -EINVAL;
3710c50d8ae3SPaolo Bonzini 
3711c50d8ae3SPaolo Bonzini 	if (is_mmio_spte(spte)) {
3712c50d8ae3SPaolo Bonzini 		gfn_t gfn = get_mmio_spte_gfn(spte);
37130a2b64c5SBen Gardon 		unsigned int access = get_mmio_spte_access(spte);
3714c50d8ae3SPaolo Bonzini 
3715c50d8ae3SPaolo Bonzini 		if (!check_mmio_spte(vcpu, spte))
3716c50d8ae3SPaolo Bonzini 			return RET_PF_INVALID;
3717c50d8ae3SPaolo Bonzini 
3718c50d8ae3SPaolo Bonzini 		if (direct)
3719c50d8ae3SPaolo Bonzini 			addr = 0;
3720c50d8ae3SPaolo Bonzini 
3721c50d8ae3SPaolo Bonzini 		trace_handle_mmio_page_fault(addr, gfn, access);
3722c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3723c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3724c50d8ae3SPaolo Bonzini 	}
3725c50d8ae3SPaolo Bonzini 
3726c50d8ae3SPaolo Bonzini 	/*
3727c50d8ae3SPaolo Bonzini 	 * If the page table has been zapped by another CPU, let the vCPU
3728c50d8ae3SPaolo Bonzini 	 * fault again on the address.
3729c50d8ae3SPaolo Bonzini 	 */
3730c50d8ae3SPaolo Bonzini 	return RET_PF_RETRY;
3731c50d8ae3SPaolo Bonzini }
3732c50d8ae3SPaolo Bonzini 
3733c50d8ae3SPaolo Bonzini static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3734c50d8ae3SPaolo Bonzini 					 u32 error_code, gfn_t gfn)
3735c50d8ae3SPaolo Bonzini {
3736c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK))
3737c50d8ae3SPaolo Bonzini 		return false;
3738c50d8ae3SPaolo Bonzini 
3739c50d8ae3SPaolo Bonzini 	if (!(error_code & PFERR_PRESENT_MASK) ||
3740c50d8ae3SPaolo Bonzini 	      !(error_code & PFERR_WRITE_MASK))
3741c50d8ae3SPaolo Bonzini 		return false;
3742c50d8ae3SPaolo Bonzini 
3743c50d8ae3SPaolo Bonzini 	/*
3744c50d8ae3SPaolo Bonzini 	 * The guest is writing a page that is write-tracked, which the
3745c50d8ae3SPaolo Bonzini 	 * page fault handler cannot fix.
3746c50d8ae3SPaolo Bonzini 	 */
3747c50d8ae3SPaolo Bonzini 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
3748c50d8ae3SPaolo Bonzini 		return true;
3749c50d8ae3SPaolo Bonzini 
3750c50d8ae3SPaolo Bonzini 	return false;
3751c50d8ae3SPaolo Bonzini }
3752c50d8ae3SPaolo Bonzini 
3753c50d8ae3SPaolo Bonzini static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3754c50d8ae3SPaolo Bonzini {
3755c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
3756c50d8ae3SPaolo Bonzini 	u64 spte;
3757c50d8ae3SPaolo Bonzini 
3758c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3759c50d8ae3SPaolo Bonzini 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3760c50d8ae3SPaolo Bonzini 		clear_sp_write_flooding_count(iterator.sptep);
3761c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(spte))
3762c50d8ae3SPaolo Bonzini 			break;
3763c50d8ae3SPaolo Bonzini 	}
3764c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3765c50d8ae3SPaolo Bonzini }
3766c50d8ae3SPaolo Bonzini 
3767e8c22266SVitaly Kuznetsov static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
37689f1a8526SSean Christopherson 				    gfn_t gfn)
3769c50d8ae3SPaolo Bonzini {
3770c50d8ae3SPaolo Bonzini 	struct kvm_arch_async_pf arch;
3771c50d8ae3SPaolo Bonzini 
3772c50d8ae3SPaolo Bonzini 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3773c50d8ae3SPaolo Bonzini 	arch.gfn = gfn;
3774c50d8ae3SPaolo Bonzini 	arch.direct_map = vcpu->arch.mmu->direct_map;
3775d8dd54e0SSean Christopherson 	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3776c50d8ae3SPaolo Bonzini 
37779f1a8526SSean Christopherson 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
37789f1a8526SSean Christopherson 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3779c50d8ae3SPaolo Bonzini }
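/*
 * A worked example of the token packing above (illustrative numbers): with
 * vcpu_id == 3 and apf.id == 5 at the time of the fault, the token is
 * (5 << 12) | 3 == 0x5003, i.e. the low 12 bits carry the vCPU id and the
 * upper bits a per-vCPU sequence number.
 */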
3780c50d8ae3SPaolo Bonzini 
3781c50d8ae3SPaolo Bonzini static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
37824a42d848SDavid Stevens 			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
37834a42d848SDavid Stevens 			 bool write, bool *writable)
3784c50d8ae3SPaolo Bonzini {
3785c36b7150SPaolo Bonzini 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3786c50d8ae3SPaolo Bonzini 	bool async;
3787c50d8ae3SPaolo Bonzini 
3788e0c37868SSean Christopherson 	/*
3789e0c37868SSean Christopherson 	 * Retry the page fault if the gfn hit a memslot that is being deleted
3790e0c37868SSean Christopherson 	 * or moved.  This ensures any existing SPTEs for the old memslot will
3791e0c37868SSean Christopherson 	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3792e0c37868SSean Christopherson 	 */
3793e0c37868SSean Christopherson 	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
3794e0c37868SSean Christopherson 		return true;
3795e0c37868SSean Christopherson 
3796c36b7150SPaolo Bonzini 	/* Don't expose private memslots to L2. */
3797c36b7150SPaolo Bonzini 	if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
3798c50d8ae3SPaolo Bonzini 		*pfn = KVM_PFN_NOSLOT;
3799c583eed6SSean Christopherson 		*writable = false;
3800c50d8ae3SPaolo Bonzini 		return false;
3801c50d8ae3SPaolo Bonzini 	}
3802c50d8ae3SPaolo Bonzini 
3803c50d8ae3SPaolo Bonzini 	async = false;
38044a42d848SDavid Stevens 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async,
38054a42d848SDavid Stevens 				    write, writable, hva);
3806c50d8ae3SPaolo Bonzini 	if (!async)
3807c50d8ae3SPaolo Bonzini 		return false; /* *pfn has correct page already */
3808c50d8ae3SPaolo Bonzini 
3809c50d8ae3SPaolo Bonzini 	if (!prefault && kvm_can_do_async_pf(vcpu)) {
38109f1a8526SSean Christopherson 		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
3811c50d8ae3SPaolo Bonzini 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
38129f1a8526SSean Christopherson 			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
3813c50d8ae3SPaolo Bonzini 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3814c50d8ae3SPaolo Bonzini 			return true;
38159f1a8526SSean Christopherson 		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
3816c50d8ae3SPaolo Bonzini 			return true;
3817c50d8ae3SPaolo Bonzini 	}
3818c50d8ae3SPaolo Bonzini 
38194a42d848SDavid Stevens 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
38204a42d848SDavid Stevens 				    write, writable, hva);
3821c50d8ae3SPaolo Bonzini 	return false;
3822c50d8ae3SPaolo Bonzini }
3823c50d8ae3SPaolo Bonzini 
38240f90e1c1SSean Christopherson static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
38250f90e1c1SSean Christopherson 			     bool prefault, int max_level, bool is_tdp)
3826c50d8ae3SPaolo Bonzini {
382763c0cac9SDavid Matlack 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
3828367fd790SSean Christopherson 	bool write = error_code & PFERR_WRITE_MASK;
38290f90e1c1SSean Christopherson 	bool map_writable;
3830c50d8ae3SPaolo Bonzini 
38310f90e1c1SSean Christopherson 	gfn_t gfn = gpa >> PAGE_SHIFT;
38320f90e1c1SSean Christopherson 	unsigned long mmu_seq;
38330f90e1c1SSean Christopherson 	kvm_pfn_t pfn;
38344a42d848SDavid Stevens 	hva_t hva;
383583f06fa7SSean Christopherson 	int r;
3836c50d8ae3SPaolo Bonzini 
3837c50d8ae3SPaolo Bonzini 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
3838c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3839c50d8ae3SPaolo Bonzini 
38400b873fd7SDavid Matlack 	if (!is_tdp_mmu_fault) {
3841c4371c2aSSean Christopherson 		r = fast_page_fault(vcpu, gpa, error_code);
3842c4371c2aSSean Christopherson 		if (r != RET_PF_INVALID)
3843c4371c2aSSean Christopherson 			return r;
3844bb18842eSBen Gardon 	}
384583291445SSean Christopherson 
3846378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, false);
3847c50d8ae3SPaolo Bonzini 	if (r)
3848c50d8ae3SPaolo Bonzini 		return r;
3849c50d8ae3SPaolo Bonzini 
3850367fd790SSean Christopherson 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
3851367fd790SSean Christopherson 	smp_rmb();
3852367fd790SSean Christopherson 
38534a42d848SDavid Stevens 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva,
38544a42d848SDavid Stevens 			 write, &map_writable))
3855367fd790SSean Christopherson 		return RET_PF_RETRY;
3856367fd790SSean Christopherson 
38570f90e1c1SSean Christopherson 	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
3858367fd790SSean Christopherson 		return r;
3859367fd790SSean Christopherson 
3860367fd790SSean Christopherson 	r = RET_PF_RETRY;
3861a2855afcSBen Gardon 
38620b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
3863a2855afcSBen Gardon 		read_lock(&vcpu->kvm->mmu_lock);
3864a2855afcSBen Gardon 	else
3865531810caSBen Gardon 		write_lock(&vcpu->kvm->mmu_lock);
3866a2855afcSBen Gardon 
38674a42d848SDavid Stevens 	if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
3868367fd790SSean Christopherson 		goto out_unlock;
38697bd7ded6SSean Christopherson 	r = make_mmu_pages_available(vcpu);
38707bd7ded6SSean Christopherson 	if (r)
3871367fd790SSean Christopherson 		goto out_unlock;
3872bb18842eSBen Gardon 
38730b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
3874bb18842eSBen Gardon 		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
3875bb18842eSBen Gardon 				    pfn, prefault);
3876bb18842eSBen Gardon 	else
38776c2fd34fSSean Christopherson 		r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
38786c2fd34fSSean Christopherson 				 prefault, is_tdp);
38790f90e1c1SSean Christopherson 
3880367fd790SSean Christopherson out_unlock:
38810b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
3882a2855afcSBen Gardon 		read_unlock(&vcpu->kvm->mmu_lock);
3883a2855afcSBen Gardon 	else
3884531810caSBen Gardon 		write_unlock(&vcpu->kvm->mmu_lock);
3885367fd790SSean Christopherson 	kvm_release_pfn_clean(pfn);
3886367fd790SSean Christopherson 	return r;
3887c50d8ae3SPaolo Bonzini }
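/*
 * Sketch of the invalidation race that the mmu_seq snapshot above closes
 * (illustrative interleaving):
 *
 *	vCPU					mmu notifier
 *	----					------------
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	pfn = try_async_pf(...);
 *						invalidate range, seq++
 *	write_lock(mmu_lock);
 *	mmu_notifier_retry_hva() == true	--> retry, don't map stale pfn
 */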
3888c50d8ae3SPaolo Bonzini 
38890f90e1c1SSean Christopherson static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
38900f90e1c1SSean Christopherson 				u32 error_code, bool prefault)
38910f90e1c1SSean Christopherson {
38920f90e1c1SSean Christopherson 	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
38930f90e1c1SSean Christopherson 
38950f90e1c1SSean Christopherson 	/* This path builds a PAE pagetable; 2MB pages are the largest we can map. */
38950f90e1c1SSean Christopherson 	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
38963bae0459SSean Christopherson 				 PG_LEVEL_2M, false);
38970f90e1c1SSean Christopherson }
38980f90e1c1SSean Christopherson 
3899c50d8ae3SPaolo Bonzini int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3900c50d8ae3SPaolo Bonzini 				u64 fault_address, char *insn, int insn_len)
3901c50d8ae3SPaolo Bonzini {
3902c50d8ae3SPaolo Bonzini 	int r = 1;
39039ce372b3SVitaly Kuznetsov 	u32 flags = vcpu->arch.apf.host_apf_flags;
3904c50d8ae3SPaolo Bonzini 
3905736c291cSSean Christopherson #ifndef CONFIG_X86_64
3906736c291cSSean Christopherson 	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
3907736c291cSSean Christopherson 	if (WARN_ON_ONCE(fault_address >> 32))
3908736c291cSSean Christopherson 		return -EFAULT;
3909736c291cSSean Christopherson #endif
3910736c291cSSean Christopherson 
3911c50d8ae3SPaolo Bonzini 	vcpu->arch.l1tf_flush_l1d = true;
39129ce372b3SVitaly Kuznetsov 	if (!flags) {
3913c50d8ae3SPaolo Bonzini 		trace_kvm_page_fault(fault_address, error_code);
3914c50d8ae3SPaolo Bonzini 
3915c50d8ae3SPaolo Bonzini 		if (kvm_event_needs_reinjection(vcpu))
3916c50d8ae3SPaolo Bonzini 			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
3917c50d8ae3SPaolo Bonzini 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
3918c50d8ae3SPaolo Bonzini 				insn_len);
39199ce372b3SVitaly Kuznetsov 	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
392068fd66f1SVitaly Kuznetsov 		vcpu->arch.apf.host_apf_flags = 0;
3921c50d8ae3SPaolo Bonzini 		local_irq_disable();
39226bca69adSThomas Gleixner 		kvm_async_pf_task_wait_schedule(fault_address);
3923c50d8ae3SPaolo Bonzini 		local_irq_enable();
39249ce372b3SVitaly Kuznetsov 	} else {
39259ce372b3SVitaly Kuznetsov 		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
3926c50d8ae3SPaolo Bonzini 	}
39279ce372b3SVitaly Kuznetsov 
3928c50d8ae3SPaolo Bonzini 	return r;
3929c50d8ae3SPaolo Bonzini }
3930c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
3931c50d8ae3SPaolo Bonzini 
39327a02674dSSean Christopherson int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
3933c50d8ae3SPaolo Bonzini 		       bool prefault)
3934c50d8ae3SPaolo Bonzini {
3935cb9b88c6SSean Christopherson 	int max_level;
3936c50d8ae3SPaolo Bonzini 
3937e662ec3eSSean Christopherson 	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
39383bae0459SSean Christopherson 	     max_level > PG_LEVEL_4K;
3939cb9b88c6SSean Christopherson 	     max_level--) {
3940cb9b88c6SSean Christopherson 		int page_num = KVM_PAGES_PER_HPAGE(max_level);
39410f90e1c1SSean Christopherson 		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
3942c50d8ae3SPaolo Bonzini 
3943cb9b88c6SSean Christopherson 		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
3944cb9b88c6SSean Christopherson 			break;
3945c50d8ae3SPaolo Bonzini 	}
3946c50d8ae3SPaolo Bonzini 
39470f90e1c1SSean Christopherson 	return direct_page_fault(vcpu, gpa, error_code, prefault,
39480f90e1c1SSean Christopherson 				 max_level, true);
3949c50d8ae3SPaolo Bonzini }
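/*
 * Example of the level walk above (illustrative): if the gpa's 1GB-aligned
 * range spans MTRRs with different memory types, the consistency check
 * fails at max_level == PG_LEVEL_1G and the walk retries with PG_LEVEL_2M
 * before falling back to PG_LEVEL_4K.
 */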
3950c50d8ae3SPaolo Bonzini 
395184a16226SSean Christopherson static void nonpaging_init_context(struct kvm_mmu *context)
3952c50d8ae3SPaolo Bonzini {
3953c50d8ae3SPaolo Bonzini 	context->page_fault = nonpaging_page_fault;
3954c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = nonpaging_gva_to_gpa;
3955c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
39565efac074SPaolo Bonzini 	context->invlpg = NULL;
3957c50d8ae3SPaolo Bonzini 	context->root_level = 0;
3958c50d8ae3SPaolo Bonzini 	context->direct_map = true;
3959c50d8ae3SPaolo Bonzini 	context->nx = false;
3960c50d8ae3SPaolo Bonzini }
3961c50d8ae3SPaolo Bonzini 
3962be01e8e2SSean Christopherson static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
39630be44352SSean Christopherson 				  union kvm_mmu_page_role role)
39640be44352SSean Christopherson {
3965be01e8e2SSean Christopherson 	return (role.direct || pgd == root->pgd) &&
3966e47c4aeeSSean Christopherson 	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
3967e47c4aeeSSean Christopherson 	       role.word == to_shadow_page(root->hpa)->role.word;
39680be44352SSean Christopherson }
39690be44352SSean Christopherson 
3970c50d8ae3SPaolo Bonzini /*
3971be01e8e2SSean Christopherson  * Find out if a previously cached root matching the new pgd/role is available.
3972c50d8ae3SPaolo Bonzini  * The current root is also inserted into the cache.
3973c50d8ae3SPaolo Bonzini  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
3974c50d8ae3SPaolo Bonzini  * returned.
3975c50d8ae3SPaolo Bonzini  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
3976c50d8ae3SPaolo Bonzini  * false is returned. This root should now be freed by the caller.
3977c50d8ae3SPaolo Bonzini  */
3978be01e8e2SSean Christopherson static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3979c50d8ae3SPaolo Bonzini 				  union kvm_mmu_page_role new_role)
3980c50d8ae3SPaolo Bonzini {
3981c50d8ae3SPaolo Bonzini 	uint i;
3982c50d8ae3SPaolo Bonzini 	struct kvm_mmu_root_info root;
3983c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3984c50d8ae3SPaolo Bonzini 
3985be01e8e2SSean Christopherson 	root.pgd = mmu->root_pgd;
3986c50d8ae3SPaolo Bonzini 	root.hpa = mmu->root_hpa;
3987c50d8ae3SPaolo Bonzini 
3988be01e8e2SSean Christopherson 	if (is_root_usable(&root, new_pgd, new_role))
39890be44352SSean Christopherson 		return true;
39900be44352SSean Christopherson 
3991c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3992c50d8ae3SPaolo Bonzini 		swap(root, mmu->prev_roots[i]);
3993c50d8ae3SPaolo Bonzini 
3994be01e8e2SSean Christopherson 		if (is_root_usable(&root, new_pgd, new_role))
3995c50d8ae3SPaolo Bonzini 			break;
3996c50d8ae3SPaolo Bonzini 	}
3997c50d8ae3SPaolo Bonzini 
3998c50d8ae3SPaolo Bonzini 	mmu->root_hpa = root.hpa;
3999be01e8e2SSean Christopherson 	mmu->root_pgd = root.pgd;
4000c50d8ae3SPaolo Bonzini 
4001c50d8ae3SPaolo Bonzini 	return i < KVM_MMU_NUM_PREV_ROOTS;
4002c50d8ae3SPaolo Bonzini }
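/*
 * Illustrative walk of the swap loop above: with prev_roots == {A, B, C},
 * a non-matching current root R, and B as the target, the loop leaves
 * prev_roots == {R, A, C} and mmu->root_hpa == B, i.e. the cache acts as
 * an LRU list with the outgoing root inserted at the head.
 */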
4003c50d8ae3SPaolo Bonzini 
4004be01e8e2SSean Christopherson static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4005b869855bSSean Christopherson 			    union kvm_mmu_page_role new_role)
4006c50d8ae3SPaolo Bonzini {
4007c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4008c50d8ae3SPaolo Bonzini 
4009c50d8ae3SPaolo Bonzini 	/*
4010c50d8ae3SPaolo Bonzini 	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
4011c50d8ae3SPaolo Bonzini 	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
4012c50d8ae3SPaolo Bonzini 	 * later if necessary.
4013c50d8ae3SPaolo Bonzini 	 */
4014c50d8ae3SPaolo Bonzini 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
4015b869855bSSean Christopherson 	    mmu->root_level >= PT64_ROOT_4LEVEL)
4016fe9304d3SVitaly Kuznetsov 		return cached_root_available(vcpu, new_pgd, new_role);
4017c50d8ae3SPaolo Bonzini 
4018c50d8ae3SPaolo Bonzini 	return false;
4019c50d8ae3SPaolo Bonzini }
4020c50d8ae3SPaolo Bonzini 
4021be01e8e2SSean Christopherson static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4022b5129100SSean Christopherson 			      union kvm_mmu_page_role new_role)
4023c50d8ae3SPaolo Bonzini {
4024be01e8e2SSean Christopherson 	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
4025b869855bSSean Christopherson 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
4026b869855bSSean Christopherson 		return;
4027c50d8ae3SPaolo Bonzini 	}
4028c50d8ae3SPaolo Bonzini 
4029c50d8ae3SPaolo Bonzini 	/*
4030b869855bSSean Christopherson 	 * It's possible that the cached previous root page is obsolete because
4031b869855bSSean Christopherson 	 * of a change in the MMU generation number. However, changing the
4032b869855bSSean Christopherson 	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
4033b869855bSSean Christopherson 	 * free the root set here and allocate a new one.
4034b869855bSSean Christopherson 	 */
4035b869855bSSean Christopherson 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4036b869855bSSean Christopherson 
4037b5129100SSean Christopherson 	if (force_flush_and_sync_on_reuse) {
4038b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4039b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4040b5129100SSean Christopherson 	}
4041b869855bSSean Christopherson 
4042b869855bSSean Christopherson 	/*
4043b869855bSSean Christopherson 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4044b869855bSSean Christopherson 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
4045b869855bSSean Christopherson 	 * valid. So clear any cached MMIO info even when we don't need to sync
4046b869855bSSean Christopherson 	 * the shadow page tables.
4047c50d8ae3SPaolo Bonzini 	 */
4048c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4049c50d8ae3SPaolo Bonzini 
4050daa5b6c1SBen Gardon 	/*
4051daa5b6c1SBen Gardon 	 * If this is a direct root page, it doesn't have a write flooding
4052daa5b6c1SBen Gardon 	 * count. Otherwise, clear the write flooding count.
4053daa5b6c1SBen Gardon 	 */
4054daa5b6c1SBen Gardon 	if (!new_role.direct)
4055daa5b6c1SBen Gardon 		__clear_sp_write_flooding_count(
4056daa5b6c1SBen Gardon 				to_shadow_page(vcpu->arch.mmu->root_hpa));
4057c50d8ae3SPaolo Bonzini }
4058c50d8ae3SPaolo Bonzini 
4059b5129100SSean Christopherson void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4060c50d8ae3SPaolo Bonzini {
4061b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
4062c50d8ae3SPaolo Bonzini }
4063be01e8e2SSean Christopherson EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4064c50d8ae3SPaolo Bonzini 
4065c50d8ae3SPaolo Bonzini static unsigned long get_cr3(struct kvm_vcpu *vcpu)
4066c50d8ae3SPaolo Bonzini {
4067c50d8ae3SPaolo Bonzini 	return kvm_read_cr3(vcpu);
4068c50d8ae3SPaolo Bonzini }
4069c50d8ae3SPaolo Bonzini 
4070c50d8ae3SPaolo Bonzini static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
40710a2b64c5SBen Gardon 			   unsigned int access, int *nr_present)
4072c50d8ae3SPaolo Bonzini {
4073c50d8ae3SPaolo Bonzini 	if (unlikely(is_mmio_spte(*sptep))) {
4074c50d8ae3SPaolo Bonzini 		if (gfn != get_mmio_spte_gfn(*sptep)) {
4075c50d8ae3SPaolo Bonzini 			mmu_spte_clear_no_track(sptep);
4076c50d8ae3SPaolo Bonzini 			return true;
4077c50d8ae3SPaolo Bonzini 		}
4078c50d8ae3SPaolo Bonzini 
4079c50d8ae3SPaolo Bonzini 		(*nr_present)++;
4080c50d8ae3SPaolo Bonzini 		mark_mmio_spte(vcpu, sptep, gfn, access);
4081c50d8ae3SPaolo Bonzini 		return true;
4082c50d8ae3SPaolo Bonzini 	}
4083c50d8ae3SPaolo Bonzini 
4084c50d8ae3SPaolo Bonzini 	return false;
4085c50d8ae3SPaolo Bonzini }
4086c50d8ae3SPaolo Bonzini 
4087c50d8ae3SPaolo Bonzini static inline bool is_last_gpte(struct kvm_mmu *mmu,
4088c50d8ae3SPaolo Bonzini 				unsigned level, unsigned gpte)
4089c50d8ae3SPaolo Bonzini {
4090c50d8ae3SPaolo Bonzini 	/*
4091c50d8ae3SPaolo Bonzini 	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
4092c50d8ae3SPaolo Bonzini 	 * If it is clear, there are no large pages at this level, so clear
4093c50d8ae3SPaolo Bonzini 	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
4094c50d8ae3SPaolo Bonzini 	 */
4095c50d8ae3SPaolo Bonzini 	gpte &= level - mmu->last_nonleaf_level;
4096c50d8ae3SPaolo Bonzini 
4097c50d8ae3SPaolo Bonzini 	/*
40983bae0459SSean Christopherson 	 * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
40993bae0459SSean Christopherson 	 * iff level <= PG_LEVEL_4K, which for our purpose means
41003bae0459SSean Christopherson 	 * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
4101c50d8ae3SPaolo Bonzini 	 */
41023bae0459SSean Christopherson 	gpte |= level - PG_LEVEL_4K - 1;
4103c50d8ae3SPaolo Bonzini 
4104c50d8ae3SPaolo Bonzini 	return gpte & PT_PAGE_SIZE_MASK;
4105c50d8ae3SPaolo Bonzini }
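/*
 * Worked example (PG_LEVEL_4K == 1): for a level-1 gpte,
 * "level - PG_LEVEL_4K - 1" is (unsigned)-1, so bit 7 is forced on and the
 * gpte always terminates.  For a level-2 gpte under non-PSE 32-bit paging,
 * last_nonleaf_level == 2 and "level - last_nonleaf_level" is 0, so the
 * AND clears PT_PAGE_SIZE_MASK and the gpte is never treated as a large
 * page.
 */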
4106c50d8ae3SPaolo Bonzini 
4107c50d8ae3SPaolo Bonzini #define PTTYPE_EPT 18 /* arbitrary */
4108c50d8ae3SPaolo Bonzini #define PTTYPE PTTYPE_EPT
4109c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4110c50d8ae3SPaolo Bonzini #undef PTTYPE
4111c50d8ae3SPaolo Bonzini 
4112c50d8ae3SPaolo Bonzini #define PTTYPE 64
4113c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4114c50d8ae3SPaolo Bonzini #undef PTTYPE
4115c50d8ae3SPaolo Bonzini 
4116c50d8ae3SPaolo Bonzini #define PTTYPE 32
4117c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4118c50d8ae3SPaolo Bonzini #undef PTTYPE
4119c50d8ae3SPaolo Bonzini 
4120c50d8ae3SPaolo Bonzini static void
4121b705a277SSean Christopherson __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
41225b7f575cSSean Christopherson 			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4123c50d8ae3SPaolo Bonzini 			bool pse, bool amd)
4124c50d8ae3SPaolo Bonzini {
4125c50d8ae3SPaolo Bonzini 	u64 gbpages_bit_rsvd = 0;
4126c50d8ae3SPaolo Bonzini 	u64 nonleaf_bit8_rsvd = 0;
41275b7f575cSSean Christopherson 	u64 high_bits_rsvd;
4128c50d8ae3SPaolo Bonzini 
4129c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = 0;
4130c50d8ae3SPaolo Bonzini 
4131c50d8ae3SPaolo Bonzini 	if (!gbpages)
4132c50d8ae3SPaolo Bonzini 		gbpages_bit_rsvd = rsvd_bits(7, 7);
4133c50d8ae3SPaolo Bonzini 
41345b7f575cSSean Christopherson 	if (level == PT32E_ROOT_LEVEL)
41355b7f575cSSean Christopherson 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
41365b7f575cSSean Christopherson 	else
41375b7f575cSSean Christopherson 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
41385b7f575cSSean Christopherson 
41395b7f575cSSean Christopherson 	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
41405b7f575cSSean Christopherson 	/* Note, NX doesn't exist in PDPTEs; this is handled below. */
41415b7f575cSSean Christopherson 		high_bits_rsvd |= rsvd_bits(63, 63);
41425b7f575cSSean Christopherson 
4143c50d8ae3SPaolo Bonzini 	/*
4144c50d8ae3SPaolo Bonzini 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4145c50d8ae3SPaolo Bonzini 	 * leaf entries) on AMD CPUs only.
4146c50d8ae3SPaolo Bonzini 	 */
4147c50d8ae3SPaolo Bonzini 	if (amd)
4148c50d8ae3SPaolo Bonzini 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4149c50d8ae3SPaolo Bonzini 
4150c50d8ae3SPaolo Bonzini 	switch (level) {
4151c50d8ae3SPaolo Bonzini 	case PT32_ROOT_LEVEL:
4152c50d8ae3SPaolo Bonzini 		/* no rsvd bits for 2 level 4K page table entries */
4153c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][1] = 0;
4154c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][0] = 0;
4155c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4156c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4157c50d8ae3SPaolo Bonzini 
4158c50d8ae3SPaolo Bonzini 		if (!pse) {
4159c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = 0;
4160c50d8ae3SPaolo Bonzini 			break;
4161c50d8ae3SPaolo Bonzini 		}
4162c50d8ae3SPaolo Bonzini 
4163c50d8ae3SPaolo Bonzini 		if (is_cpuid_PSE36())
4164c50d8ae3SPaolo Bonzini 			/* 36bits PSE 4MB page */
4165c50d8ae3SPaolo Bonzini 			/* 36-bit PSE 4MB page */
4166c50d8ae3SPaolo Bonzini 		else
4167c50d8ae3SPaolo Bonzini 			/* 32 bits PSE 4MB page */
4168c50d8ae3SPaolo Bonzini 			/* 32-bit PSE 4MB page */
4169c50d8ae3SPaolo Bonzini 		break;
4170c50d8ae3SPaolo Bonzini 	case PT32E_ROOT_LEVEL:
41715b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
41725b7f575cSSean Christopherson 						   high_bits_rsvd |
41735b7f575cSSean Christopherson 						   rsvd_bits(5, 8) |
41745b7f575cSSean Christopherson 						   rsvd_bits(1, 2);	/* PDPTE */
41755b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
41765b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
41775b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4178c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 20);	/* large page */
4179c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4180c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4181c50d8ae3SPaolo Bonzini 		break;
4182c50d8ae3SPaolo Bonzini 	case PT64_ROOT_5LEVEL:
41835b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
41845b7f575cSSean Christopherson 						   nonleaf_bit8_rsvd |
41855b7f575cSSean Christopherson 						   rsvd_bits(7, 7);
4186c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][4] =
4187c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][4];
4188df561f66SGustavo A. R. Silva 		fallthrough;
4189c50d8ae3SPaolo Bonzini 	case PT64_ROOT_4LEVEL:
41905b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
41915b7f575cSSean Christopherson 						   nonleaf_bit8_rsvd |
41925b7f575cSSean Christopherson 						   rsvd_bits(7, 7);
41935b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
41945b7f575cSSean Christopherson 						   gbpages_bit_rsvd;
41955b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
41965b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4197c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][3] =
4198c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][3];
41995b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
42005b7f575cSSean Christopherson 						   gbpages_bit_rsvd |
4201c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 29);
42025b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4203c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 20); /* large page */
4204c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4205c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4206c50d8ae3SPaolo Bonzini 		break;
4207c50d8ae3SPaolo Bonzini 	}
4208c50d8ae3SPaolo Bonzini }
4209c50d8ae3SPaolo Bonzini 
4210c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4211c50d8ae3SPaolo Bonzini 				  struct kvm_mmu *context)
4212c50d8ae3SPaolo Bonzini {
4213b705a277SSean Christopherson 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
42145b7f575cSSean Christopherson 				vcpu->arch.reserved_gpa_bits,
4215*90599c28SSean Christopherson 				context->root_level, is_efer_nx(context),
4216c50d8ae3SPaolo Bonzini 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
42174e9c0d80SSean Christopherson 				is_cr4_pse(context),
421823493d0aSSean Christopherson 				guest_cpuid_is_amd_or_hygon(vcpu));
4219c50d8ae3SPaolo Bonzini }
4220c50d8ae3SPaolo Bonzini 
4221c50d8ae3SPaolo Bonzini static void
4222c50d8ae3SPaolo Bonzini __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
42235b7f575cSSean Christopherson 			    u64 pa_bits_rsvd, bool execonly)
4224c50d8ae3SPaolo Bonzini {
42255b7f575cSSean Christopherson 	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4226c50d8ae3SPaolo Bonzini 	u64 bad_mt_xwr;
4227c50d8ae3SPaolo Bonzini 
42285b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
42295b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
42305b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
42315b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
42325b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4233c50d8ae3SPaolo Bonzini 
4234c50d8ae3SPaolo Bonzini 	/* large page */
4235c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4236c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
42375b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
42385b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
4239c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4240c50d8ae3SPaolo Bonzini 
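	/*
	 * bad_mt_xwr is a 64-bit bitmap indexed by the low 6 bits of an EPT
	 * entry (XWR permissions in bits 2:0, memory type in bits 5:3); a set
	 * bit marks that combination as illegal.  A consumer would look
	 * roughly like this sketch (illustrative, not the actual helper):
	 *
	 *	static bool is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
	 *				  u64 pte)
	 *	{
	 *		return !!(rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f));
	 *	}
	 */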
4241c50d8ae3SPaolo Bonzini 	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
4242c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
4243c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
4244c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
4245c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
4246c50d8ae3SPaolo Bonzini 	if (!execonly) {
4247c50d8ae3SPaolo Bonzini 		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
4248c50d8ae3SPaolo Bonzini 		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4249c50d8ae3SPaolo Bonzini 	}
4250c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4251c50d8ae3SPaolo Bonzini }
4252c50d8ae3SPaolo Bonzini 
4253c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4254c50d8ae3SPaolo Bonzini 		struct kvm_mmu *context, bool execonly)
4255c50d8ae3SPaolo Bonzini {
4256c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
42575b7f575cSSean Christopherson 				    vcpu->arch.reserved_gpa_bits, execonly);
4258c50d8ae3SPaolo Bonzini }
4259c50d8ae3SPaolo Bonzini 
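/*
 * Host-physical address bits at or above shadow_phys_bits are reserved,
 * e.g. with shadow_phys_bits == 46, bits 63:46 of an SPTE must be zero
 * (the memory-encryption mask is carved back out by the callers below).
 */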
42606f8e65a6SSean Christopherson static inline u64 reserved_hpa_bits(void)
42616f8e65a6SSean Christopherson {
42626f8e65a6SSean Christopherson 	return rsvd_bits(shadow_phys_bits, 63);
42636f8e65a6SSean Christopherson }
42646f8e65a6SSean Christopherson 
4265c50d8ae3SPaolo Bonzini /*
4266c50d8ae3SPaolo Bonzini  * The page table on the host is the shadow page table for the page
4267c50d8ae3SPaolo Bonzini  * table in the guest or an AMD nested guest; its MMU features
4268c50d8ae3SPaolo Bonzini  * completely follow the features in the guest.
4269c50d8ae3SPaolo Bonzini  */
427016be1d12SSean Christopherson static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
427116be1d12SSean Christopherson 					struct kvm_mmu *context)
4272c50d8ae3SPaolo Bonzini {
4273112022bdSSean Christopherson 	/*
4274112022bdSSean Christopherson 	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
4275112022bdSSean Christopherson 	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
4276112022bdSSean Christopherson 	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
4277112022bdSSean Christopherson 	 * The iTLB multi-hit workaround can be toggled at any time, so assume
4278112022bdSSean Christopherson 	 * NX can be used by any non-nested shadow MMU to avoid having to reset
4279112022bdSSean Christopherson 	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
4280112022bdSSean Christopherson 	 */
4281*90599c28SSean Christopherson 	bool uses_nx = is_efer_nx(context) || !tdp_enabled;
42828c985b2dSSean Christopherson 
42838c985b2dSSean Christopherson 	/* @amd gates a reserved-bit check on bit 8 of SPTEs, which KVM shouldn't use anyways. */
42848c985b2dSSean Christopherson 	bool is_amd = true;
42858c985b2dSSean Christopherson 	/* KVM doesn't use 2-level page tables for the shadow MMU. */
42868c985b2dSSean Christopherson 	bool is_pse = false;
4287c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4288c50d8ae3SPaolo Bonzini 	int i;
4289c50d8ae3SPaolo Bonzini 
42908c985b2dSSean Christopherson 	WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
42918c985b2dSSean Christopherson 
4292c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4293b705a277SSean Christopherson 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4294c50d8ae3SPaolo Bonzini 				context->shadow_root_level, uses_nx,
4295c50d8ae3SPaolo Bonzini 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
42968c985b2dSSean Christopherson 				is_pse, is_amd);
4297c50d8ae3SPaolo Bonzini 
4298c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4299c50d8ae3SPaolo Bonzini 		return;
4300c50d8ae3SPaolo Bonzini 
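	/*
	 * shadow_me_mask is the memory-encryption bit (e.g. AMD SME's C-bit),
	 * which lies within the host physical-address range and is set in
	 * SPTEs, so it must not be reported as a reserved bit.
	 */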
4301c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4302c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4303c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4304c50d8ae3SPaolo Bonzini 	}
4306c50d8ae3SPaolo Bonzini }
4307c50d8ae3SPaolo Bonzini 
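/*
 * Inferred rationale (not spelled out in the code): with TDP enabled, only
 * VMX/EPT defines a dedicated executable permission bit, so shadow_x_mask
 * == 0 implies the NPT (AMD) format, which uses NX instead.
 */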
4308c50d8ae3SPaolo Bonzini static inline bool boot_cpu_is_amd(void)
4309c50d8ae3SPaolo Bonzini {
4310c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!tdp_enabled);
4311c50d8ae3SPaolo Bonzini 	return shadow_x_mask == 0;
4312c50d8ae3SPaolo Bonzini }
4313c50d8ae3SPaolo Bonzini 
4314c50d8ae3SPaolo Bonzini /*
4315c50d8ae3SPaolo Bonzini  * This is the direct page table on the host; use as many MMU features as
4316c50d8ae3SPaolo Bonzini  * possible.  However, KVM currently does not do execution-protection.
4317c50d8ae3SPaolo Bonzini  */
4318c50d8ae3SPaolo Bonzini static void
4319c50d8ae3SPaolo Bonzini reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4320c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context)
4321c50d8ae3SPaolo Bonzini {
4322c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4323c50d8ae3SPaolo Bonzini 	int i;
4324c50d8ae3SPaolo Bonzini 
4325c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4326c50d8ae3SPaolo Bonzini 
4327c50d8ae3SPaolo Bonzini 	if (boot_cpu_is_amd())
4328b705a277SSean Christopherson 		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4329c50d8ae3SPaolo Bonzini 					context->shadow_root_level, false,
4330c50d8ae3SPaolo Bonzini 					boot_cpu_has(X86_FEATURE_GBPAGES),
43318c985b2dSSean Christopherson 					false, true);
4332c50d8ae3SPaolo Bonzini 	else
4333c50d8ae3SPaolo Bonzini 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
43346f8e65a6SSean Christopherson 					    reserved_hpa_bits(), false);
4335c50d8ae3SPaolo Bonzini 
4336c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4337c50d8ae3SPaolo Bonzini 		return;
4338c50d8ae3SPaolo Bonzini 
4339c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4340c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4341c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4342c50d8ae3SPaolo Bonzini 	}
4343c50d8ae3SPaolo Bonzini }
4344c50d8ae3SPaolo Bonzini 
4345c50d8ae3SPaolo Bonzini /*
4346c50d8ae3SPaolo Bonzini  * Same as the comments in reset_shadow_zero_bits_mask(), except this
4347c50d8ae3SPaolo Bonzini  * is the shadow page table for an Intel nested guest.
4348c50d8ae3SPaolo Bonzini  */
4349c50d8ae3SPaolo Bonzini static void
4350c50d8ae3SPaolo Bonzini reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4351c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context, bool execonly)
4352c50d8ae3SPaolo Bonzini {
4353c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
43546f8e65a6SSean Christopherson 				    reserved_hpa_bits(), execonly);
4355c50d8ae3SPaolo Bonzini }
4356c50d8ae3SPaolo Bonzini 
4357c50d8ae3SPaolo Bonzini #define BYTE_MASK(access) \
4358c50d8ae3SPaolo Bonzini 	((1 & (access) ? 2 : 0) | \
4359c50d8ae3SPaolo Bonzini 	 (2 & (access) ? 4 : 0) | \
4360c50d8ae3SPaolo Bonzini 	 (3 & (access) ? 8 : 0) | \
4361c50d8ae3SPaolo Bonzini 	 (4 & (access) ? 16 : 0) | \
4362c50d8ae3SPaolo Bonzini 	 (5 & (access) ? 32 : 0) | \
4363c50d8ae3SPaolo Bonzini 	 (6 & (access) ? 64 : 0) | \
4364c50d8ae3SPaolo Bonzini 	 (7 & (access) ? 128 : 0))
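/*
 * BYTE_MASK(access) sets bit i (for i = 1..7) iff UWX combination i shares
 * a bit with @access.  Assuming the usual encodings ACC_EXEC_MASK == 1,
 * ACC_WRITE_MASK == 2 and ACC_USER_MASK == 4, this gives:
 *
 *	BYTE_MASK(ACC_EXEC_MASK)  == 0xaa	(--X, -WX, U-X, UWX)
 *	BYTE_MASK(ACC_WRITE_MASK) == 0xcc	(-W-, -WX, UW-, UWX)
 *	BYTE_MASK(ACC_USER_MASK)  == 0xf0	(U--, U-X, UW-, UWX)
 */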
4365c50d8ae3SPaolo Bonzini 
4366c50d8ae3SPaolo Bonzini 
4367c596f147SSean Christopherson static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
4368c50d8ae3SPaolo Bonzini {
4369c50d8ae3SPaolo Bonzini 	unsigned byte;
4370c50d8ae3SPaolo Bonzini 
4371c50d8ae3SPaolo Bonzini 	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4372c50d8ae3SPaolo Bonzini 	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4373c50d8ae3SPaolo Bonzini 	const u8 u = BYTE_MASK(ACC_USER_MASK);
4374c50d8ae3SPaolo Bonzini 
4375c596f147SSean Christopherson 	bool cr4_smep = is_cr4_smep(mmu);
4376c596f147SSean Christopherson 	bool cr4_smap = is_cr4_smap(mmu);
4377c596f147SSean Christopherson 	bool cr0_wp = is_cr0_wp(mmu);
4378*90599c28SSean Christopherson 	bool efer_nx = is_efer_nx(mmu);
4379c50d8ae3SPaolo Bonzini 
4380c50d8ae3SPaolo Bonzini 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4381c50d8ae3SPaolo Bonzini 		unsigned pfec = byte << 1;
4382c50d8ae3SPaolo Bonzini 
4383c50d8ae3SPaolo Bonzini 		/*
4384c50d8ae3SPaolo Bonzini 		 * Each "*f" variable has a 1 bit for each UWX value
4385c50d8ae3SPaolo Bonzini 		 * that causes a fault with the given PFEC.
4386c50d8ae3SPaolo Bonzini 		 */
4387c50d8ae3SPaolo Bonzini 
4388c50d8ae3SPaolo Bonzini 		/* Faults from writes to non-writable pages */
4389c50d8ae3SPaolo Bonzini 		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4390c50d8ae3SPaolo Bonzini 		/* Faults from user mode accesses to supervisor pages */
4391c50d8ae3SPaolo Bonzini 		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4392c50d8ae3SPaolo Bonzini 		/* Faults from fetches of non-executable pages */
4393c50d8ae3SPaolo Bonzini 		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4394c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode fetches of user pages */
4395c50d8ae3SPaolo Bonzini 		u8 smepf = 0;
4396c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode accesses of user pages */
4397c50d8ae3SPaolo Bonzini 		u8 smapf = 0;
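		/*
		 * Worked example: for a write fault (PFEC.W=1), wf = ~w =
		 * ~0xcc = 0x33, i.e. exactly the UWX combinations lacking
		 * W (---, --X, U--, U-X) fault.
		 */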
4398c50d8ae3SPaolo Bonzini 
4399c50d8ae3SPaolo Bonzini 		if (!ept) {
4400c50d8ae3SPaolo Bonzini 			/* Faults from kernel mode accesses to user pages */
4401c50d8ae3SPaolo Bonzini 			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4402c50d8ae3SPaolo Bonzini 
4403c50d8ae3SPaolo Bonzini 			/* Not really needed: !nx will cause pte.nx to fault */
4404*90599c28SSean Christopherson 			if (!efer_nx)
4405c50d8ae3SPaolo Bonzini 				ff = 0;
4406c50d8ae3SPaolo Bonzini 
4407c50d8ae3SPaolo Bonzini 			/* Allow supervisor writes if !cr0.wp */
4408c50d8ae3SPaolo Bonzini 			if (!cr0_wp)
4409c50d8ae3SPaolo Bonzini 				wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4410c50d8ae3SPaolo Bonzini 
4411c50d8ae3SPaolo Bonzini 			/* Disallow supervisor fetches of user code if cr4.smep */
4412c50d8ae3SPaolo Bonzini 			if (cr4_smep)
4413c50d8ae3SPaolo Bonzini 				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4414c50d8ae3SPaolo Bonzini 
4415c50d8ae3SPaolo Bonzini 			/*
4416c50d8ae3SPaolo Bonzini 			 * SMAP: kernel-mode data accesses from user-mode
4417c50d8ae3SPaolo Bonzini 			 * mappings should fault.  A fault is considered
4418c50d8ae3SPaolo Bonzini 			 * an SMAP violation if all of the following
4419c50d8ae3SPaolo Bonzini 			 * conditions are true:
4420c50d8ae3SPaolo Bonzini 			 *   - X86_CR4_SMAP is set in CR4
4421c50d8ae3SPaolo Bonzini 			 *   - A user page is accessed
4422c50d8ae3SPaolo Bonzini 			 *   - The access is not a fetch
4423c50d8ae3SPaolo Bonzini 			 *   - Page fault in kernel mode
4424c50d8ae3SPaolo Bonzini 			 *   - CPL = 3 or X86_EFLAGS_AC is clear
4425c50d8ae3SPaolo Bonzini 			 *
4426c50d8ae3SPaolo Bonzini 			 * Here, we cover the first four conditions.
4427c50d8ae3SPaolo Bonzini 			 * The fifth is computed dynamically in permission_fault();
4428c50d8ae3SPaolo Bonzini 			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4429c50d8ae3SPaolo Bonzini 			 * *not* subject to SMAP restrictions.
4430c50d8ae3SPaolo Bonzini 			 */
4431c50d8ae3SPaolo Bonzini 			if (cr4_smap)
4432c50d8ae3SPaolo Bonzini 				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4433c50d8ae3SPaolo Bonzini 		}
4434c50d8ae3SPaolo Bonzini 
4435c50d8ae3SPaolo Bonzini 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4436c50d8ae3SPaolo Bonzini 	}
4437c50d8ae3SPaolo Bonzini }
4438c50d8ae3SPaolo Bonzini 
4439c50d8ae3SPaolo Bonzini /*
4440c50d8ae3SPaolo Bonzini  * PKU is an additional mechanism by which paging controls access to
4441c50d8ae3SPaolo Bonzini  * user-mode addresses based on the value in the PKRU register.  Protection
4442c50d8ae3SPaolo Bonzini  * key violations are reported through a bit in the page fault error code.
4443c50d8ae3SPaolo Bonzini  * Unlike other bits of the error code, the PK bit is not known at the
4444c50d8ae3SPaolo Bonzini  * call site of e.g. gva_to_gpa; it must be computed directly in
4445c50d8ae3SPaolo Bonzini  * permission_fault based on two bits of PKRU, on some machine state (CR4,
4446c50d8ae3SPaolo Bonzini  * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4447c50d8ae3SPaolo Bonzini  *
4448c50d8ae3SPaolo Bonzini  * In particular the following conditions come from the error code, the
4449c50d8ae3SPaolo Bonzini  * page tables and the machine state:
4450c50d8ae3SPaolo Bonzini  * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4451c50d8ae3SPaolo Bonzini  * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4452c50d8ae3SPaolo Bonzini  * - PK is always zero if U=0 in the page tables
4453c50d8ae3SPaolo Bonzini  * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4454c50d8ae3SPaolo Bonzini  *
4455c50d8ae3SPaolo Bonzini  * The PKRU bitmask caches the result of these four conditions.  The error
4456c50d8ae3SPaolo Bonzini  * code (minus the P bit) and the page table's U bit form an index into the
4457c50d8ae3SPaolo Bonzini  * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4458c50d8ae3SPaolo Bonzini  * with the two bits of the PKRU register corresponding to the protection key.
4459c50d8ae3SPaolo Bonzini  * For the first three conditions above the bits will be 00, thus masking
4460c50d8ae3SPaolo Bonzini  * away both AD and WD.  For all reads, or if the last condition holds, only
4461c50d8ae3SPaolo Bonzini  * WD will be masked away.
4462c50d8ae3SPaolo Bonzini  */
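/*
 * Hypothetical consumer sketch; the real lookup lives in permission_fault()
 * in mmu.h and may differ in detail.  The error code, with the P bit dropped
 * and PFERR_RSVD standing in for the gpte's U bit, selects two bits of
 * pkru_mask, which then mask the AD/WD bits of the relevant PKRU domain:
 *
 *	offset     = (pfec & ~1) | (pte_user ? PFERR_RSVD_MASK : 0);
 *	pkru_bits  = (pkru >> (pte_pkey * 2)) & 3;
 *	pkru_bits &= mmu->pkru_mask >> offset;
 *	fault      = pkru_bits != 0;
 */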
44632e4c0661SSean Christopherson static void update_pkru_bitmask(struct kvm_mmu *mmu)
4464c50d8ae3SPaolo Bonzini {
4465c50d8ae3SPaolo Bonzini 	unsigned bit;
4466c50d8ae3SPaolo Bonzini 	bool wp;
4467c50d8ae3SPaolo Bonzini 
44682e4c0661SSean Christopherson 	if (!is_cr4_pke(mmu)) {
4469c50d8ae3SPaolo Bonzini 		mmu->pkru_mask = 0;
4470c50d8ae3SPaolo Bonzini 		return;
4471c50d8ae3SPaolo Bonzini 	}
4472c50d8ae3SPaolo Bonzini 
44732e4c0661SSean Christopherson 	wp = is_cr0_wp(mmu);
4474c50d8ae3SPaolo Bonzini 
4475c50d8ae3SPaolo Bonzini 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4476c50d8ae3SPaolo Bonzini 		unsigned pfec, pkey_bits;
4477c50d8ae3SPaolo Bonzini 		bool check_pkey, check_write, ff, uf, wf, pte_user;
4478c50d8ae3SPaolo Bonzini 
4479c50d8ae3SPaolo Bonzini 		pfec = bit << 1;
4480c50d8ae3SPaolo Bonzini 		ff = pfec & PFERR_FETCH_MASK;
4481c50d8ae3SPaolo Bonzini 		uf = pfec & PFERR_USER_MASK;
4482c50d8ae3SPaolo Bonzini 		wf = pfec & PFERR_WRITE_MASK;
4483c50d8ae3SPaolo Bonzini 
4484c50d8ae3SPaolo Bonzini 		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
4485c50d8ae3SPaolo Bonzini 		pte_user = pfec & PFERR_RSVD_MASK;
4486c50d8ae3SPaolo Bonzini 
4487c50d8ae3SPaolo Bonzini 		/*
4488c50d8ae3SPaolo Bonzini 		 * Only accesses that are not instruction fetches and
4489c50d8ae3SPaolo Bonzini 		 * that target a user page need to be checked.
4490c50d8ae3SPaolo Bonzini 		 */
4491c50d8ae3SPaolo Bonzini 		check_pkey = (!ff && pte_user);
4492c50d8ae3SPaolo Bonzini 		/*
4493c50d8ae3SPaolo Bonzini 		 * Write access is controlled by PKRU if it is a
4494c50d8ae3SPaolo Bonzini 		 * user access or CR0.WP = 1.
4495c50d8ae3SPaolo Bonzini 		 */
4496c50d8ae3SPaolo Bonzini 		check_write = check_pkey && wf && (uf || wp);
4497c50d8ae3SPaolo Bonzini 
4498c50d8ae3SPaolo Bonzini 		/* PKRU.AD stops both read and write access. */
4499c50d8ae3SPaolo Bonzini 		pkey_bits = !!check_pkey;
4500c50d8ae3SPaolo Bonzini 		/* PKRU.WD stops write access. */
4501c50d8ae3SPaolo Bonzini 		pkey_bits |= (!!check_write) << 1;
4502c50d8ae3SPaolo Bonzini 
4503c50d8ae3SPaolo Bonzini 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4504c50d8ae3SPaolo Bonzini 	}
4505c50d8ae3SPaolo Bonzini }
4506c50d8ae3SPaolo Bonzini 
4507b67a93a8SSean Christopherson static void update_last_nonleaf_level(struct kvm_mmu *mmu)
4508c50d8ae3SPaolo Bonzini {
4509c50d8ae3SPaolo Bonzini 	unsigned root_level = mmu->root_level;
4510c50d8ae3SPaolo Bonzini 
4511c50d8ae3SPaolo Bonzini 	mmu->last_nonleaf_level = root_level;
4512b67a93a8SSean Christopherson 	if (root_level == PT32_ROOT_LEVEL && is_cr4_pse(mmu))
4513c50d8ae3SPaolo Bonzini 		mmu->last_nonleaf_level++;
4514c50d8ae3SPaolo Bonzini }
4515c50d8ae3SPaolo Bonzini 
451684a16226SSean Christopherson static void paging64_init_context_common(struct kvm_mmu *context,
4517d555f705SSean Christopherson 					 int root_level)
4518c50d8ae3SPaolo Bonzini {
451984a16226SSean Christopherson 	context->nx = is_efer_nx(context);
4520d555f705SSean Christopherson 	context->root_level = root_level;
4521c50d8ae3SPaolo Bonzini 
452284a16226SSean Christopherson 	WARN_ON_ONCE(!is_cr4_pae(context));
4523c50d8ae3SPaolo Bonzini 	context->page_fault = paging64_page_fault;
4524c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging64_gva_to_gpa;
4525c50d8ae3SPaolo Bonzini 	context->sync_page = paging64_sync_page;
4526c50d8ae3SPaolo Bonzini 	context->invlpg = paging64_invlpg;
4527c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4528c50d8ae3SPaolo Bonzini }
4529c50d8ae3SPaolo Bonzini 
453084a16226SSean Christopherson static void paging64_init_context(struct kvm_mmu *context,
453184a16226SSean Christopherson 				  struct kvm_mmu_role_regs *regs)
4532c50d8ae3SPaolo Bonzini {
453384a16226SSean Christopherson 	int root_level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
453484a16226SSean Christopherson 						 PT64_ROOT_4LEVEL;
4535c50d8ae3SPaolo Bonzini 
453684a16226SSean Christopherson 	paging64_init_context_common(context, root_level);
4537c50d8ae3SPaolo Bonzini }
4538c50d8ae3SPaolo Bonzini 
453984a16226SSean Christopherson static void paging32_init_context(struct kvm_mmu *context)
4540c50d8ae3SPaolo Bonzini {
4541c50d8ae3SPaolo Bonzini 	context->nx = false;
4542c50d8ae3SPaolo Bonzini 	context->root_level = PT32_ROOT_LEVEL;
4543c50d8ae3SPaolo Bonzini 	context->page_fault = paging32_page_fault;
4544c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging32_gva_to_gpa;
4545c50d8ae3SPaolo Bonzini 	context->sync_page = paging32_sync_page;
4546c50d8ae3SPaolo Bonzini 	context->invlpg = paging32_invlpg;
4547c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4548c50d8ae3SPaolo Bonzini }
4549c50d8ae3SPaolo Bonzini 
455084a16226SSean Christopherson static void paging32E_init_context(struct kvm_mmu *context)
4551c50d8ae3SPaolo Bonzini {
455284a16226SSean Christopherson 	paging64_init_context_common(context, PT32E_ROOT_LEVEL);
4553c50d8ae3SPaolo Bonzini }
4554c50d8ae3SPaolo Bonzini 
45558626c120SSean Christopherson static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
45568626c120SSean Christopherson 							 struct kvm_mmu_role_regs *regs)
4557c50d8ae3SPaolo Bonzini {
4558c50d8ae3SPaolo Bonzini 	union kvm_mmu_extended_role ext = {0};
4559c50d8ae3SPaolo Bonzini 
4560ca8d664fSSean Christopherson 	if (____is_cr0_pg(regs)) {
4561ca8d664fSSean Christopherson 		ext.cr0_pg = 1;
45628626c120SSean Christopherson 		ext.cr4_pae = ____is_cr4_pae(regs);
45638626c120SSean Christopherson 		ext.cr4_smep = ____is_cr4_smep(regs);
45648626c120SSean Christopherson 		ext.cr4_smap = ____is_cr4_smap(regs);
45658626c120SSean Christopherson 		ext.cr4_pse = ____is_cr4_pse(regs);
456684c679f5SSean Christopherson 
456784c679f5SSean Christopherson 		/* PKEY and LA57 are active iff long mode is active. */
456884c679f5SSean Christopherson 		ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
456984c679f5SSean Christopherson 		ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
4570ca8d664fSSean Christopherson 	}
4571c50d8ae3SPaolo Bonzini 
4572c50d8ae3SPaolo Bonzini 	ext.valid = 1;
4573c50d8ae3SPaolo Bonzini 
4574c50d8ae3SPaolo Bonzini 	return ext;
4575c50d8ae3SPaolo Bonzini }
4576c50d8ae3SPaolo Bonzini 
4577c50d8ae3SPaolo Bonzini static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
45788626c120SSean Christopherson 						   struct kvm_mmu_role_regs *regs,
4579c50d8ae3SPaolo Bonzini 						   bool base_only)
4580c50d8ae3SPaolo Bonzini {
4581c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4582c50d8ae3SPaolo Bonzini 
4583c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4584ca8d664fSSean Christopherson 	if (____is_cr0_pg(regs)) {
4585167f8a5cSSean Christopherson 		role.base.efer_nx = ____is_efer_nx(regs);
45868626c120SSean Christopherson 		role.base.cr0_wp = ____is_cr0_wp(regs);
4587ca8d664fSSean Christopherson 	}
4588c50d8ae3SPaolo Bonzini 	role.base.smm = is_smm(vcpu);
4589c50d8ae3SPaolo Bonzini 	role.base.guest_mode = is_guest_mode(vcpu);
4590c50d8ae3SPaolo Bonzini 
4591c50d8ae3SPaolo Bonzini 	if (base_only)
4592c50d8ae3SPaolo Bonzini 		return role;
4593c50d8ae3SPaolo Bonzini 
45948626c120SSean Christopherson 	role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
4595c50d8ae3SPaolo Bonzini 
4596c50d8ae3SPaolo Bonzini 	return role;
4597c50d8ae3SPaolo Bonzini }
4598c50d8ae3SPaolo Bonzini 
4599d468d94bSSean Christopherson static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4600d468d94bSSean Christopherson {
4601d468d94bSSean Christopherson 	/* Use 5-level TDP if and only if it's useful/necessary. */
460283013059SSean Christopherson 	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4603d468d94bSSean Christopherson 		return 4;
4604d468d94bSSean Christopherson 
460583013059SSean Christopherson 	return max_tdp_level;
4606d468d94bSSean Christopherson }
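/*
 * E.g. a host with 5-level paging support (max_tdp_level == 5) running a
 * guest whose CPUID reports MAXPHYADDR <= 48 gets a 4-level TDP root, as
 * four levels already cover a 48-bit guest-physical address space.
 */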
4607d468d94bSSean Christopherson 
4608c50d8ae3SPaolo Bonzini static union kvm_mmu_role
46098626c120SSean Christopherson kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
46108626c120SSean Christopherson 				struct kvm_mmu_role_regs *regs, bool base_only)
4611c50d8ae3SPaolo Bonzini {
46128626c120SSean Christopherson 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4613c50d8ae3SPaolo Bonzini 
4614c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = (shadow_accessed_mask == 0);
4615d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4616c50d8ae3SPaolo Bonzini 	role.base.direct = true;
4617c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4618c50d8ae3SPaolo Bonzini 
4619c50d8ae3SPaolo Bonzini 	return role;
4620c50d8ae3SPaolo Bonzini }
4621c50d8ae3SPaolo Bonzini 
4622c50d8ae3SPaolo Bonzini static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4623c50d8ae3SPaolo Bonzini {
46248c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
46258626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4626c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
46278626c120SSean Christopherson 		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
4628c50d8ae3SPaolo Bonzini 
4629c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4630c50d8ae3SPaolo Bonzini 		return;
4631c50d8ae3SPaolo Bonzini 
4632c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
46337a02674dSSean Christopherson 	context->page_fault = kvm_tdp_page_fault;
4634c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
46355efac074SPaolo Bonzini 	context->invlpg = NULL;
4636d468d94bSSean Christopherson 	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4637c50d8ae3SPaolo Bonzini 	context->direct_map = true;
4638d8dd54e0SSean Christopherson 	context->get_guest_pgd = get_cr3;
4639c50d8ae3SPaolo Bonzini 	context->get_pdptr = kvm_pdptr_read;
4640c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4641c50d8ae3SPaolo Bonzini 
4642c50d8ae3SPaolo Bonzini 	if (!is_paging(vcpu)) {
4643c50d8ae3SPaolo Bonzini 		context->nx = false;
4644c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = nonpaging_gva_to_gpa;
4645c50d8ae3SPaolo Bonzini 		context->root_level = 0;
4646c50d8ae3SPaolo Bonzini 	} else if (is_long_mode(vcpu)) {
4647c50d8ae3SPaolo Bonzini 		context->nx = is_nx(vcpu);
4648c50d8ae3SPaolo Bonzini 		context->root_level = is_la57_mode(vcpu) ?
4649c50d8ae3SPaolo Bonzini 				PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4650c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, context);
4651c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging64_gva_to_gpa;
4652c50d8ae3SPaolo Bonzini 	} else if (is_pae(vcpu)) {
4653c50d8ae3SPaolo Bonzini 		context->nx = is_nx(vcpu);
4654c50d8ae3SPaolo Bonzini 		context->root_level = PT32E_ROOT_LEVEL;
4655c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, context);
4656c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging64_gva_to_gpa;
4657c50d8ae3SPaolo Bonzini 	} else {
4658c50d8ae3SPaolo Bonzini 		context->nx = false;
4659c50d8ae3SPaolo Bonzini 		context->root_level = PT32_ROOT_LEVEL;
4660c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, context);
4661c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging32_gva_to_gpa;
4662c50d8ae3SPaolo Bonzini 	}
4663c50d8ae3SPaolo Bonzini 
4664c596f147SSean Christopherson 	update_permission_bitmask(context, false);
46652e4c0661SSean Christopherson 	update_pkru_bitmask(context);
4666b67a93a8SSean Christopherson 	update_last_nonleaf_level(context);
4667c50d8ae3SPaolo Bonzini 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
4668c50d8ae3SPaolo Bonzini }
4669c50d8ae3SPaolo Bonzini 
4670c50d8ae3SPaolo Bonzini static union kvm_mmu_role
46718626c120SSean Christopherson kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
46728626c120SSean Christopherson 				      struct kvm_mmu_role_regs *regs, bool base_only)
4673c50d8ae3SPaolo Bonzini {
46748626c120SSean Christopherson 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4675c50d8ae3SPaolo Bonzini 
46768626c120SSean Christopherson 	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
46778626c120SSean Christopherson 	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
4678ca8d664fSSean Christopherson 	role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
4679c50d8ae3SPaolo Bonzini 
468059505b55SSean Christopherson 	return role;
468159505b55SSean Christopherson }
468259505b55SSean Christopherson 
468359505b55SSean Christopherson static union kvm_mmu_role
46848626c120SSean Christopherson kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
46858626c120SSean Christopherson 				   struct kvm_mmu_role_regs *regs, bool base_only)
468659505b55SSean Christopherson {
468759505b55SSean Christopherson 	union kvm_mmu_role role =
46888626c120SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
468959505b55SSean Christopherson 
46908626c120SSean Christopherson 	role.base.direct = !____is_cr0_pg(regs);
469159505b55SSean Christopherson 
46928626c120SSean Christopherson 	if (!____is_efer_lma(regs))
4693c50d8ae3SPaolo Bonzini 		role.base.level = PT32E_ROOT_LEVEL;
46948626c120SSean Christopherson 	else if (____is_cr4_la57(regs))
4695c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_5LEVEL;
4696c50d8ae3SPaolo Bonzini 	else
4697c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_4LEVEL;
4698c50d8ae3SPaolo Bonzini 
4699c50d8ae3SPaolo Bonzini 	return role;
4700c50d8ae3SPaolo Bonzini }
4701c50d8ae3SPaolo Bonzini 
47028c008659SPaolo Bonzini static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4703594e91a1SSean Christopherson 				    struct kvm_mmu_role_regs *regs,
4704594e91a1SSean Christopherson 				    union kvm_mmu_role new_role)
4705c50d8ae3SPaolo Bonzini {
470618db1b17SSean Christopherson 	if (new_role.as_u64 == context->mmu_role.as_u64)
470718db1b17SSean Christopherson 		return;
470818db1b17SSean Christopherson 
470918db1b17SSean Christopherson 	context->mmu_role.as_u64 = new_role.as_u64;
471018db1b17SSean Christopherson 
4711594e91a1SSean Christopherson 	if (!____is_cr0_pg(regs))
471284a16226SSean Christopherson 		nonpaging_init_context(context);
4713594e91a1SSean Christopherson 	else if (____is_efer_lma(regs))
471484a16226SSean Christopherson 		paging64_init_context(context, regs);
4715594e91a1SSean Christopherson 	else if (____is_cr4_pae(regs))
471684a16226SSean Christopherson 		paging32E_init_context(context);
4717c50d8ae3SPaolo Bonzini 	else
471884a16226SSean Christopherson 		paging32_init_context(context);
4719c50d8ae3SPaolo Bonzini 
4720af098972SSean Christopherson 	if (____is_cr0_pg(regs)) {
4721af098972SSean Christopherson 		reset_rsvds_bits_mask(vcpu, context);
4722c596f147SSean Christopherson 		update_permission_bitmask(context, false);
47232e4c0661SSean Christopherson 		update_pkru_bitmask(context);
4724b67a93a8SSean Christopherson 		update_last_nonleaf_level(context);
4725af098972SSean Christopherson 	}
4726d555f705SSean Christopherson 	context->shadow_root_level = new_role.base.level;
4727d555f705SSean Christopherson 
4728c50d8ae3SPaolo Bonzini 	reset_shadow_zero_bits_mask(vcpu, context);
4729c50d8ae3SPaolo Bonzini }
47300f04a2acSVitaly Kuznetsov 
4731594e91a1SSean Christopherson static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
4732594e91a1SSean Christopherson 				struct kvm_mmu_role_regs *regs)
47330f04a2acSVitaly Kuznetsov {
47348c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
47350f04a2acSVitaly Kuznetsov 	union kvm_mmu_role new_role =
47368626c120SSean Christopherson 		kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
47370f04a2acSVitaly Kuznetsov 
4738594e91a1SSean Christopherson 	shadow_mmu_init_context(vcpu, context, regs, new_role);
47390f04a2acSVitaly Kuznetsov }
47400f04a2acSVitaly Kuznetsov 
474159505b55SSean Christopherson static union kvm_mmu_role
47428626c120SSean Christopherson kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
47438626c120SSean Christopherson 				   struct kvm_mmu_role_regs *regs)
474459505b55SSean Christopherson {
474559505b55SSean Christopherson 	union kvm_mmu_role role =
47468626c120SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
474759505b55SSean Christopherson 
474859505b55SSean Christopherson 	role.base.direct = false;
4749d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
475059505b55SSean Christopherson 
475159505b55SSean Christopherson 	return role;
475259505b55SSean Christopherson }
475359505b55SSean Christopherson 
4754dbc4739bSSean Christopherson void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
4755dbc4739bSSean Christopherson 			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
47560f04a2acSVitaly Kuznetsov {
47578c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4758594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = {
4759594e91a1SSean Christopherson 		.cr0 = cr0,
4760594e91a1SSean Christopherson 		.cr4 = cr4,
4761594e91a1SSean Christopherson 		.efer = efer,
4762594e91a1SSean Christopherson 	};
47638626c120SSean Christopherson 	union kvm_mmu_role new_role;
47648626c120SSean Christopherson 
47658626c120SSean Christopherson 	new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
47660f04a2acSVitaly Kuznetsov 
4767b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
4768a506fdd2SVitaly Kuznetsov 
4769594e91a1SSean Christopherson 	shadow_mmu_init_context(vcpu, context, &regs, new_role);
4770a3322d5cSSean Christopherson 
4771a3322d5cSSean Christopherson 	/*
477216be1d12SSean Christopherson 	 * Redo the shadow bits; the reset done by shadow_mmu_init_context()
477316be1d12SSean Christopherson 	 * (above) may use the wrong shadow_root_level.
477416be1d12SSean Christopherson 	 */
477516be1d12SSean Christopherson 	reset_shadow_zero_bits_mask(vcpu, context);
47760f04a2acSVitaly Kuznetsov }
47770f04a2acSVitaly Kuznetsov EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4778c50d8ae3SPaolo Bonzini 
4779c50d8ae3SPaolo Bonzini static union kvm_mmu_role
4780c50d8ae3SPaolo Bonzini kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4781bb1fcc70SSean Christopherson 				   bool execonly, u8 level)
4782c50d8ae3SPaolo Bonzini {
4783c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4784c50d8ae3SPaolo Bonzini 
4785c50d8ae3SPaolo Bonzini 	/* SMM flag is inherited from root_mmu */
4786c50d8ae3SPaolo Bonzini 	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4787c50d8ae3SPaolo Bonzini 
4788bb1fcc70SSean Christopherson 	role.base.level = level;
4789c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4790c50d8ae3SPaolo Bonzini 	role.base.direct = false;
4791c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = !accessed_dirty;
4792c50d8ae3SPaolo Bonzini 	role.base.guest_mode = true;
4793c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4794c50d8ae3SPaolo Bonzini 
4795cd6767c3SSean Christopherson 	/* EPT, and thus nested EPT, does not consume CR0, CR4, or EFER. */
4796cd6767c3SSean Christopherson 	role.ext.word = 0;
4797c50d8ae3SPaolo Bonzini 	role.ext.execonly = execonly;
4798cd6767c3SSean Christopherson 	role.ext.valid = 1;
4799c50d8ae3SPaolo Bonzini 
4800c50d8ae3SPaolo Bonzini 	return role;
4801c50d8ae3SPaolo Bonzini }
4802c50d8ae3SPaolo Bonzini 
4803c50d8ae3SPaolo Bonzini void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4804c50d8ae3SPaolo Bonzini 			     bool accessed_dirty, gpa_t new_eptp)
4805c50d8ae3SPaolo Bonzini {
48068c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4807bb1fcc70SSean Christopherson 	u8 level = vmx_eptp_page_walk_level(new_eptp);
4808c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
4809c50d8ae3SPaolo Bonzini 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4810bb1fcc70SSean Christopherson 						   execonly, level);
4811c50d8ae3SPaolo Bonzini 
4812b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
4813c50d8ae3SPaolo Bonzini 
4814c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4815c50d8ae3SPaolo Bonzini 		return;
4816c50d8ae3SPaolo Bonzini 
481718db1b17SSean Christopherson 	context->mmu_role.as_u64 = new_role.as_u64;
481818db1b17SSean Christopherson 
4819bb1fcc70SSean Christopherson 	context->shadow_root_level = level;
4820c50d8ae3SPaolo Bonzini 
4821c50d8ae3SPaolo Bonzini 	context->nx = true;
4822c50d8ae3SPaolo Bonzini 	context->ept_ad = accessed_dirty;
4823c50d8ae3SPaolo Bonzini 	context->page_fault = ept_page_fault;
4824c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = ept_gva_to_gpa;
4825c50d8ae3SPaolo Bonzini 	context->sync_page = ept_sync_page;
4826c50d8ae3SPaolo Bonzini 	context->invlpg = ept_invlpg;
4827bb1fcc70SSean Christopherson 	context->root_level = level;
4828c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4829c50d8ae3SPaolo Bonzini 
4830c596f147SSean Christopherson 	update_permission_bitmask(context, true);
4831b67a93a8SSean Christopherson 	update_last_nonleaf_level(context);
48322e4c0661SSean Christopherson 	update_pkru_bitmask(context);
4833c50d8ae3SPaolo Bonzini 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4834c50d8ae3SPaolo Bonzini 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4835c50d8ae3SPaolo Bonzini }
4836c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4837c50d8ae3SPaolo Bonzini 
4838c50d8ae3SPaolo Bonzini static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4839c50d8ae3SPaolo Bonzini {
48408c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4841594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4842c50d8ae3SPaolo Bonzini 
4843594e91a1SSean Christopherson 	kvm_init_shadow_mmu(vcpu, &regs);
4844929d1cfaSPaolo Bonzini 
4845d8dd54e0SSean Christopherson 	context->get_guest_pgd     = get_cr3;
4846c50d8ae3SPaolo Bonzini 	context->get_pdptr         = kvm_pdptr_read;
4847c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4848c50d8ae3SPaolo Bonzini }
4849c50d8ae3SPaolo Bonzini 
48508626c120SSean Christopherson static union kvm_mmu_role
48518626c120SSean Christopherson kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
4852654430efSSean Christopherson {
48538626c120SSean Christopherson 	union kvm_mmu_role role;
48548626c120SSean Christopherson 
48558626c120SSean Christopherson 	role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4856654430efSSean Christopherson 
4857654430efSSean Christopherson 	/*
4858654430efSSean Christopherson 	 * Nested MMUs are used only for walking L2's gva->gpa; they never have
4859654430efSSean Christopherson 	 * shadow pages of their own, and so "direct" has no meaning.  Set it
4860654430efSSean Christopherson 	 * to "true" to try to detect bogus usage of the nested MMU.
4861654430efSSean Christopherson 	 */
4862654430efSSean Christopherson 	role.base.direct = true;
4863654430efSSean Christopherson 
48648626c120SSean Christopherson 	if (!____is_cr0_pg(regs))
4865654430efSSean Christopherson 		role.base.level = 0;
48668626c120SSean Christopherson 	else if (____is_efer_lma(regs))
48678626c120SSean Christopherson 		role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
4868654430efSSean Christopherson 							  PT64_ROOT_4LEVEL;
48698626c120SSean Christopherson 	else if (____is_cr4_pae(regs))
4870654430efSSean Christopherson 		role.base.level = PT32E_ROOT_LEVEL;
4871654430efSSean Christopherson 	else
4872654430efSSean Christopherson 		role.base.level = PT32_ROOT_LEVEL;
4873654430efSSean Christopherson 
4874654430efSSean Christopherson 	return role;
4875654430efSSean Christopherson }
4876654430efSSean Christopherson 
4877c50d8ae3SPaolo Bonzini static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4878c50d8ae3SPaolo Bonzini {
48798626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
48808626c120SSean Christopherson 	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
4881c50d8ae3SPaolo Bonzini 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4882c50d8ae3SPaolo Bonzini 
4883c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
4884c50d8ae3SPaolo Bonzini 		return;
4885c50d8ae3SPaolo Bonzini 
4886c50d8ae3SPaolo Bonzini 	g_context->mmu_role.as_u64 = new_role.as_u64;
4887d8dd54e0SSean Christopherson 	g_context->get_guest_pgd     = get_cr3;
4888c50d8ae3SPaolo Bonzini 	g_context->get_pdptr         = kvm_pdptr_read;
4889c50d8ae3SPaolo Bonzini 	g_context->inject_page_fault = kvm_inject_page_fault;
4890c50d8ae3SPaolo Bonzini 
4891c50d8ae3SPaolo Bonzini 	/*
48925efac074SPaolo Bonzini 	 * L2 page tables are never shadowed, so there is no need to sync
48935efac074SPaolo Bonzini 	 * SPTEs.
48945efac074SPaolo Bonzini 	 */
48955efac074SPaolo Bonzini 	g_context->invlpg            = NULL;
48965efac074SPaolo Bonzini 
48975efac074SPaolo Bonzini 	/*
4898c50d8ae3SPaolo Bonzini 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4899c50d8ae3SPaolo Bonzini 	 * L1's nested page tables (e.g. EPT12). The nested translation
4900c50d8ae3SPaolo Bonzini 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4901c50d8ae3SPaolo Bonzini 	 * L2's page tables as the first level of translation and L1's
4902c50d8ae3SPaolo Bonzini 	 * nested page tables as the second level of translation. Basically
4903c50d8ae3SPaolo Bonzini 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4904c50d8ae3SPaolo Bonzini 	 */
4905c50d8ae3SPaolo Bonzini 	if (!is_paging(vcpu)) {
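	/*
	 * Concretely (an illustrative reading of the above): when L2 touches
	 * l2_gva, nested_mmu.gva_to_gpa walks L2's own page tables to obtain
	 * l2_gpa, and each page-table access made during that walk is itself
	 * translated l2_gpa -> l1_gpa through arch.mmu, i.e. through L1's
	 * nested page tables.
	 */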
4906c50d8ae3SPaolo Bonzini 		g_context->nx = false;
4907c50d8ae3SPaolo Bonzini 		g_context->root_level = 0;
4908c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4909c50d8ae3SPaolo Bonzini 	} else if (is_long_mode(vcpu)) {
4910c50d8ae3SPaolo Bonzini 		g_context->nx = is_nx(vcpu);
4911c50d8ae3SPaolo Bonzini 		g_context->root_level = is_la57_mode(vcpu) ?
4912c50d8ae3SPaolo Bonzini 					PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4913c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, g_context);
4914c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4915c50d8ae3SPaolo Bonzini 	} else if (is_pae(vcpu)) {
4916c50d8ae3SPaolo Bonzini 		g_context->nx = is_nx(vcpu);
4917c50d8ae3SPaolo Bonzini 		g_context->root_level = PT32E_ROOT_LEVEL;
4918c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, g_context);
4919c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4920c50d8ae3SPaolo Bonzini 	} else {
4921c50d8ae3SPaolo Bonzini 		g_context->nx = false;
4922c50d8ae3SPaolo Bonzini 		g_context->root_level = PT32_ROOT_LEVEL;
4923c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, g_context);
4924c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4925c50d8ae3SPaolo Bonzini 	}
4926c50d8ae3SPaolo Bonzini 
4927c596f147SSean Christopherson 	update_permission_bitmask(g_context, false);
49282e4c0661SSean Christopherson 	update_pkru_bitmask(g_context);
4929b67a93a8SSean Christopherson 	update_last_nonleaf_level(g_context);
4930c50d8ae3SPaolo Bonzini }
4931c50d8ae3SPaolo Bonzini 
4932c9060662SSean Christopherson void kvm_init_mmu(struct kvm_vcpu *vcpu)
4933c50d8ae3SPaolo Bonzini {
4934c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
4935c50d8ae3SPaolo Bonzini 		init_kvm_nested_mmu(vcpu);
4936c50d8ae3SPaolo Bonzini 	else if (tdp_enabled)
4937c50d8ae3SPaolo Bonzini 		init_kvm_tdp_mmu(vcpu);
4938c50d8ae3SPaolo Bonzini 	else
4939c50d8ae3SPaolo Bonzini 		init_kvm_softmmu(vcpu);
4940c50d8ae3SPaolo Bonzini }
4941c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_mmu);
4942c50d8ae3SPaolo Bonzini 
4943c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
4944c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
4945c50d8ae3SPaolo Bonzini {
49468626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4947c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role;
4948c50d8ae3SPaolo Bonzini 
4949c50d8ae3SPaolo Bonzini 	if (tdp_enabled)
49508626c120SSean Christopherson 		role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
4951c50d8ae3SPaolo Bonzini 	else
49528626c120SSean Christopherson 		role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
4953c50d8ae3SPaolo Bonzini 
4954c50d8ae3SPaolo Bonzini 	return role.base;
4955c50d8ae3SPaolo Bonzini }
4956c50d8ae3SPaolo Bonzini 
495749c6f875SSean Christopherson void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
495849c6f875SSean Christopherson {
495949c6f875SSean Christopherson 	/*
496049c6f875SSean Christopherson 	 * Invalidate all MMU roles to force them to reinitialize as CPUID
496149c6f875SSean Christopherson 	 * information is factored into reserved bit calculations.
496249c6f875SSean Christopherson 	 */
496349c6f875SSean Christopherson 	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
496449c6f875SSean Christopherson 	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
496549c6f875SSean Christopherson 	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
496649c6f875SSean Christopherson 	kvm_mmu_reset_context(vcpu);
496763f5a190SSean Christopherson 
496863f5a190SSean Christopherson 	/*
496963f5a190SSean Christopherson 	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
497063f5a190SSean Christopherson 	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
497163f5a190SSean Christopherson 	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
497263f5a190SSean Christopherson 	 * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
497363f5a190SSean Christopherson 	 * sweep the problem under the rug.
497463f5a190SSean Christopherson 	 *
497563f5a190SSean Christopherson 	 * KVM's horrific CPUID ABI makes the problem all but impossible to
497663f5a190SSean Christopherson 	 * solve, as correctly handling multiple vCPU models (with respect to
497763f5a190SSean Christopherson 	 * paging and physical address properties) in a single VM would require
497863f5a190SSean Christopherson 	 * tracking all relevant CPUID information in kvm_mmu_page_role.  That
497963f5a190SSean Christopherson 	 * is very undesirable as it would double the memory requirements for
498063f5a190SSean Christopherson 	 * gfn_track (see struct kvm_mmu_page_role comments), and in practice
498163f5a190SSean Christopherson 	 * no sane VMM mucks with the core vCPU model on the fly.
498263f5a190SSean Christopherson 	 */
498363f5a190SSean Christopherson 	if (vcpu->arch.last_vmentry_cpu != -1) {
498463f5a190SSean Christopherson 		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
498563f5a190SSean Christopherson 		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
498663f5a190SSean Christopherson 	}
498749c6f875SSean Christopherson }
498849c6f875SSean Christopherson 
4989c50d8ae3SPaolo Bonzini void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
4990c50d8ae3SPaolo Bonzini {
4991c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
4992c9060662SSean Christopherson 	kvm_init_mmu(vcpu);
4993c50d8ae3SPaolo Bonzini }
4994c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
4995c50d8ae3SPaolo Bonzini 
4996c50d8ae3SPaolo Bonzini int kvm_mmu_load(struct kvm_vcpu *vcpu)
4997c50d8ae3SPaolo Bonzini {
4998c50d8ae3SPaolo Bonzini 	int r;
4999c50d8ae3SPaolo Bonzini 
5000378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
5001c50d8ae3SPaolo Bonzini 	if (r)
5002c50d8ae3SPaolo Bonzini 		goto out;
5003748e52b9SSean Christopherson 	r = mmu_alloc_special_roots(vcpu);
5004c50d8ae3SPaolo Bonzini 	if (r)
5005c50d8ae3SPaolo Bonzini 		goto out;
50064a38162eSPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
50076e6ec584SSean Christopherson 		r = mmu_alloc_direct_roots(vcpu);
50086e6ec584SSean Christopherson 	else
50096e6ec584SSean Christopherson 		r = mmu_alloc_shadow_roots(vcpu);
5010c50d8ae3SPaolo Bonzini 	if (r)
5011c50d8ae3SPaolo Bonzini 		goto out;
5012a91f387bSSean Christopherson 
5013a91f387bSSean Christopherson 	kvm_mmu_sync_roots(vcpu);
5014a91f387bSSean Christopherson 
5015727a7e27SPaolo Bonzini 	kvm_mmu_load_pgd(vcpu);
5016b3646477SJason Baron 	static_call(kvm_x86_tlb_flush_current)(vcpu);
5017c50d8ae3SPaolo Bonzini out:
5018c50d8ae3SPaolo Bonzini 	return r;
5019c50d8ae3SPaolo Bonzini }
5020c50d8ae3SPaolo Bonzini 
5021c50d8ae3SPaolo Bonzini void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5022c50d8ae3SPaolo Bonzini {
5023c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5024c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
5025c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5026c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
5027c50d8ae3SPaolo Bonzini }
5028c50d8ae3SPaolo Bonzini 
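/*
 * A remote TLB flush is needed if the old SPTE was present and the new one
 * moved the mapping or dropped a permission.  The XOR with shadow_nx_mask
 * inverts the NX bit so every bit in PT64_PERM_MASK reads as "1 = access
 * allowed", letting a single "old & ~new" test catch any revoked permission.
 */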
5029c50d8ae3SPaolo Bonzini static bool need_remote_flush(u64 old, u64 new)
5030c50d8ae3SPaolo Bonzini {
5031c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old))
5032c50d8ae3SPaolo Bonzini 		return false;
5033c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(new))
5034c50d8ae3SPaolo Bonzini 		return true;
5035c50d8ae3SPaolo Bonzini 	if ((old ^ new) & PT64_BASE_ADDR_MASK)
5036c50d8ae3SPaolo Bonzini 		return true;
5037c50d8ae3SPaolo Bonzini 	old ^= shadow_nx_mask;
5038c50d8ae3SPaolo Bonzini 	new ^= shadow_nx_mask;
5039c50d8ae3SPaolo Bonzini 	return (old & ~new & PT64_PERM_MASK) != 0;
5040c50d8ae3SPaolo Bonzini }
5041c50d8ae3SPaolo Bonzini 
5042c50d8ae3SPaolo Bonzini static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5043c50d8ae3SPaolo Bonzini 				    int *bytes)
5044c50d8ae3SPaolo Bonzini {
5045c50d8ae3SPaolo Bonzini 	u64 gentry = 0;
5046c50d8ae3SPaolo Bonzini 	int r;
5047c50d8ae3SPaolo Bonzini 
5048c50d8ae3SPaolo Bonzini 	/*
5049c50d8ae3SPaolo Bonzini 	 * Assume that the pte write is on a page table of the same type
5050c50d8ae3SPaolo Bonzini 	 * as the current vcpu paging mode, since the sptes are updated
5051c50d8ae3SPaolo Bonzini 	 * only when they have the same mode.
5052c50d8ae3SPaolo Bonzini 	 */
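	/*
	 * E.g. a PAE guest updating the high half of a gpte writes 4 bytes
	 * at an 8-byte-aligned gpa plus 4; align the gpa down to the gpte
	 * boundary and fetch all 8 bytes so the full gpte is re-validated.
	 */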
5053c50d8ae3SPaolo Bonzini 	if (is_pae(vcpu) && *bytes == 4) {
5054c50d8ae3SPaolo Bonzini 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5055c50d8ae3SPaolo Bonzini 		*gpa &= ~(gpa_t)7;
5056c50d8ae3SPaolo Bonzini 		*bytes = 8;
5057c50d8ae3SPaolo Bonzini 	}
5058c50d8ae3SPaolo Bonzini 
5059c50d8ae3SPaolo Bonzini 	if (*bytes == 4 || *bytes == 8) {
5060c50d8ae3SPaolo Bonzini 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5061c50d8ae3SPaolo Bonzini 		if (r)
5062c50d8ae3SPaolo Bonzini 			gentry = 0;
5063c50d8ae3SPaolo Bonzini 	}
5064c50d8ae3SPaolo Bonzini 
5065c50d8ae3SPaolo Bonzini 	return gentry;
5066c50d8ae3SPaolo Bonzini }
5067c50d8ae3SPaolo Bonzini 
5068c50d8ae3SPaolo Bonzini /*
5069c50d8ae3SPaolo Bonzini  * If we're seeing too many writes to a page, it may no longer be a page table,
5070c50d8ae3SPaolo Bonzini  * or we may be forking, in which case it is better to unmap the page.
5071c50d8ae3SPaolo Bonzini  */
5072c50d8ae3SPaolo Bonzini static bool detect_write_flooding(struct kvm_mmu_page *sp)
5073c50d8ae3SPaolo Bonzini {
5074c50d8ae3SPaolo Bonzini 	/*
5075c50d8ae3SPaolo Bonzini 	 * Skip write-flooding detection for an sp whose level is 1, because
5076c50d8ae3SPaolo Bonzini 	 * it can become unsync, and then the guest page is not write-protected.
5077c50d8ae3SPaolo Bonzini 	 */
50783bae0459SSean Christopherson 	if (sp->role.level == PG_LEVEL_4K)
5079c50d8ae3SPaolo Bonzini 		return false;
5080c50d8ae3SPaolo Bonzini 
5081c50d8ae3SPaolo Bonzini 	atomic_inc(&sp->write_flooding_count);
5082c50d8ae3SPaolo Bonzini 	return atomic_read(&sp->write_flooding_count) >= 3;
5083c50d8ae3SPaolo Bonzini }
5084c50d8ae3SPaolo Bonzini 
5085c50d8ae3SPaolo Bonzini /*
5086c50d8ae3SPaolo Bonzini  * Misaligned accesses are too much trouble to fix up; also, they usually
5087c50d8ae3SPaolo Bonzini  * indicate a page is not used as a page table.
5088c50d8ae3SPaolo Bonzini  */
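/*
 * Worked example with 8-byte gptes: a 4-byte write at page offset 4 stays
 * within one gpte ((4 ^ 7) & ~7 == 0, not misaligned), whereas a 4-byte
 * write at offset 6 straddles two gptes ((6 ^ 9) & ~7 == 8, misaligned).
 */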
5089c50d8ae3SPaolo Bonzini static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5090c50d8ae3SPaolo Bonzini 				    int bytes)
5091c50d8ae3SPaolo Bonzini {
5092c50d8ae3SPaolo Bonzini 	unsigned offset, pte_size, misaligned;
5093c50d8ae3SPaolo Bonzini 
5094c50d8ae3SPaolo Bonzini 	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
5095c50d8ae3SPaolo Bonzini 		 gpa, bytes, sp->role.word);
5096c50d8ae3SPaolo Bonzini 
5097c50d8ae3SPaolo Bonzini 	offset = offset_in_page(gpa);
5098c50d8ae3SPaolo Bonzini 	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5099c50d8ae3SPaolo Bonzini 
5100c50d8ae3SPaolo Bonzini 	/*
5101c50d8ae3SPaolo Bonzini 	 * Sometimes the OS writes only a single byte to update status
5102c50d8ae3SPaolo Bonzini 	 * bits; for example, Linux's clear_bit() uses an andb instruction.
5103c50d8ae3SPaolo Bonzini 	 */
5104c50d8ae3SPaolo Bonzini 	if (!(offset & (pte_size - 1)) && bytes == 1)
5105c50d8ae3SPaolo Bonzini 		return false;
5106c50d8ae3SPaolo Bonzini 
5107c50d8ae3SPaolo Bonzini 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5108c50d8ae3SPaolo Bonzini 	misaligned |= bytes < 4;
5109c50d8ae3SPaolo Bonzini 
5110c50d8ae3SPaolo Bonzini 	return misaligned;
5111c50d8ae3SPaolo Bonzini }
5112c50d8ae3SPaolo Bonzini 
5113c50d8ae3SPaolo Bonzini static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5114c50d8ae3SPaolo Bonzini {
5115c50d8ae3SPaolo Bonzini 	unsigned page_offset, quadrant;
5116c50d8ae3SPaolo Bonzini 	u64 *spte;
5117c50d8ae3SPaolo Bonzini 	int level;
5118c50d8ae3SPaolo Bonzini 
5119c50d8ae3SPaolo Bonzini 	page_offset = offset_in_page(gpa);
5120c50d8ae3SPaolo Bonzini 	level = sp->role.level;
5121c50d8ae3SPaolo Bonzini 	*nspte = 1;
5122c50d8ae3SPaolo Bonzini 	if (!sp->role.gpte_is_8_bytes) {
5123c50d8ae3SPaolo Bonzini 		page_offset <<= 1;	/* 32->64 */
5124c50d8ae3SPaolo Bonzini 		/*
5125c50d8ae3SPaolo Bonzini 		 * A 32-bit pde maps 4MB while the shadow pdes map
5126c50d8ae3SPaolo Bonzini 		 * only 2MB.  So we need to double the offset again
5127c50d8ae3SPaolo Bonzini 		 * and zap two pdes instead of one.
5128c50d8ae3SPaolo Bonzini 		 */
5129c50d8ae3SPaolo Bonzini 		if (level == PT32_ROOT_LEVEL) {
5130c50d8ae3SPaolo Bonzini 			page_offset &= ~7; /* kill rounding error */
5131c50d8ae3SPaolo Bonzini 			page_offset <<= 1;
5132c50d8ae3SPaolo Bonzini 			*nspte = 2;
5133c50d8ae3SPaolo Bonzini 		}
5134c50d8ae3SPaolo Bonzini 		quadrant = page_offset >> PAGE_SHIFT;
5135c50d8ae3SPaolo Bonzini 		page_offset &= ~PAGE_MASK;
5136c50d8ae3SPaolo Bonzini 		if (quadrant != sp->role.quadrant)
5137c50d8ae3SPaolo Bonzini 			return NULL;
5138c50d8ae3SPaolo Bonzini 	}
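	/*
	 * Worked example: a write to guest PDE 300 of a 32-bit page directory
	 * has page_offset = 0x4b0; doubling twice gives 0x12c0, so the write
	 * maps to quadrant 0x12c0 >> 12 = 1, at offset 0x2c0 within that
	 * shadow page, and zaps two sptes.
	 */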
5139c50d8ae3SPaolo Bonzini 
5140c50d8ae3SPaolo Bonzini 	spte = &sp->spt[page_offset / sizeof(*spte)];
5141c50d8ae3SPaolo Bonzini 	return spte;
5142c50d8ae3SPaolo Bonzini }
5143c50d8ae3SPaolo Bonzini 
5144c50d8ae3SPaolo Bonzini static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5145c50d8ae3SPaolo Bonzini 			      const u8 *new, int bytes,
5146c50d8ae3SPaolo Bonzini 			      struct kvm_page_track_notifier_node *node)
5147c50d8ae3SPaolo Bonzini {
5148c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
5149c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5150c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5151c50d8ae3SPaolo Bonzini 	u64 entry, gentry, *spte;
5152c50d8ae3SPaolo Bonzini 	int npte;
5153c50d8ae3SPaolo Bonzini 	bool remote_flush, local_flush;
5154c50d8ae3SPaolo Bonzini 
5155c50d8ae3SPaolo Bonzini 	/*
5156c50d8ae3SPaolo Bonzini 	 * If we don't have indirect shadow pages, it means no page is
5157c50d8ae3SPaolo Bonzini 	 * write-protected, so we can simply exit.
5158c50d8ae3SPaolo Bonzini 	 */
5159c50d8ae3SPaolo Bonzini 	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5160c50d8ae3SPaolo Bonzini 		return;
5161c50d8ae3SPaolo Bonzini 
5162c50d8ae3SPaolo Bonzini 	remote_flush = local_flush = false;
5163c50d8ae3SPaolo Bonzini 
5164c50d8ae3SPaolo Bonzini 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5165c50d8ae3SPaolo Bonzini 
5166c50d8ae3SPaolo Bonzini 	/*
5167c50d8ae3SPaolo Bonzini 	 * No need to care whether the memory allocation is successful
5168d9f6e12fSIngo Molnar 	 * or not, since pte prefetch is skipped if there are not
5169c50d8ae3SPaolo Bonzini 	 * enough objects in the cache.
5170c50d8ae3SPaolo Bonzini 	 */
5171378f5cd6SSean Christopherson 	mmu_topup_memory_caches(vcpu, true);
5172c50d8ae3SPaolo Bonzini 
5173531810caSBen Gardon 	write_lock(&vcpu->kvm->mmu_lock);
5174c50d8ae3SPaolo Bonzini 
5175c50d8ae3SPaolo Bonzini 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5176c50d8ae3SPaolo Bonzini 
5177c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_pte_write;
5178c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5179c50d8ae3SPaolo Bonzini 
5180c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5181c50d8ae3SPaolo Bonzini 		if (detect_write_misaligned(sp, gpa, bytes) ||
5182c50d8ae3SPaolo Bonzini 		      detect_write_flooding(sp)) {
5183c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5184c50d8ae3SPaolo Bonzini 			++vcpu->kvm->stat.mmu_flooded;
5185c50d8ae3SPaolo Bonzini 			continue;
5186c50d8ae3SPaolo Bonzini 		}
5187c50d8ae3SPaolo Bonzini 
5188c50d8ae3SPaolo Bonzini 		spte = get_written_sptes(sp, gpa, &npte);
5189c50d8ae3SPaolo Bonzini 		if (!spte)
5190c50d8ae3SPaolo Bonzini 			continue;
5191c50d8ae3SPaolo Bonzini 
5192c50d8ae3SPaolo Bonzini 		local_flush = true;
5193c50d8ae3SPaolo Bonzini 		while (npte--) {
5194c50d8ae3SPaolo Bonzini 			entry = *spte;
51952de4085cSBen Gardon 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5196c5e2184dSSean Christopherson 			if (gentry && sp->role.level != PG_LEVEL_4K)
5197c5e2184dSSean Christopherson 				++vcpu->kvm->stat.mmu_pde_zapped;
5198c50d8ae3SPaolo Bonzini 			if (need_remote_flush(entry, *spte))
5199c50d8ae3SPaolo Bonzini 				remote_flush = true;
5200c50d8ae3SPaolo Bonzini 			++spte;
5201c50d8ae3SPaolo Bonzini 		}
5202c50d8ae3SPaolo Bonzini 	}
5203c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5204c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5205531810caSBen Gardon 	write_unlock(&vcpu->kvm->mmu_lock);
5206c50d8ae3SPaolo Bonzini }
5207c50d8ae3SPaolo Bonzini 
5208736c291cSSean Christopherson int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5209c50d8ae3SPaolo Bonzini 		       void *insn, int insn_len)
5210c50d8ae3SPaolo Bonzini {
521192daa48bSSean Christopherson 	int r, emulation_type = EMULTYPE_PF;
5212c50d8ae3SPaolo Bonzini 	bool direct = vcpu->arch.mmu->direct_map;
5213c50d8ae3SPaolo Bonzini 
52146948199aSSean Christopherson 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5215ddce6208SSean Christopherson 		return RET_PF_RETRY;
5216ddce6208SSean Christopherson 
5217c50d8ae3SPaolo Bonzini 	r = RET_PF_INVALID;
5218c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
5219736c291cSSean Christopherson 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5220c50d8ae3SPaolo Bonzini 		if (r == RET_PF_EMULATE)
5221c50d8ae3SPaolo Bonzini 			goto emulate;
5222c50d8ae3SPaolo Bonzini 	}
5223c50d8ae3SPaolo Bonzini 
5224c50d8ae3SPaolo Bonzini 	if (r == RET_PF_INVALID) {
52257a02674dSSean Christopherson 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
52267a02674dSSean Christopherson 					  lower_32_bits(error_code), false);
52277b367bc9SSean Christopherson 		if (WARN_ON_ONCE(r == RET_PF_INVALID))
52287b367bc9SSean Christopherson 			return -EIO;
5229c50d8ae3SPaolo Bonzini 	}
5230c50d8ae3SPaolo Bonzini 
5231c50d8ae3SPaolo Bonzini 	if (r < 0)
5232c50d8ae3SPaolo Bonzini 		return r;
523383a2ba4cSSean Christopherson 	if (r != RET_PF_EMULATE)
523483a2ba4cSSean Christopherson 		return 1;
5235c50d8ae3SPaolo Bonzini 
5236c50d8ae3SPaolo Bonzini 	/*
5237c50d8ae3SPaolo Bonzini 	 * Before emulating the instruction, check if the error code
5238c50d8ae3SPaolo Bonzini 	 * was due to a RO violation while translating the guest page.
5239c50d8ae3SPaolo Bonzini 	 * This can occur when using nested virtualization with nested
5240c50d8ae3SPaolo Bonzini 	 * paging in both guests. If true, we simply unprotect the page
5241c50d8ae3SPaolo Bonzini 	 * and resume the guest.
5242c50d8ae3SPaolo Bonzini 	 */
5243c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map &&
5244c50d8ae3SPaolo Bonzini 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5245736c291cSSean Christopherson 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5246c50d8ae3SPaolo Bonzini 		return 1;
5247c50d8ae3SPaolo Bonzini 	}
5248c50d8ae3SPaolo Bonzini 
5249c50d8ae3SPaolo Bonzini 	/*
5250c50d8ae3SPaolo Bonzini 	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5251c50d8ae3SPaolo Bonzini 	 * optimistically try to just unprotect the page and let the processor
5252c50d8ae3SPaolo Bonzini 	 * re-execute the instruction that caused the page fault.  Do not allow
5253c50d8ae3SPaolo Bonzini 	 * retrying MMIO emulation, as it's not only pointless but could also
5254c50d8ae3SPaolo Bonzini 	 * cause us to enter an infinite loop because the processor will keep
5255c50d8ae3SPaolo Bonzini 	 * faulting on the non-existent MMIO address.  Retrying an instruction
5256c50d8ae3SPaolo Bonzini 	 * from a nested guest is also pointless and dangerous as we are only
5257c50d8ae3SPaolo Bonzini 	 * explicitly shadowing L1's page tables, i.e. unprotecting something
5258c50d8ae3SPaolo Bonzini 	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5259c50d8ae3SPaolo Bonzini 	 */
5260736c291cSSean Christopherson 	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
526192daa48bSSean Christopherson 		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5262c50d8ae3SPaolo Bonzini emulate:
5263736c291cSSean Christopherson 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5264c50d8ae3SPaolo Bonzini 				       insn_len);
5265c50d8ae3SPaolo Bonzini }
5266c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5267c50d8ae3SPaolo Bonzini 
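/*
 * Sketch of a typical caller (illustrative; the exact names live in the
 * vendor modules and are assumed here): an EPT-violation or #NPF exit
 * handler forwards the faulting GPA and the hardware error code, e.g.
 *
 *	return kvm_mmu_page_fault(vcpu, fault_address, error_code,
 *				  insn, insn_len);
 *
 * A positive return resumes the guest, 0 requires an exit to userspace,
 * and a negative value is an error.
 */
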
52685efac074SPaolo Bonzini void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
52695efac074SPaolo Bonzini 			    gva_t gva, hpa_t root_hpa)
5270c50d8ae3SPaolo Bonzini {
5271c50d8ae3SPaolo Bonzini 	int i;
5272c50d8ae3SPaolo Bonzini 
52735efac074SPaolo Bonzini 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
52745efac074SPaolo Bonzini 	if (mmu != &vcpu->arch.guest_mmu) {
52755efac074SPaolo Bonzini 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5276c50d8ae3SPaolo Bonzini 		if (is_noncanonical_address(gva, vcpu))
5277c50d8ae3SPaolo Bonzini 			return;
5278c50d8ae3SPaolo Bonzini 
5279b3646477SJason Baron 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
52805efac074SPaolo Bonzini 	}
52815efac074SPaolo Bonzini 
52825efac074SPaolo Bonzini 	if (!mmu->invlpg)
52835efac074SPaolo Bonzini 		return;
52845efac074SPaolo Bonzini 
52855efac074SPaolo Bonzini 	if (root_hpa == INVALID_PAGE) {
5286c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5287c50d8ae3SPaolo Bonzini 
5288c50d8ae3SPaolo Bonzini 		/*
5289c50d8ae3SPaolo Bonzini 		 * INVLPG is required to invalidate any global mappings for the VA,
5290c50d8ae3SPaolo Bonzini 		 * irrespective of PCID.  Since determining whether any of the
5291c50d8ae3SPaolo Bonzini 		 * prev_root mappings of the VA is marked global would take
5292c50d8ae3SPaolo Bonzini 		 * roughly as much work as just syncing it blindly, we might as
5293c50d8ae3SPaolo Bonzini 		 * well always sync it.
5294c50d8ae3SPaolo Bonzini 		 *
5295c50d8ae3SPaolo Bonzini 		 * Mappings not reachable via the current cr3 or the prev_roots will be
5296c50d8ae3SPaolo Bonzini 		 * synced when switching to that cr3, so nothing needs to be done here
5297c50d8ae3SPaolo Bonzini 		 * for them.
5298c50d8ae3SPaolo Bonzini 		 */
5299c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5300c50d8ae3SPaolo Bonzini 			if (VALID_PAGE(mmu->prev_roots[i].hpa))
5301c50d8ae3SPaolo Bonzini 				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
53025efac074SPaolo Bonzini 	} else {
53035efac074SPaolo Bonzini 		mmu->invlpg(vcpu, gva, root_hpa);
53045efac074SPaolo Bonzini 	}
53055efac074SPaolo Bonzini }
5306c50d8ae3SPaolo Bonzini 
53075efac074SPaolo Bonzini void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
53085efac074SPaolo Bonzini {
53095efac074SPaolo Bonzini 	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
5310c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5311c50d8ae3SPaolo Bonzini }
5312c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5313c50d8ae3SPaolo Bonzini 
53145efac074SPaolo Bonzini 
5315c50d8ae3SPaolo Bonzini void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5316c50d8ae3SPaolo Bonzini {
5317c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5318c50d8ae3SPaolo Bonzini 	bool tlb_flush = false;
5319c50d8ae3SPaolo Bonzini 	uint i;
5320c50d8ae3SPaolo Bonzini 
5321c50d8ae3SPaolo Bonzini 	if (pcid == kvm_get_active_pcid(vcpu)) {
5322c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5323c50d8ae3SPaolo Bonzini 		tlb_flush = true;
5324c50d8ae3SPaolo Bonzini 	}
5325c50d8ae3SPaolo Bonzini 
5326c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5327c50d8ae3SPaolo Bonzini 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5328be01e8e2SSean Christopherson 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5329c50d8ae3SPaolo Bonzini 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5330c50d8ae3SPaolo Bonzini 			tlb_flush = true;
5331c50d8ae3SPaolo Bonzini 		}
5332c50d8ae3SPaolo Bonzini 	}
5333c50d8ae3SPaolo Bonzini 
5334c50d8ae3SPaolo Bonzini 	if (tlb_flush)
5335b3646477SJason Baron 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5336c50d8ae3SPaolo Bonzini 
5337c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5338c50d8ae3SPaolo Bonzini 
5339c50d8ae3SPaolo Bonzini 	/*
5340c50d8ae3SPaolo Bonzini 	 * Mappings not reachable via the current cr3 or the prev_roots will be
5341c50d8ae3SPaolo Bonzini 	 * synced when switching to that cr3, so nothing needs to be done here
5342c50d8ae3SPaolo Bonzini 	 * for them.
5343c50d8ae3SPaolo Bonzini 	 */
5344c50d8ae3SPaolo Bonzini }
5345c50d8ae3SPaolo Bonzini 
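/*
 * Hedged sketch of the expected caller: the common INVPCID emulation is
 * assumed to dispatch the single-address type here, roughly
 *
 *	case INVPCID_TYPE_INDIV_ADDR:
 *		kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
 *		return kvm_skip_emulated_instruction(vcpu);
 */
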
534683013059SSean Christopherson void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
534783013059SSean Christopherson 		       int tdp_huge_page_level)
5348c50d8ae3SPaolo Bonzini {
5349bde77235SSean Christopherson 	tdp_enabled = enable_tdp;
535083013059SSean Christopherson 	max_tdp_level = tdp_max_root_level;
5351703c335dSSean Christopherson 
5352703c335dSSean Christopherson 	/*
53531d92d2e8SSean Christopherson 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
5354703c335dSSean Christopherson 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
5355703c335dSSean Christopherson 	 * the kernel is not.  But, KVM never creates a page size greater than
5356703c335dSSean Christopherson 	 * what is used by the kernel for any given HVA, i.e. the kernel's
5357703c335dSSean Christopherson 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5358703c335dSSean Christopherson 	 */
5359703c335dSSean Christopherson 	if (tdp_enabled)
53601d92d2e8SSean Christopherson 		max_huge_page_level = tdp_huge_page_level;
5361703c335dSSean Christopherson 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
53621d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_1G;
5363703c335dSSean Christopherson 	else
53641d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_2M;
5365c50d8ae3SPaolo Bonzini }
5366bde77235SSean Christopherson EXPORT_SYMBOL_GPL(kvm_configure_mmu);
5367c50d8ae3SPaolo Bonzini 
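/*
 * Illustrative call sites (hedged; the vendor-side names are assumptions):
 * VMX passes limits derived from its EPT capabilities, roughly
 *
 *	kvm_configure_mmu(enable_ept, vmx_get_max_tdp_level(),
 *			  ept_lpage_level);
 *
 * while SVM's NPT always supports 1GiB pages:
 *
 *	kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
 */
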
5368c50d8ae3SPaolo Bonzini /* The return value indicates whether a TLB flush on all vcpus is needed. */
53690a234f5dSSean Christopherson typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head,
53700a234f5dSSean Christopherson 				    struct kvm_memory_slot *slot);
5371c50d8ae3SPaolo Bonzini 
5372c50d8ae3SPaolo Bonzini /* The caller should hold mmu-lock before calling this function. */
5373c50d8ae3SPaolo Bonzini /* The caller must hold mmu_lock (for write) before calling this function. */
5374c50d8ae3SPaolo Bonzini slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5375c50d8ae3SPaolo Bonzini 			slot_level_handler fn, int start_level, int end_level,
53761a61b7dbSSean Christopherson 			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
53771a61b7dbSSean Christopherson 			bool flush)
5378c50d8ae3SPaolo Bonzini {
5379c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
5380c50d8ae3SPaolo Bonzini 
5381c50d8ae3SPaolo Bonzini 	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5382c50d8ae3SPaolo Bonzini 			end_gfn, &iterator) {
5383c50d8ae3SPaolo Bonzini 		if (iterator.rmap)
53840a234f5dSSean Christopherson 			flush |= fn(kvm, iterator.rmap, memslot);
5385c50d8ae3SPaolo Bonzini 
5386531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5387302695a5SSean Christopherson 			if (flush && flush_on_yield) {
5388c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm,
5389c50d8ae3SPaolo Bonzini 						start_gfn,
5390c50d8ae3SPaolo Bonzini 						iterator.gfn - start_gfn + 1);
5391c50d8ae3SPaolo Bonzini 				flush = false;
5392c50d8ae3SPaolo Bonzini 			}
5393531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
5394c50d8ae3SPaolo Bonzini 		}
5395c50d8ae3SPaolo Bonzini 	}
5396c50d8ae3SPaolo Bonzini 
5397c50d8ae3SPaolo Bonzini 	return flush;
5398c50d8ae3SPaolo Bonzini }
5399c50d8ae3SPaolo Bonzini 
5400c50d8ae3SPaolo Bonzini static __always_inline bool
5401c50d8ae3SPaolo Bonzini slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5402c50d8ae3SPaolo Bonzini 		  slot_level_handler fn, int start_level, int end_level,
5403302695a5SSean Christopherson 		  bool flush_on_yield)
5404c50d8ae3SPaolo Bonzini {
5405c50d8ae3SPaolo Bonzini 	return slot_handle_level_range(kvm, memslot, fn, start_level,
5406c50d8ae3SPaolo Bonzini 			end_level, memslot->base_gfn,
5407c50d8ae3SPaolo Bonzini 			memslot->base_gfn + memslot->npages - 1,
54081a61b7dbSSean Christopherson 			flush_on_yield, false);
5409c50d8ae3SPaolo Bonzini }
5410c50d8ae3SPaolo Bonzini 
5411c50d8ae3SPaolo Bonzini static __always_inline bool
5412c50d8ae3SPaolo Bonzini slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
5413302695a5SSean Christopherson 		 slot_level_handler fn, bool flush_on_yield)
5414c50d8ae3SPaolo Bonzini {
54153bae0459SSean Christopherson 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5416302695a5SSean Christopherson 				 PG_LEVEL_4K, flush_on_yield);
5417c50d8ae3SPaolo Bonzini }
5418c50d8ae3SPaolo Bonzini 
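/*
 * Illustrative use of the walkers above (hypothetical combination, hedged):
 * zapping only the 4K rmaps of a slot reduces to
 *
 *	flush = slot_handle_leaf(kvm, memslot, kvm_zap_rmapp, true);
 *
 * with the yield/flush policy handled entirely by slot_handle_level_range().
 */
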
5419c50d8ae3SPaolo Bonzini static void free_mmu_pages(struct kvm_mmu *mmu)
5420c50d8ae3SPaolo Bonzini {
54214a98623dSSean Christopherson 	if (!tdp_enabled && mmu->pae_root)
54224a98623dSSean Christopherson 		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
5423c50d8ae3SPaolo Bonzini 	free_page((unsigned long)mmu->pae_root);
542403ca4589SSean Christopherson 	free_page((unsigned long)mmu->pml4_root);
5425c50d8ae3SPaolo Bonzini }
5426c50d8ae3SPaolo Bonzini 
542704d28e37SSean Christopherson static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5428c50d8ae3SPaolo Bonzini {
5429c50d8ae3SPaolo Bonzini 	struct page *page;
5430c50d8ae3SPaolo Bonzini 	int i;
5431c50d8ae3SPaolo Bonzini 
543204d28e37SSean Christopherson 	mmu->root_hpa = INVALID_PAGE;
543304d28e37SSean Christopherson 	mmu->root_pgd = 0;
543404d28e37SSean Christopherson 	mmu->translate_gpa = translate_gpa;
543504d28e37SSean Christopherson 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
543604d28e37SSean Christopherson 		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
543704d28e37SSean Christopherson 
5438c50d8ae3SPaolo Bonzini 	/*
5439c50d8ae3SPaolo Bonzini 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5440c50d8ae3SPaolo Bonzini 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
5441c50d8ae3SPaolo Bonzini 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5442c50d8ae3SPaolo Bonzini 	 * x86_64.  Therefore we need to allocate the PDP table in the first
544304d45551SSean Christopherson 	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
544404d45551SSean Christopherson 	 * generally doesn't use PAE paging and can skip allocating the PDP
544504d45551SSean Christopherson 	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
544604d45551SSean Christopherson 	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
544704d45551SSean Christopherson 	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
5448c50d8ae3SPaolo Bonzini 	 */
5449d468d94bSSean Christopherson 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5450c50d8ae3SPaolo Bonzini 		return 0;
5451c50d8ae3SPaolo Bonzini 
5452c50d8ae3SPaolo Bonzini 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5453c50d8ae3SPaolo Bonzini 	if (!page)
5454c50d8ae3SPaolo Bonzini 		return -ENOMEM;
5455c50d8ae3SPaolo Bonzini 
5456c50d8ae3SPaolo Bonzini 	mmu->pae_root = page_address(page);
54574a98623dSSean Christopherson 
54584a98623dSSean Christopherson 	/*
54594a98623dSSean Christopherson 	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
54604a98623dSSean Christopherson 	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
54614a98623dSSean Christopherson 	 * that KVM's writes and the CPU's reads get along.  Note, this is
54624a98623dSSean Christopherson 	 * only necessary when using shadow paging, as 64-bit NPT can get at
54634a98623dSSean Christopherson 	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
54644a98623dSSean Christopherson 	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
54654a98623dSSean Christopherson 	 */
54664a98623dSSean Christopherson 	if (!tdp_enabled)
54674a98623dSSean Christopherson 		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
54684a98623dSSean Christopherson 	else
54694a98623dSSean Christopherson 		WARN_ON_ONCE(shadow_me_mask);
54704a98623dSSean Christopherson 
5471c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i)
5472c834e5e4SSean Christopherson 		mmu->pae_root[i] = INVALID_PAE_ROOT;
5473c50d8ae3SPaolo Bonzini 
5474c50d8ae3SPaolo Bonzini 	return 0;
5475c50d8ae3SPaolo Bonzini }
5476c50d8ae3SPaolo Bonzini 
5477c50d8ae3SPaolo Bonzini int kvm_mmu_create(struct kvm_vcpu *vcpu)
5478c50d8ae3SPaolo Bonzini {
5479c50d8ae3SPaolo Bonzini 	int ret;
5480c50d8ae3SPaolo Bonzini 
54815962bfb7SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
54825f6078f9SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
54835f6078f9SSean Christopherson 
54845962bfb7SSean Christopherson 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
54855f6078f9SSean Christopherson 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
54865962bfb7SSean Christopherson 
548796880883SSean Christopherson 	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
548896880883SSean Christopherson 
5489c50d8ae3SPaolo Bonzini 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
5490c50d8ae3SPaolo Bonzini 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5491c50d8ae3SPaolo Bonzini 
5492c50d8ae3SPaolo Bonzini 	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5493c50d8ae3SPaolo Bonzini 
549404d28e37SSean Christopherson 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5495c50d8ae3SPaolo Bonzini 	if (ret)
5496c50d8ae3SPaolo Bonzini 		return ret;
5497c50d8ae3SPaolo Bonzini 
549804d28e37SSean Christopherson 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5499c50d8ae3SPaolo Bonzini 	if (ret)
5500c50d8ae3SPaolo Bonzini 		goto fail_allocate_root;
5501c50d8ae3SPaolo Bonzini 
5502c50d8ae3SPaolo Bonzini 	return ret;
5503c50d8ae3SPaolo Bonzini  fail_allocate_root:
5504c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
5505c50d8ae3SPaolo Bonzini 	return ret;
5506c50d8ae3SPaolo Bonzini }
5507c50d8ae3SPaolo Bonzini 
5508c50d8ae3SPaolo Bonzini #define BATCH_ZAP_PAGES	10
5509c50d8ae3SPaolo Bonzini static void kvm_zap_obsolete_pages(struct kvm *kvm)
5510c50d8ae3SPaolo Bonzini {
5511c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5512c50d8ae3SPaolo Bonzini 	int nr_zapped, batch = 0;
5513c50d8ae3SPaolo Bonzini 
5514c50d8ae3SPaolo Bonzini restart:
5515c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe_reverse(sp, node,
5516c50d8ae3SPaolo Bonzini 	      &kvm->arch.active_mmu_pages, link) {
5517c50d8ae3SPaolo Bonzini 		/*
5518c50d8ae3SPaolo Bonzini 		 * No obsolete valid page exists before a newly created page
5519c50d8ae3SPaolo Bonzini 		 * since active_mmu_pages is a FIFO list.
5520c50d8ae3SPaolo Bonzini 		 */
5521c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
5522c50d8ae3SPaolo Bonzini 			break;
5523c50d8ae3SPaolo Bonzini 
5524c50d8ae3SPaolo Bonzini 		/*
5525f95eec9bSSean Christopherson 		 * Invalid pages should never land back on the list of active
5526f95eec9bSSean Christopherson 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
5527f95eec9bSSean Christopherson 		 * infinite loop if the page gets put back on the list (again).
5528c50d8ae3SPaolo Bonzini 		 */
5529f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5530c50d8ae3SPaolo Bonzini 			continue;
5531c50d8ae3SPaolo Bonzini 
5532c50d8ae3SPaolo Bonzini 		/*
5533c50d8ae3SPaolo Bonzini 		 * No need to flush the TLB since we're only zapping shadow
5534c50d8ae3SPaolo Bonzini 		 * pages with an obsolete generation number and all vCPUs have
5535c50d8ae3SPaolo Bonzini 		 * loaded a new root, i.e. the shadow pages being zapped cannot
5536c50d8ae3SPaolo Bonzini 		 * be in active use by the guest.
5537c50d8ae3SPaolo Bonzini 		 */
5538c50d8ae3SPaolo Bonzini 		if (batch >= BATCH_ZAP_PAGES &&
5539531810caSBen Gardon 		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
5540c50d8ae3SPaolo Bonzini 			batch = 0;
5541c50d8ae3SPaolo Bonzini 			goto restart;
5542c50d8ae3SPaolo Bonzini 		}
5543c50d8ae3SPaolo Bonzini 
5544c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp,
5545c50d8ae3SPaolo Bonzini 				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5546c50d8ae3SPaolo Bonzini 			batch += nr_zapped;
5547c50d8ae3SPaolo Bonzini 			goto restart;
5548c50d8ae3SPaolo Bonzini 		}
5549c50d8ae3SPaolo Bonzini 	}
5550c50d8ae3SPaolo Bonzini 
5551c50d8ae3SPaolo Bonzini 	/*
5552c50d8ae3SPaolo Bonzini 	 * Trigger a remote TLB flush before freeing the page tables to ensure
5553c50d8ae3SPaolo Bonzini 	 * KVM is not in the middle of a lockless shadow page table walk, which
5554c50d8ae3SPaolo Bonzini 	 * may reference the pages.
5555c50d8ae3SPaolo Bonzini 	 */
5556c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5557c50d8ae3SPaolo Bonzini }
5558c50d8ae3SPaolo Bonzini 
5559c50d8ae3SPaolo Bonzini /*
5560c50d8ae3SPaolo Bonzini  * Fast-invalidate all shadow pages, using a lock-break technique to
5561c50d8ae3SPaolo Bonzini  * zap the obsolete pages.
5562c50d8ae3SPaolo Bonzini  *
5563c50d8ae3SPaolo Bonzini  * This is required when a memslot is being deleted or the VM is being
5564c50d8ae3SPaolo Bonzini  * destroyed; in these cases, the KVM MMU must not use any resource of
5565c50d8ae3SPaolo Bonzini  * the slot being deleted (or of any slot, for VM destruction) after
5566c50d8ae3SPaolo Bonzini  * this function is called.
5567c50d8ae3SPaolo Bonzini  */
5568c50d8ae3SPaolo Bonzini static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5569c50d8ae3SPaolo Bonzini {
5570c50d8ae3SPaolo Bonzini 	lockdep_assert_held(&kvm->slots_lock);
5571c50d8ae3SPaolo Bonzini 
5572531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5573c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_zap_all_fast(kvm);
5574c50d8ae3SPaolo Bonzini 
5575c50d8ae3SPaolo Bonzini 	/*
5576c50d8ae3SPaolo Bonzini 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5577c50d8ae3SPaolo Bonzini 	 * held for the entire duration of zapping obsolete pages, it's
5578c50d8ae3SPaolo Bonzini 	 * impossible for there to be multiple invalid generations associated
5579c50d8ae3SPaolo Bonzini 	 * with *valid* shadow pages at any given time, i.e. there is exactly
5580c50d8ae3SPaolo Bonzini 	 * one valid generation and (at most) one invalid generation.
5581c50d8ae3SPaolo Bonzini 	 */
5582c50d8ae3SPaolo Bonzini 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5583c50d8ae3SPaolo Bonzini 
5584b7cccd39SBen Gardon 	/*
5585b7cccd39SBen Gardon 	 * In order to ensure all threads see this change when handling the
5586b7cccd39SBen Gardon 	 * MMU reload signal, this must happen in the same critical section
5587b7cccd39SBen Gardon 	 * as kvm_reload_remote_mmus, and before kvm_zap_obsolete_pages, as
5588b7cccd39SBen Gardon 	 * kvm_zap_obsolete_pages could drop the MMU lock and yield.
5589b7cccd39SBen Gardon 	 */
5590b7cccd39SBen Gardon 	if (is_tdp_mmu_enabled(kvm))
5591b7cccd39SBen Gardon 		kvm_tdp_mmu_invalidate_all_roots(kvm);
5592b7cccd39SBen Gardon 
5593c50d8ae3SPaolo Bonzini 	/*
5594c50d8ae3SPaolo Bonzini 	 * Notify all vcpus to reload their shadow page tables and flush
5595c50d8ae3SPaolo Bonzini 	 * their TLBs.  All vcpus will then switch to a new shadow page
5596c50d8ae3SPaolo Bonzini 	 * table with the new mmu_valid_gen.
5597c50d8ae3SPaolo Bonzini 	 *
5598c50d8ae3SPaolo Bonzini 	 * Note: this must be done under the protection of mmu_lock;
5599c50d8ae3SPaolo Bonzini 	 * otherwise a vcpu could purge a shadow page but miss the tlb flush.
5600c50d8ae3SPaolo Bonzini 	 */
5601c50d8ae3SPaolo Bonzini 	kvm_reload_remote_mmus(kvm);
5602c50d8ae3SPaolo Bonzini 
5603c50d8ae3SPaolo Bonzini 	kvm_zap_obsolete_pages(kvm);
5604faaf05b0SBen Gardon 
5605531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
56064c6654bdSBen Gardon 
56074c6654bdSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
56084c6654bdSBen Gardon 		read_lock(&kvm->mmu_lock);
56094c6654bdSBen Gardon 		kvm_tdp_mmu_zap_invalidated_roots(kvm);
56104c6654bdSBen Gardon 		read_unlock(&kvm->mmu_lock);
56114c6654bdSBen Gardon 	}
5612c50d8ae3SPaolo Bonzini }
5613c50d8ae3SPaolo Bonzini 
5614c50d8ae3SPaolo Bonzini static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5615c50d8ae3SPaolo Bonzini {
5616c50d8ae3SPaolo Bonzini 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5617c50d8ae3SPaolo Bonzini }
5618c50d8ae3SPaolo Bonzini 
5619c50d8ae3SPaolo Bonzini static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5620c50d8ae3SPaolo Bonzini 			struct kvm_memory_slot *slot,
5621c50d8ae3SPaolo Bonzini 			struct kvm_page_track_notifier_node *node)
5622c50d8ae3SPaolo Bonzini {
5623c50d8ae3SPaolo Bonzini 	kvm_mmu_zap_all_fast(kvm);
5624c50d8ae3SPaolo Bonzini }
5625c50d8ae3SPaolo Bonzini 
5626c50d8ae3SPaolo Bonzini void kvm_mmu_init_vm(struct kvm *kvm)
5627c50d8ae3SPaolo Bonzini {
5628c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5629c50d8ae3SPaolo Bonzini 
5630d501f747SBen Gardon 	if (!kvm_mmu_init_tdp_mmu(kvm))
5631d501f747SBen Gardon 		/*
5632d501f747SBen Gardon 		 * No smp_load/store wrappers needed here as we are in
5633d501f747SBen Gardon 		 * VM init and there cannot be any memslots / other threads
5634d501f747SBen Gardon 		 * accessing this struct kvm yet.
5635d501f747SBen Gardon 		 */
5636a2557408SBen Gardon 		kvm->arch.memslots_have_rmaps = true;
5637a2557408SBen Gardon 
5638c50d8ae3SPaolo Bonzini 	node->track_write = kvm_mmu_pte_write;
5639c50d8ae3SPaolo Bonzini 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5640c50d8ae3SPaolo Bonzini 	kvm_page_track_register_notifier(kvm, node);
5641c50d8ae3SPaolo Bonzini }
5642c50d8ae3SPaolo Bonzini 
5643c50d8ae3SPaolo Bonzini void kvm_mmu_uninit_vm(struct kvm *kvm)
5644c50d8ae3SPaolo Bonzini {
5645c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5646c50d8ae3SPaolo Bonzini 
5647c50d8ae3SPaolo Bonzini 	kvm_page_track_unregister_notifier(kvm, node);
5648fe5db27dSBen Gardon 
5649fe5db27dSBen Gardon 	kvm_mmu_uninit_tdp_mmu(kvm);
5650c50d8ae3SPaolo Bonzini }
5651c50d8ae3SPaolo Bonzini 
5652c50d8ae3SPaolo Bonzini void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5653c50d8ae3SPaolo Bonzini {
5654c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
5655c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
5656c50d8ae3SPaolo Bonzini 	int i;
56571a61b7dbSSean Christopherson 	bool flush = false;
5658c50d8ae3SPaolo Bonzini 
5659e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5660531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5661c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5662c50d8ae3SPaolo Bonzini 			slots = __kvm_memslots(kvm, i);
5663c50d8ae3SPaolo Bonzini 			kvm_for_each_memslot(memslot, slots) {
5664c50d8ae3SPaolo Bonzini 				gfn_t start, end;
5665c50d8ae3SPaolo Bonzini 
5666c50d8ae3SPaolo Bonzini 				start = max(gfn_start, memslot->base_gfn);
5667c50d8ae3SPaolo Bonzini 				end = min(gfn_end, memslot->base_gfn + memslot->npages);
5668c50d8ae3SPaolo Bonzini 				if (start >= end)
5669c50d8ae3SPaolo Bonzini 					continue;
5670c50d8ae3SPaolo Bonzini 
5671e2209710SBen Gardon 				flush = slot_handle_level_range(kvm, memslot,
5672e2209710SBen Gardon 						kvm_zap_rmapp, PG_LEVEL_4K,
5673e2209710SBen Gardon 						KVM_MAX_HUGEPAGE_LEVEL, start,
5674e2209710SBen Gardon 						end - 1, true, flush);
5675c50d8ae3SPaolo Bonzini 			}
5676c50d8ae3SPaolo Bonzini 		}
5677faaf05b0SBen Gardon 		if (flush)
56781a61b7dbSSean Christopherson 			kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
5679531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5680e2209710SBen Gardon 	}
56816103bc07SBen Gardon 
56826103bc07SBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
56836103bc07SBen Gardon 		flush = false;
56846103bc07SBen Gardon 
56856103bc07SBen Gardon 		read_lock(&kvm->mmu_lock);
56866103bc07SBen Gardon 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
56876103bc07SBen Gardon 			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
56886103bc07SBen Gardon 							  gfn_end, flush, true);
56896103bc07SBen Gardon 		if (flush)
56906103bc07SBen Gardon 			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
56916103bc07SBen Gardon 							   gfn_end);
56926103bc07SBen Gardon 
56936103bc07SBen Gardon 		read_unlock(&kvm->mmu_lock);
56946103bc07SBen Gardon 	}
5695c50d8ae3SPaolo Bonzini }
5696c50d8ae3SPaolo Bonzini 
5697c50d8ae3SPaolo Bonzini static bool slot_rmap_write_protect(struct kvm *kvm,
56980a234f5dSSean Christopherson 				    struct kvm_rmap_head *rmap_head,
56990a234f5dSSean Christopherson 				    struct kvm_memory_slot *slot)
5700c50d8ae3SPaolo Bonzini {
5701c50d8ae3SPaolo Bonzini 	return __rmap_write_protect(kvm, rmap_head, false);
5702c50d8ae3SPaolo Bonzini }
5703c50d8ae3SPaolo Bonzini 
5704c50d8ae3SPaolo Bonzini void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
57053c9bd400SJay Zhou 				      struct kvm_memory_slot *memslot,
57063c9bd400SJay Zhou 				      int start_level)
5707c50d8ae3SPaolo Bonzini {
5708e2209710SBen Gardon 	bool flush = false;
5709c50d8ae3SPaolo Bonzini 
5710e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5711531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
57123c9bd400SJay Zhou 		flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5713e2209710SBen Gardon 					  start_level, KVM_MAX_HUGEPAGE_LEVEL,
5714e2209710SBen Gardon 					  false);
5715531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5716e2209710SBen Gardon 	}
5717c50d8ae3SPaolo Bonzini 
571824ae4cfaSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
571924ae4cfaSBen Gardon 		read_lock(&kvm->mmu_lock);
572024ae4cfaSBen Gardon 		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
572124ae4cfaSBen Gardon 		read_unlock(&kvm->mmu_lock);
572224ae4cfaSBen Gardon 	}
572324ae4cfaSBen Gardon 
5724c50d8ae3SPaolo Bonzini 	/*
5725c50d8ae3SPaolo Bonzini 	 * We can flush all the TLBs out of the mmu lock without TLB
5726c50d8ae3SPaolo Bonzini 	 * corruption, since we only change the spte from writable to
5727c50d8ae3SPaolo Bonzini 	 * readonly, so the only case that needs care is an spte changing
5728c50d8ae3SPaolo Bonzini 	 * from present to present (changing the spte from present to
5729c50d8ae3SPaolo Bonzini 	 * nonpresent will flush all the TLBs immediately).  In other
5730c50d8ae3SPaolo Bonzini 	 * words, the only case we care about is mmu_spte_update(), which
57315fc3424fSSean Christopherson 	 * checks Host-writable | MMU-writable instead of
57325fc3424fSSean Christopherson 	 * PT_WRITABLE_MASK, meaning it no longer depends on
57335fc3424fSSean Christopherson 	 * PT_WRITABLE_MASK.
5734c50d8ae3SPaolo Bonzini 	 */
5735c50d8ae3SPaolo Bonzini 	if (flush)
57367f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5737c50d8ae3SPaolo Bonzini }
5738c50d8ae3SPaolo Bonzini 
5739c50d8ae3SPaolo Bonzini static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
57400a234f5dSSean Christopherson 					 struct kvm_rmap_head *rmap_head,
57410a234f5dSSean Christopherson 					 struct kvm_memory_slot *slot)
5742c50d8ae3SPaolo Bonzini {
5743c50d8ae3SPaolo Bonzini 	u64 *sptep;
5744c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
5745c50d8ae3SPaolo Bonzini 	int need_tlb_flush = 0;
5746c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
5747c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5748c50d8ae3SPaolo Bonzini 
5749c50d8ae3SPaolo Bonzini restart:
5750c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
575157354682SSean Christopherson 		sp = sptep_to_sp(sptep);
5752c50d8ae3SPaolo Bonzini 		pfn = spte_to_pfn(*sptep);
5753c50d8ae3SPaolo Bonzini 
5754c50d8ae3SPaolo Bonzini 		/*
5755c50d8ae3SPaolo Bonzini 		 * Huge page mappings cannot be used for indirect shadow pages,
5756c50d8ae3SPaolo Bonzini 		 * which are found on the last rmap (level = 1) when not using
5757c50d8ae3SPaolo Bonzini 		 * tdp; such shadow pages are kept in sync with the guest's
5758c50d8ae3SPaolo Bonzini 		 * page table, and the guest page table uses 4K mappings if
5759c50d8ae3SPaolo Bonzini 		 * the indirect sp has level = 1.
5760c50d8ae3SPaolo Bonzini 		 */
5761c50d8ae3SPaolo Bonzini 		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
57629eba50f8SSean Christopherson 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
57639eba50f8SSean Christopherson 							       pfn, PG_LEVEL_NUM)) {
5764c50d8ae3SPaolo Bonzini 			pte_list_remove(rmap_head, sptep);
5765c50d8ae3SPaolo Bonzini 
5766c50d8ae3SPaolo Bonzini 			if (kvm_available_flush_tlb_with_range())
5767c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
5768c50d8ae3SPaolo Bonzini 					KVM_PAGES_PER_HPAGE(sp->role.level));
5769c50d8ae3SPaolo Bonzini 			else
5770c50d8ae3SPaolo Bonzini 				need_tlb_flush = 1;
5771c50d8ae3SPaolo Bonzini 
5772c50d8ae3SPaolo Bonzini 			goto restart;
5773c50d8ae3SPaolo Bonzini 		}
5774c50d8ae3SPaolo Bonzini 	}
5775c50d8ae3SPaolo Bonzini 
5776c50d8ae3SPaolo Bonzini 	return need_tlb_flush;
5777c50d8ae3SPaolo Bonzini }
5778c50d8ae3SPaolo Bonzini 
5779c50d8ae3SPaolo Bonzini void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5780c50d8ae3SPaolo Bonzini 				   const struct kvm_memory_slot *memslot)
5781c50d8ae3SPaolo Bonzini {
5782c50d8ae3SPaolo Bonzini 	/* FIXME: const-ify all uses of struct kvm_memory_slot.  */
57839eba50f8SSean Christopherson 	struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
578431c65657SColin Ian King 	bool flush = false;
57859eba50f8SSean Christopherson 
5786e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5787531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5788302695a5SSean Christopherson 		flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
5789302695a5SSean Christopherson 		if (flush)
5790302695a5SSean Christopherson 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
5791531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5792e2209710SBen Gardon 	}
57932db6f772SBen Gardon 
57942db6f772SBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
57952db6f772SBen Gardon 		read_lock(&kvm->mmu_lock);
57962db6f772SBen Gardon 		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
57972db6f772SBen Gardon 		if (flush)
57982db6f772SBen Gardon 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
57992db6f772SBen Gardon 		read_unlock(&kvm->mmu_lock);
58002db6f772SBen Gardon 	}
5801c50d8ae3SPaolo Bonzini }
5802c50d8ae3SPaolo Bonzini 
5803b3594ffbSSean Christopherson void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
58046c9dd6d2SPaolo Bonzini 					const struct kvm_memory_slot *memslot)
5805b3594ffbSSean Christopherson {
5806b3594ffbSSean Christopherson 	/*
58077f42aa76SSean Christopherson 	 * All current use cases for flushing the TLBs for a specific memslot
5808302695a5SSean Christopherson 	 * are related to dirty logging, and many do the flush out of mmu_lock.
58097f42aa76SSean Christopherson 	 * The interaction between the various operations on a memslot must be
58107f42aa76SSean Christopherson 	 * serialized by slots_lock to ensure the TLB flush from one operation
58117f42aa76SSean Christopherson 	 * is observed by any other operation on the same memslot.
5812b3594ffbSSean Christopherson 	 */
5813b3594ffbSSean Christopherson 	lockdep_assert_held(&kvm->slots_lock);
5814cec37648SSean Christopherson 	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5815cec37648SSean Christopherson 					   memslot->npages);
5816b3594ffbSSean Christopherson }
5817b3594ffbSSean Christopherson 
5818c50d8ae3SPaolo Bonzini void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5819c50d8ae3SPaolo Bonzini 				   struct kvm_memory_slot *memslot)
5820c50d8ae3SPaolo Bonzini {
5821e2209710SBen Gardon 	bool flush = false;
5822c50d8ae3SPaolo Bonzini 
5823e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5824531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5825e2209710SBen Gardon 		flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
5826e2209710SBen Gardon 					 false);
5827531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5828e2209710SBen Gardon 	}
5829c50d8ae3SPaolo Bonzini 
583024ae4cfaSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
583124ae4cfaSBen Gardon 		read_lock(&kvm->mmu_lock);
583224ae4cfaSBen Gardon 		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
583324ae4cfaSBen Gardon 		read_unlock(&kvm->mmu_lock);
583424ae4cfaSBen Gardon 	}
583524ae4cfaSBen Gardon 
5836c50d8ae3SPaolo Bonzini 	/*
5837c50d8ae3SPaolo Bonzini 	 * It's also safe to flush TLBs out of the mmu lock here, as currently
5838c50d8ae3SPaolo Bonzini 	 * this function is only used for dirty logging, in which case flushing
5839c50d8ae3SPaolo Bonzini 	 * the TLB out of the mmu lock also guarantees no dirty pages will be
5840c50d8ae3SPaolo Bonzini 	 * lost in the dirty_bitmap.
5841c50d8ae3SPaolo Bonzini 	 */
5842c50d8ae3SPaolo Bonzini 	if (flush)
58437f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5844c50d8ae3SPaolo Bonzini }
5845c50d8ae3SPaolo Bonzini 
5846c50d8ae3SPaolo Bonzini void kvm_mmu_zap_all(struct kvm *kvm)
5847c50d8ae3SPaolo Bonzini {
5848c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5849c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5850c50d8ae3SPaolo Bonzini 	int ign;
5851c50d8ae3SPaolo Bonzini 
5852531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5853c50d8ae3SPaolo Bonzini restart:
5854c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
5855f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5856c50d8ae3SPaolo Bonzini 			continue;
5857c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
5858c50d8ae3SPaolo Bonzini 			goto restart;
5859531810caSBen Gardon 		if (cond_resched_rwlock_write(&kvm->mmu_lock))
5860c50d8ae3SPaolo Bonzini 			goto restart;
5861c50d8ae3SPaolo Bonzini 	}
5862c50d8ae3SPaolo Bonzini 
5863c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
5864faaf05b0SBen Gardon 
5865897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
5866faaf05b0SBen Gardon 		kvm_tdp_mmu_zap_all(kvm);
5867faaf05b0SBen Gardon 
5868531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5869c50d8ae3SPaolo Bonzini }
5870c50d8ae3SPaolo Bonzini 
5871c50d8ae3SPaolo Bonzini void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5872c50d8ae3SPaolo Bonzini {
5873c50d8ae3SPaolo Bonzini 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
5874c50d8ae3SPaolo Bonzini 
5875c50d8ae3SPaolo Bonzini 	gen &= MMIO_SPTE_GEN_MASK;
5876c50d8ae3SPaolo Bonzini 
5877c50d8ae3SPaolo Bonzini 	/*
5878c50d8ae3SPaolo Bonzini 	 * Generation numbers are incremented in multiples of the number of
5879c50d8ae3SPaolo Bonzini 	 * address spaces in order to provide unique generations across all
5880c50d8ae3SPaolo Bonzini 	 * address spaces.  Strip what is effectively the address space
5881c50d8ae3SPaolo Bonzini 	 * modifier prior to checking for a wrap of the MMIO generation so
5882c50d8ae3SPaolo Bonzini 	 * that a wrap in any address space is detected.
5883c50d8ae3SPaolo Bonzini 	 */
5884c50d8ae3SPaolo Bonzini 	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
5885c50d8ae3SPaolo Bonzini 
5886c50d8ae3SPaolo Bonzini 	/*
5887c50d8ae3SPaolo Bonzini 	 * The very rare case: if the MMIO generation number has wrapped,
5888c50d8ae3SPaolo Bonzini 	 * zap all shadow pages.
5889c50d8ae3SPaolo Bonzini 	 */
5890c50d8ae3SPaolo Bonzini 	if (unlikely(gen == 0)) {
5891c50d8ae3SPaolo Bonzini 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5892c50d8ae3SPaolo Bonzini 		kvm_mmu_zap_all_fast(kvm);
5893c50d8ae3SPaolo Bonzini 	}
5894c50d8ae3SPaolo Bonzini }
5895c50d8ae3SPaolo Bonzini 
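/*
 * Worked example (annotation): with KVM_ADDRESS_SPACE_NUM == 2 the mask
 * above is ~1ull, i.e. the low bit is the address space modifier, so
 * generations that differ only in their address space compare equal and a
 * wrap in either address space yields gen == 0 here.
 */
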
5896c50d8ae3SPaolo Bonzini static unsigned long
5897c50d8ae3SPaolo Bonzini mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5898c50d8ae3SPaolo Bonzini {
5899c50d8ae3SPaolo Bonzini 	struct kvm *kvm;
5900c50d8ae3SPaolo Bonzini 	int nr_to_scan = sc->nr_to_scan;
5901c50d8ae3SPaolo Bonzini 	unsigned long freed = 0;
5902c50d8ae3SPaolo Bonzini 
5903c50d8ae3SPaolo Bonzini 	mutex_lock(&kvm_lock);
5904c50d8ae3SPaolo Bonzini 
5905c50d8ae3SPaolo Bonzini 	list_for_each_entry(kvm, &vm_list, vm_list) {
5906c50d8ae3SPaolo Bonzini 		int idx;
5907c50d8ae3SPaolo Bonzini 		LIST_HEAD(invalid_list);
5908c50d8ae3SPaolo Bonzini 
5909c50d8ae3SPaolo Bonzini 		/*
5910c50d8ae3SPaolo Bonzini 		 * Never scan more than sc->nr_to_scan VM instances.
5911c50d8ae3SPaolo Bonzini 		 * In practice this condition will not be hit, since we do not
5912c50d8ae3SPaolo Bonzini 		 * try to shrink more than one VM and it is very unlikely to
5913c50d8ae3SPaolo Bonzini 		 * see !n_used_mmu_pages so many times.
5914c50d8ae3SPaolo Bonzini 		 */
5915c50d8ae3SPaolo Bonzini 		if (!nr_to_scan--)
5916c50d8ae3SPaolo Bonzini 			break;
5917c50d8ae3SPaolo Bonzini 		/*
5918c50d8ae3SPaolo Bonzini 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5919c50d8ae3SPaolo Bonzini 		 * here.  We may skip a VM instance erroneously, but we do not
5920c50d8ae3SPaolo Bonzini 		 * want to shrink a VM that has only just started to populate
5921c50d8ae3SPaolo Bonzini 		 * its MMU anyway.
5922c50d8ae3SPaolo Bonzini 		 */
5923c50d8ae3SPaolo Bonzini 		if (!kvm->arch.n_used_mmu_pages &&
5924c50d8ae3SPaolo Bonzini 		    !kvm_has_zapped_obsolete_pages(kvm))
5925c50d8ae3SPaolo Bonzini 			continue;
5926c50d8ae3SPaolo Bonzini 
5927c50d8ae3SPaolo Bonzini 		idx = srcu_read_lock(&kvm->srcu);
5928531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5929c50d8ae3SPaolo Bonzini 
5930c50d8ae3SPaolo Bonzini 		if (kvm_has_zapped_obsolete_pages(kvm)) {
5931c50d8ae3SPaolo Bonzini 			kvm_mmu_commit_zap_page(kvm,
5932c50d8ae3SPaolo Bonzini 			      &kvm->arch.zapped_obsolete_pages);
5933c50d8ae3SPaolo Bonzini 			goto unlock;
5934c50d8ae3SPaolo Bonzini 		}
5935c50d8ae3SPaolo Bonzini 
5936ebdb292dSSean Christopherson 		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
5937c50d8ae3SPaolo Bonzini 
5938c50d8ae3SPaolo Bonzini unlock:
5939531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5940c50d8ae3SPaolo Bonzini 		srcu_read_unlock(&kvm->srcu, idx);
5941c50d8ae3SPaolo Bonzini 
5942c50d8ae3SPaolo Bonzini 		/*
5943c50d8ae3SPaolo Bonzini 		 * unfair on small ones
5944c50d8ae3SPaolo Bonzini 		 * per-vm shrinkers cry out
5945c50d8ae3SPaolo Bonzini 		 * sadness comes quickly
5946c50d8ae3SPaolo Bonzini 		 */
5947c50d8ae3SPaolo Bonzini 		list_move_tail(&kvm->vm_list, &vm_list);
5948c50d8ae3SPaolo Bonzini 		break;
5949c50d8ae3SPaolo Bonzini 	}
5950c50d8ae3SPaolo Bonzini 
5951c50d8ae3SPaolo Bonzini 	mutex_unlock(&kvm_lock);
5952c50d8ae3SPaolo Bonzini 	return freed;
5953c50d8ae3SPaolo Bonzini }
5954c50d8ae3SPaolo Bonzini 
5955c50d8ae3SPaolo Bonzini static unsigned long
5956c50d8ae3SPaolo Bonzini mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5957c50d8ae3SPaolo Bonzini {
5958c50d8ae3SPaolo Bonzini 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
5959c50d8ae3SPaolo Bonzini }
5960c50d8ae3SPaolo Bonzini 
5961c50d8ae3SPaolo Bonzini static struct shrinker mmu_shrinker = {
5962c50d8ae3SPaolo Bonzini 	.count_objects = mmu_shrink_count,
5963c50d8ae3SPaolo Bonzini 	.scan_objects = mmu_shrink_scan,
5964c50d8ae3SPaolo Bonzini 	.seeks = DEFAULT_SEEKS * 10,
5965c50d8ae3SPaolo Bonzini };
5966c50d8ae3SPaolo Bonzini 
5967c50d8ae3SPaolo Bonzini static void mmu_destroy_caches(void)
5968c50d8ae3SPaolo Bonzini {
5969c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(pte_list_desc_cache);
5970c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(mmu_page_header_cache);
5971c50d8ae3SPaolo Bonzini }
5972c50d8ae3SPaolo Bonzini 
5973c50d8ae3SPaolo Bonzini static bool get_nx_auto_mode(void)
5974c50d8ae3SPaolo Bonzini {
5975c50d8ae3SPaolo Bonzini 	/* Return true when CPU has the bug, and mitigations are ON */
5976c50d8ae3SPaolo Bonzini 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
5977c50d8ae3SPaolo Bonzini }
5978c50d8ae3SPaolo Bonzini 
5979c50d8ae3SPaolo Bonzini static void __set_nx_huge_pages(bool val)
5980c50d8ae3SPaolo Bonzini {
5981c50d8ae3SPaolo Bonzini 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
5982c50d8ae3SPaolo Bonzini }
5983c50d8ae3SPaolo Bonzini 
5984c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
5985c50d8ae3SPaolo Bonzini {
5986c50d8ae3SPaolo Bonzini 	bool old_val = nx_huge_pages;
5987c50d8ae3SPaolo Bonzini 	bool new_val;
5988c50d8ae3SPaolo Bonzini 
5989c50d8ae3SPaolo Bonzini 	/* In "auto" mode deploy workaround only if CPU has the bug. */
5990c50d8ae3SPaolo Bonzini 	if (sysfs_streq(val, "off"))
5991c50d8ae3SPaolo Bonzini 		new_val = 0;
5992c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "force"))
5993c50d8ae3SPaolo Bonzini 		new_val = 1;
5994c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "auto"))
5995c50d8ae3SPaolo Bonzini 		new_val = get_nx_auto_mode();
5996c50d8ae3SPaolo Bonzini 	else if (strtobool(val, &new_val) < 0)
5997c50d8ae3SPaolo Bonzini 		return -EINVAL;
5998c50d8ae3SPaolo Bonzini 
5999c50d8ae3SPaolo Bonzini 	__set_nx_huge_pages(new_val);
6000c50d8ae3SPaolo Bonzini 
6001c50d8ae3SPaolo Bonzini 	if (new_val != old_val) {
6002c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
6003c50d8ae3SPaolo Bonzini 
6004c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
6005c50d8ae3SPaolo Bonzini 
6006c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list) {
6007c50d8ae3SPaolo Bonzini 			mutex_lock(&kvm->slots_lock);
6008c50d8ae3SPaolo Bonzini 			kvm_mmu_zap_all_fast(kvm);
6009c50d8ae3SPaolo Bonzini 			mutex_unlock(&kvm->slots_lock);
6010c50d8ae3SPaolo Bonzini 
6011c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6012c50d8ae3SPaolo Bonzini 		}
6013c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
6014c50d8ae3SPaolo Bonzini 	}
6015c50d8ae3SPaolo Bonzini 
6016c50d8ae3SPaolo Bonzini 	return 0;
6017c50d8ae3SPaolo Bonzini }
6018c50d8ae3SPaolo Bonzini 
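/*
 * Typical usage (illustrative): the parameter is writable at runtime via
 *
 *	echo force > /sys/module/kvm/parameters/nx_huge_pages
 *
 * which lands in this setter, fast-zaps every VM's shadow pages, and wakes
 * the per-VM recovery threads.
 */
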
6019c50d8ae3SPaolo Bonzini int kvm_mmu_module_init(void)
6020c50d8ae3SPaolo Bonzini {
6021c50d8ae3SPaolo Bonzini 	int ret = -ENOMEM;
6022c50d8ae3SPaolo Bonzini 
6023c50d8ae3SPaolo Bonzini 	if (nx_huge_pages == -1)
6024c50d8ae3SPaolo Bonzini 		__set_nx_huge_pages(get_nx_auto_mode());
6025c50d8ae3SPaolo Bonzini 
6026c50d8ae3SPaolo Bonzini 	/*
6027c50d8ae3SPaolo Bonzini 	 * MMU roles use union aliasing which is, generally speaking, an
6028c50d8ae3SPaolo Bonzini 	 * undefined behavior. However, we supposedly know how compilers behave
6029c50d8ae3SPaolo Bonzini 	 * and the current status quo is unlikely to change. The guards below
6030c50d8ae3SPaolo Bonzini 	 * are there to let us know if that assumption ever becomes false.
6031c50d8ae3SPaolo Bonzini 	 */
6032c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
6033c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
6034c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
6035c50d8ae3SPaolo Bonzini 
6036c50d8ae3SPaolo Bonzini 	kvm_mmu_reset_all_pte_masks();
6037c50d8ae3SPaolo Bonzini 
6038c50d8ae3SPaolo Bonzini 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
6039c50d8ae3SPaolo Bonzini 					    sizeof(struct pte_list_desc),
6040c50d8ae3SPaolo Bonzini 					    0, SLAB_ACCOUNT, NULL);
6041c50d8ae3SPaolo Bonzini 	if (!pte_list_desc_cache)
6042c50d8ae3SPaolo Bonzini 		goto out;
6043c50d8ae3SPaolo Bonzini 
6044c50d8ae3SPaolo Bonzini 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
6045c50d8ae3SPaolo Bonzini 						  sizeof(struct kvm_mmu_page),
6046c50d8ae3SPaolo Bonzini 						  0, SLAB_ACCOUNT, NULL);
6047c50d8ae3SPaolo Bonzini 	if (!mmu_page_header_cache)
6048c50d8ae3SPaolo Bonzini 		goto out;
6049c50d8ae3SPaolo Bonzini 
6050c50d8ae3SPaolo Bonzini 	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
6051c50d8ae3SPaolo Bonzini 		goto out;
6052c50d8ae3SPaolo Bonzini 
6053c50d8ae3SPaolo Bonzini 	ret = register_shrinker(&mmu_shrinker);
6054c50d8ae3SPaolo Bonzini 	if (ret)
6055c50d8ae3SPaolo Bonzini 		goto out;
6056c50d8ae3SPaolo Bonzini 
6057c50d8ae3SPaolo Bonzini 	return 0;
6058c50d8ae3SPaolo Bonzini 
6059c50d8ae3SPaolo Bonzini out:
6060c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
6061c50d8ae3SPaolo Bonzini 	return ret;
6062c50d8ae3SPaolo Bonzini }
6063c50d8ae3SPaolo Bonzini 
6064c50d8ae3SPaolo Bonzini /*
6065c50d8ae3SPaolo Bonzini  * Calculate the number of mmu pages needed for the VM.
6066c50d8ae3SPaolo Bonzini  */
6067c50d8ae3SPaolo Bonzini unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
6068c50d8ae3SPaolo Bonzini {
6069c50d8ae3SPaolo Bonzini 	unsigned long nr_mmu_pages;
6070c50d8ae3SPaolo Bonzini 	unsigned long nr_pages = 0;
6071c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
6072c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
6073c50d8ae3SPaolo Bonzini 	int i;
6074c50d8ae3SPaolo Bonzini 
6075c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
6076c50d8ae3SPaolo Bonzini 		slots = __kvm_memslots(kvm, i);
6077c50d8ae3SPaolo Bonzini 
6078c50d8ae3SPaolo Bonzini 		kvm_for_each_memslot(memslot, slots)
6079c50d8ae3SPaolo Bonzini 			nr_pages += memslot->npages;
6080c50d8ae3SPaolo Bonzini 	}
6081c50d8ae3SPaolo Bonzini 
6082c50d8ae3SPaolo Bonzini 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
6083c50d8ae3SPaolo Bonzini 	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
6084c50d8ae3SPaolo Bonzini 
6085c50d8ae3SPaolo Bonzini 	return nr_mmu_pages;
6086c50d8ae3SPaolo Bonzini }
6087c50d8ae3SPaolo Bonzini 
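/*
 * Worked example (annotation, assuming KVM_PERMILLE_MMU_PAGES == 20 and
 * KVM_MIN_ALLOC_MMU_PAGES == 64): a VM with 4GiB of memslots spans
 * 1048576 gfns, so the default cap is 1048576 * 20 / 1000 = 20971 pages;
 * any VM below 3200 total pages gets the 64-page floor instead.
 */
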
6088c50d8ae3SPaolo Bonzini void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
6089c50d8ae3SPaolo Bonzini {
6090c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
6091c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.root_mmu);
6092c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
6093c50d8ae3SPaolo Bonzini 	mmu_free_memory_caches(vcpu);
6094c50d8ae3SPaolo Bonzini }
6095c50d8ae3SPaolo Bonzini 
6096c50d8ae3SPaolo Bonzini void kvm_mmu_module_exit(void)
6097c50d8ae3SPaolo Bonzini {
6098c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
6099c50d8ae3SPaolo Bonzini 	percpu_counter_destroy(&kvm_total_used_mmu_pages);
6100c50d8ae3SPaolo Bonzini 	unregister_shrinker(&mmu_shrinker);
6101c50d8ae3SPaolo Bonzini 	mmu_audit_disable();
6102c50d8ae3SPaolo Bonzini }
6103c50d8ae3SPaolo Bonzini 
6104c50d8ae3SPaolo Bonzini static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
6105c50d8ae3SPaolo Bonzini {
6106c50d8ae3SPaolo Bonzini 	unsigned int old_val;
6107c50d8ae3SPaolo Bonzini 	int err;
6108c50d8ae3SPaolo Bonzini 
6109c50d8ae3SPaolo Bonzini 	old_val = nx_huge_pages_recovery_ratio;
6110c50d8ae3SPaolo Bonzini 	err = param_set_uint(val, kp);
6111c50d8ae3SPaolo Bonzini 	if (err)
6112c50d8ae3SPaolo Bonzini 		return err;
6113c50d8ae3SPaolo Bonzini 
6114c50d8ae3SPaolo Bonzini 	if (READ_ONCE(nx_huge_pages) &&
6115c50d8ae3SPaolo Bonzini 	    !old_val && nx_huge_pages_recovery_ratio) {
6116c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
6117c50d8ae3SPaolo Bonzini 
6118c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
6119c50d8ae3SPaolo Bonzini 
6120c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list)
6121c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6122c50d8ae3SPaolo Bonzini 
6123c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
6124c50d8ae3SPaolo Bonzini 	}
6125c50d8ae3SPaolo Bonzini 
6126c50d8ae3SPaolo Bonzini 	return err;
6127c50d8ae3SPaolo Bonzini }
6128c50d8ae3SPaolo Bonzini 
6129c50d8ae3SPaolo Bonzini static void kvm_recover_nx_lpages(struct kvm *kvm)
6130c50d8ae3SPaolo Bonzini {
6131ade74e14SSean Christopherson 	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
6132c50d8ae3SPaolo Bonzini 	int rcu_idx;
6133c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
6134c50d8ae3SPaolo Bonzini 	unsigned int ratio;
6135c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
6136048f4980SSean Christopherson 	bool flush = false;
6137c50d8ae3SPaolo Bonzini 	ulong to_zap;
6138c50d8ae3SPaolo Bonzini 
6139c50d8ae3SPaolo Bonzini 	rcu_idx = srcu_read_lock(&kvm->srcu);
6140531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
6141c50d8ae3SPaolo Bonzini 
6142c50d8ae3SPaolo Bonzini 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
6143ade74e14SSean Christopherson 	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
61447d919c7aSSean Christopherson 	for ( ; to_zap; --to_zap) {
61457d919c7aSSean Christopherson 		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
61467d919c7aSSean Christopherson 			break;
61477d919c7aSSean Christopherson 
6148c50d8ae3SPaolo Bonzini 		/*
6149c50d8ae3SPaolo Bonzini 		 * We use a separate list instead of just using active_mmu_pages
6150c50d8ae3SPaolo Bonzini 		 * because the number of lpage_disallowed pages is expected to
6151c50d8ae3SPaolo Bonzini 		 * be relatively small compared to the total.
6152c50d8ae3SPaolo Bonzini 		 */
6153c50d8ae3SPaolo Bonzini 		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
6154c50d8ae3SPaolo Bonzini 				      struct kvm_mmu_page,
6155c50d8ae3SPaolo Bonzini 				      lpage_disallowed_link);
6156c50d8ae3SPaolo Bonzini 		WARN_ON_ONCE(!sp->lpage_disallowed);
6157897218ffSPaolo Bonzini 		if (is_tdp_mmu_page(sp)) {
6158315f02c6SPaolo Bonzini 			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
61598d1a182eSBen Gardon 		} else {
6160c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
6161c50d8ae3SPaolo Bonzini 			WARN_ON_ONCE(sp->lpage_disallowed);
616229cf0f50SBen Gardon 		}
6163c50d8ae3SPaolo Bonzini 
6164531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6165048f4980SSean Christopherson 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6166531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
6167048f4980SSean Christopherson 			flush = false;
6168c50d8ae3SPaolo Bonzini 		}
6169c50d8ae3SPaolo Bonzini 	}
6170048f4980SSean Christopherson 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6171c50d8ae3SPaolo Bonzini 
6172531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
6173c50d8ae3SPaolo Bonzini 	srcu_read_unlock(&kvm->srcu, rcu_idx);
6174c50d8ae3SPaolo Bonzini }
6175c50d8ae3SPaolo Bonzini 
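/*
 * Worked example (annotation): with the default recovery ratio of 60, a VM
 * whose stat.nx_lpage_splits is 600 zaps DIV_ROUND_UP(600, 60) = 10
 * disallowed huge pages per pass, with passes scheduled every 60 seconds
 * by the worker below.
 */
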
6176c50d8ae3SPaolo Bonzini static long get_nx_lpage_recovery_timeout(u64 start_time)
6177c50d8ae3SPaolo Bonzini {
6178c50d8ae3SPaolo Bonzini 	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
6179c50d8ae3SPaolo Bonzini 		? start_time + 60 * HZ - get_jiffies_64()
6180c50d8ae3SPaolo Bonzini 		: MAX_SCHEDULE_TIMEOUT;
6181c50d8ae3SPaolo Bonzini }
6182c50d8ae3SPaolo Bonzini 
6183c50d8ae3SPaolo Bonzini static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
6184c50d8ae3SPaolo Bonzini {
6185c50d8ae3SPaolo Bonzini 	u64 start_time;
6186c50d8ae3SPaolo Bonzini 	long remaining_time;
6187c50d8ae3SPaolo Bonzini 
6188c50d8ae3SPaolo Bonzini 	while (true) {
6189c50d8ae3SPaolo Bonzini 		start_time = get_jiffies_64();
6190c50d8ae3SPaolo Bonzini 		remaining_time = get_nx_lpage_recovery_timeout(start_time);
6191c50d8ae3SPaolo Bonzini 
6192c50d8ae3SPaolo Bonzini 		set_current_state(TASK_INTERRUPTIBLE);
6193c50d8ae3SPaolo Bonzini 		while (!kthread_should_stop() && remaining_time > 0) {
6194c50d8ae3SPaolo Bonzini 			schedule_timeout(remaining_time);
6195c50d8ae3SPaolo Bonzini 			remaining_time = get_nx_lpage_recovery_timeout(start_time);
6196c50d8ae3SPaolo Bonzini 			set_current_state(TASK_INTERRUPTIBLE);
6197c50d8ae3SPaolo Bonzini 		}
6198c50d8ae3SPaolo Bonzini 
6199c50d8ae3SPaolo Bonzini 		set_current_state(TASK_RUNNING);
6200c50d8ae3SPaolo Bonzini 
6201c50d8ae3SPaolo Bonzini 		if (kthread_should_stop())
6202c50d8ae3SPaolo Bonzini 			return 0;
6203c50d8ae3SPaolo Bonzini 
6204c50d8ae3SPaolo Bonzini 		kvm_recover_nx_lpages(kvm);
6205c50d8ae3SPaolo Bonzini 	}
6206c50d8ae3SPaolo Bonzini }
6207c50d8ae3SPaolo Bonzini 
6208c50d8ae3SPaolo Bonzini int kvm_mmu_post_init_vm(struct kvm *kvm)
6209c50d8ae3SPaolo Bonzini {
6210c50d8ae3SPaolo Bonzini 	int err;
6211c50d8ae3SPaolo Bonzini 
6212c50d8ae3SPaolo Bonzini 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6213c50d8ae3SPaolo Bonzini 					  "kvm-nx-lpage-recovery",
6214c50d8ae3SPaolo Bonzini 					  &kvm->arch.nx_lpage_recovery_thread);
6215c50d8ae3SPaolo Bonzini 	if (!err)
6216c50d8ae3SPaolo Bonzini 		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6217c50d8ae3SPaolo Bonzini 
6218c50d8ae3SPaolo Bonzini 	return err;
6219c50d8ae3SPaolo Bonzini }
6220c50d8ae3SPaolo Bonzini 
6221c50d8ae3SPaolo Bonzini void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6222c50d8ae3SPaolo Bonzini {
6223c50d8ae3SPaolo Bonzini 	if (kvm->arch.nx_lpage_recovery_thread)
6224c50d8ae3SPaolo Bonzini 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6225c50d8ae3SPaolo Bonzini }
6226