xref: /linux/arch/x86/kvm/mmu/mmu.c (revision 264d3dc1d3dca13b7eaf0c4fa7a4b2c91a5e056a)
1c50d8ae3SPaolo Bonzini // SPDX-License-Identifier: GPL-2.0-only
2c50d8ae3SPaolo Bonzini /*
3c50d8ae3SPaolo Bonzini  * Kernel-based Virtual Machine driver for Linux
4c50d8ae3SPaolo Bonzini  *
5c50d8ae3SPaolo Bonzini  * This module enables machines with Intel VT-x extensions to run virtual
6c50d8ae3SPaolo Bonzini  * machines without emulation or binary translation.
7c50d8ae3SPaolo Bonzini  *
8c50d8ae3SPaolo Bonzini  * MMU support
9c50d8ae3SPaolo Bonzini  *
10c50d8ae3SPaolo Bonzini  * Copyright (C) 2006 Qumranet, Inc.
11c50d8ae3SPaolo Bonzini  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12c50d8ae3SPaolo Bonzini  *
13c50d8ae3SPaolo Bonzini  * Authors:
14c50d8ae3SPaolo Bonzini  *   Yaniv Kamay  <yaniv@qumranet.com>
15c50d8ae3SPaolo Bonzini  *   Avi Kivity   <avi@qumranet.com>
16c50d8ae3SPaolo Bonzini  */
17c50d8ae3SPaolo Bonzini 
18c50d8ae3SPaolo Bonzini #include "irq.h"
1988197e6aS彭浩(Richard) #include "ioapic.h"
20c50d8ae3SPaolo Bonzini #include "mmu.h"
216ca9a6f3SSean Christopherson #include "mmu_internal.h"
22fe5db27dSBen Gardon #include "tdp_mmu.h"
23c50d8ae3SPaolo Bonzini #include "x86.h"
24c50d8ae3SPaolo Bonzini #include "kvm_cache_regs.h"
252f728d66SSean Christopherson #include "kvm_emulate.h"
26c50d8ae3SPaolo Bonzini #include "cpuid.h"
275a9624afSPaolo Bonzini #include "spte.h"
28c50d8ae3SPaolo Bonzini 
29c50d8ae3SPaolo Bonzini #include <linux/kvm_host.h>
30c50d8ae3SPaolo Bonzini #include <linux/types.h>
31c50d8ae3SPaolo Bonzini #include <linux/string.h>
32c50d8ae3SPaolo Bonzini #include <linux/mm.h>
33c50d8ae3SPaolo Bonzini #include <linux/highmem.h>
34c50d8ae3SPaolo Bonzini #include <linux/moduleparam.h>
35c50d8ae3SPaolo Bonzini #include <linux/export.h>
36c50d8ae3SPaolo Bonzini #include <linux/swap.h>
37c50d8ae3SPaolo Bonzini #include <linux/hugetlb.h>
38c50d8ae3SPaolo Bonzini #include <linux/compiler.h>
39c50d8ae3SPaolo Bonzini #include <linux/srcu.h>
40c50d8ae3SPaolo Bonzini #include <linux/slab.h>
41c50d8ae3SPaolo Bonzini #include <linux/sched/signal.h>
42c50d8ae3SPaolo Bonzini #include <linux/uaccess.h>
43c50d8ae3SPaolo Bonzini #include <linux/hash.h>
44c50d8ae3SPaolo Bonzini #include <linux/kern_levels.h>
45c50d8ae3SPaolo Bonzini #include <linux/kthread.h>
46c50d8ae3SPaolo Bonzini 
47c50d8ae3SPaolo Bonzini #include <asm/page.h>
48eb243d1dSIngo Molnar #include <asm/memtype.h>
49c50d8ae3SPaolo Bonzini #include <asm/cmpxchg.h>
50c50d8ae3SPaolo Bonzini #include <asm/io.h>
514a98623dSSean Christopherson #include <asm/set_memory.h>
52c50d8ae3SPaolo Bonzini #include <asm/vmx.h>
53c50d8ae3SPaolo Bonzini #include <asm/kvm_page_track.h>
54c50d8ae3SPaolo Bonzini #include "trace.h"
55c50d8ae3SPaolo Bonzini 
56fc9bf2e0SSean Christopherson #include "paging.h"
57fc9bf2e0SSean Christopherson 
58c50d8ae3SPaolo Bonzini extern bool itlb_multihit_kvm_mitigation;
59c50d8ae3SPaolo Bonzini 
60a9d6496dSShaokun Zhang int __read_mostly nx_huge_pages = -1;
614dfe4f40SJunaid Shahid static uint __read_mostly nx_huge_pages_recovery_period_ms;
62c50d8ae3SPaolo Bonzini #ifdef CONFIG_PREEMPT_RT
63c50d8ae3SPaolo Bonzini /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
64c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
65c50d8ae3SPaolo Bonzini #else
66c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
67c50d8ae3SPaolo Bonzini #endif
68c50d8ae3SPaolo Bonzini 
69c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
704dfe4f40SJunaid Shahid static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
71c50d8ae3SPaolo Bonzini 
72d5d6c18dSJoe Perches static const struct kernel_param_ops nx_huge_pages_ops = {
73c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages,
74c50d8ae3SPaolo Bonzini 	.get = param_get_bool,
75c50d8ae3SPaolo Bonzini };
76c50d8ae3SPaolo Bonzini 
774dfe4f40SJunaid Shahid static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
784dfe4f40SJunaid Shahid 	.set = set_nx_huge_pages_recovery_param,
79c50d8ae3SPaolo Bonzini 	.get = param_get_uint,
80c50d8ae3SPaolo Bonzini };
81c50d8ae3SPaolo Bonzini 
82c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
83c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages, "bool");
844dfe4f40SJunaid Shahid module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops,
85c50d8ae3SPaolo Bonzini 		&nx_huge_pages_recovery_ratio, 0644);
86c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
874dfe4f40SJunaid Shahid module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops,
884dfe4f40SJunaid Shahid 		&nx_huge_pages_recovery_period_ms, 0644);
894dfe4f40SJunaid Shahid __MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint");
90c50d8ae3SPaolo Bonzini 
9171fe7013SSean Christopherson static bool __read_mostly force_flush_and_sync_on_reuse;
9271fe7013SSean Christopherson module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
9371fe7013SSean Christopherson 
94c50d8ae3SPaolo Bonzini /*
95c50d8ae3SPaolo Bonzini  * When set to true, this enables Two-Dimensional Paging (TDP), where the
96c50d8ae3SPaolo Bonzini  * hardware walks two page tables:
97c50d8ae3SPaolo Bonzini  * 1. the guest-virtual to guest-physical table
98c50d8ae3SPaolo Bonzini  * 2. while doing 1., it also walks the guest-physical to host-physical table
99c50d8ae3SPaolo Bonzini  * If the hardware supports this, shadow paging is not needed.
100c50d8ae3SPaolo Bonzini  */
101c50d8ae3SPaolo Bonzini bool tdp_enabled = false;
102c50d8ae3SPaolo Bonzini 
1031d92d2e8SSean Christopherson static int max_huge_page_level __read_mostly;
104746700d2SWei Huang static int tdp_root_level __read_mostly;
10583013059SSean Christopherson static int max_tdp_level __read_mostly;
106703c335dSSean Christopherson 
107c50d8ae3SPaolo Bonzini enum {
108c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PAGE_FAULT,
109c50d8ae3SPaolo Bonzini 	AUDIT_POST_PAGE_FAULT,
110c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PTE_WRITE,
111c50d8ae3SPaolo Bonzini 	AUDIT_POST_PTE_WRITE,
112c50d8ae3SPaolo Bonzini 	AUDIT_PRE_SYNC,
113c50d8ae3SPaolo Bonzini 	AUDIT_POST_SYNC
114c50d8ae3SPaolo Bonzini };
115c50d8ae3SPaolo Bonzini 
116c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1175a9624afSPaolo Bonzini bool dbg = 0;
118c50d8ae3SPaolo Bonzini module_param(dbg, bool, 0644);
119c50d8ae3SPaolo Bonzini #endif
120c50d8ae3SPaolo Bonzini 
121c50d8ae3SPaolo Bonzini #define PTE_PREFETCH_NUM		8
122c50d8ae3SPaolo Bonzini 
123c50d8ae3SPaolo Bonzini #define PT32_LEVEL_BITS 10
124c50d8ae3SPaolo Bonzini 
125c50d8ae3SPaolo Bonzini #define PT32_LEVEL_SHIFT(level) \
126c50d8ae3SPaolo Bonzini 		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
127c50d8ae3SPaolo Bonzini 
128c50d8ae3SPaolo Bonzini #define PT32_LVL_OFFSET_MASK(level) \
129c50d8ae3SPaolo Bonzini 	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
130c50d8ae3SPaolo Bonzini 						* PT32_LEVEL_BITS))) - 1))
131c50d8ae3SPaolo Bonzini 
132c50d8ae3SPaolo Bonzini #define PT32_INDEX(address, level)\
133c50d8ae3SPaolo Bonzini 	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
134c50d8ae3SPaolo Bonzini 
135c50d8ae3SPaolo Bonzini 
136c50d8ae3SPaolo Bonzini #define PT32_BASE_ADDR_MASK PAGE_MASK
137c50d8ae3SPaolo Bonzini #define PT32_DIR_BASE_ADDR_MASK \
138c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
139c50d8ae3SPaolo Bonzini #define PT32_LVL_ADDR_MASK(level) \
140c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
141c50d8ae3SPaolo Bonzini 					    * PT32_LEVEL_BITS))) - 1))
142c50d8ae3SPaolo Bonzini 
143c50d8ae3SPaolo Bonzini #include <trace/events/kvm.h>
144c50d8ae3SPaolo Bonzini 
145dc1cff96SPeter Xu /* make pte_list_desc fit well in cache lines */
14613236e25SPeter Xu #define PTE_LIST_EXT 14
147c50d8ae3SPaolo Bonzini 
14813236e25SPeter Xu /*
14913236e25SPeter Xu  * Slight optimization of the cacheline layout: putting `more' and `spte_count'
15013236e25SPeter Xu  * at the start means that accessing the descriptor touches only a single
15113236e25SPeter Xu  * cacheline, both in the full case (entries == PTE_LIST_EXT) and when entries <= 6.
15213236e25SPeter Xu  */
153c50d8ae3SPaolo Bonzini struct pte_list_desc {
154c50d8ae3SPaolo Bonzini 	struct pte_list_desc *more;
15513236e25SPeter Xu 	/*
15613236e25SPeter Xu 	 * Number of entries stored in this pte_list_desc.  It need not be a
15713236e25SPeter Xu 	 * u64, but one is used for easier alignment.  PTE_LIST_EXT means full.
15813236e25SPeter Xu 	 */
15913236e25SPeter Xu 	u64 spte_count;
16013236e25SPeter Xu 	u64 *sptes[PTE_LIST_EXT];
161c50d8ae3SPaolo Bonzini };
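/*
 * Illustrative note (not part of the original file): with PTE_LIST_EXT == 14,
 * the descriptor above is 8 + 8 + 14 * 8 = 128 bytes, i.e. exactly two 64-byte
 * cachelines, and the first cacheline already covers `more', `spte_count' and
 * sptes[0..5], which is what the layout comment above relies on.
 */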
162c50d8ae3SPaolo Bonzini 
163c50d8ae3SPaolo Bonzini struct kvm_shadow_walk_iterator {
164c50d8ae3SPaolo Bonzini 	u64 addr;
165c50d8ae3SPaolo Bonzini 	hpa_t shadow_addr;
166c50d8ae3SPaolo Bonzini 	u64 *sptep;
167c50d8ae3SPaolo Bonzini 	int level;
168c50d8ae3SPaolo Bonzini 	unsigned index;
169c50d8ae3SPaolo Bonzini };
170c50d8ae3SPaolo Bonzini 
171c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
172c50d8ae3SPaolo Bonzini 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
173c50d8ae3SPaolo Bonzini 					 (_root), (_addr));                \
174c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			           \
175c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
176c50d8ae3SPaolo Bonzini 
177c50d8ae3SPaolo Bonzini #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
178c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
179c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			\
180c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
181c50d8ae3SPaolo Bonzini 
182c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
183c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
184c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker)) &&				\
185c50d8ae3SPaolo Bonzini 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
186c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&(_walker), spte))
187c50d8ae3SPaolo Bonzini 
188c50d8ae3SPaolo Bonzini static struct kmem_cache *pte_list_desc_cache;
18902c00b3aSBen Gardon struct kmem_cache *mmu_page_header_cache;
190c50d8ae3SPaolo Bonzini static struct percpu_counter kvm_total_used_mmu_pages;
191c50d8ae3SPaolo Bonzini 
192c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 spte);
193c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
194c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
195c50d8ae3SPaolo Bonzini 
196594e91a1SSean Christopherson struct kvm_mmu_role_regs {
197594e91a1SSean Christopherson 	const unsigned long cr0;
198594e91a1SSean Christopherson 	const unsigned long cr4;
199594e91a1SSean Christopherson 	const u64 efer;
200594e91a1SSean Christopherson };
201594e91a1SSean Christopherson 
202c50d8ae3SPaolo Bonzini #define CREATE_TRACE_POINTS
203c50d8ae3SPaolo Bonzini #include "mmutrace.h"
204c50d8ae3SPaolo Bonzini 
205594e91a1SSean Christopherson /*
206594e91a1SSean Christopherson  * Yes, lots of underscores.  They're a hint that you probably shouldn't be
207594e91a1SSean Christopherson  * reading from the role_regs.  Once the mmu_role is constructed, it becomes
208594e91a1SSean Christopherson  * the single source of truth for the MMU's state.
209594e91a1SSean Christopherson  */
210594e91a1SSean Christopherson #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
2114ac21457SPaolo Bonzini static inline bool __maybe_unused ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
212594e91a1SSean Christopherson {									\
213594e91a1SSean Christopherson 	return !!(regs->reg & flag);					\
214594e91a1SSean Christopherson }
215594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
216594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
217594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
218594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
219594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
220594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
221594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
222594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
223594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
224594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
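/*
 * For illustration only (not in the original file): the invocation
 * BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG) above expands to roughly:
 *
 *	static inline bool __maybe_unused
 *	____is_cr0_pg(struct kvm_mmu_role_regs *regs)
 *	{
 *		return !!(regs->cr0 & X86_CR0_PG);
 *	}
 */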
225594e91a1SSean Christopherson 
22660667724SSean Christopherson /*
22760667724SSean Christopherson  * The MMU itself (with a valid role) is the single source of truth for the
22860667724SSean Christopherson  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
22960667724SSean Christopherson  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
23060667724SSean Christopherson  * and the vCPU may be incorrect/irrelevant.
23160667724SSean Christopherson  */
23260667724SSean Christopherson #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
2334ac21457SPaolo Bonzini static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
23460667724SSean Christopherson {								\
23560667724SSean Christopherson 	return !!(mmu->mmu_role. base_or_ext . reg##_##name);	\
23660667724SSean Christopherson }
23760667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
23860667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
23960667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
24060667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
24160667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
24260667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
24360667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
24460667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
24560667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
24660667724SSean Christopherson 
247594e91a1SSean Christopherson static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
248594e91a1SSean Christopherson {
249594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = {
250594e91a1SSean Christopherson 		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
251594e91a1SSean Christopherson 		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
252594e91a1SSean Christopherson 		.efer = vcpu->arch.efer,
253594e91a1SSean Christopherson 	};
254594e91a1SSean Christopherson 
255594e91a1SSean Christopherson 	return regs;
256594e91a1SSean Christopherson }
257c50d8ae3SPaolo Bonzini 
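/*
 * Derive the root page table level implied by the register snapshot: 0 if
 * paging is disabled, 5- or 4-level for long mode depending on CR4.LA57,
 * 3-level for PAE, and 2-level for legacy 32-bit paging.
 */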
258f4bd6f73SSean Christopherson static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
259f4bd6f73SSean Christopherson {
260f4bd6f73SSean Christopherson 	if (!____is_cr0_pg(regs))
261f4bd6f73SSean Christopherson 		return 0;
262f4bd6f73SSean Christopherson 	else if (____is_efer_lma(regs))
263f4bd6f73SSean Christopherson 		return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
264f4bd6f73SSean Christopherson 					       PT64_ROOT_4LEVEL;
265f4bd6f73SSean Christopherson 	else if (____is_cr4_pae(regs))
266f4bd6f73SSean Christopherson 		return PT32E_ROOT_LEVEL;
267f4bd6f73SSean Christopherson 	else
268f4bd6f73SSean Christopherson 		return PT32_ROOT_LEVEL;
269f4bd6f73SSean Christopherson }
270c50d8ae3SPaolo Bonzini 
271c50d8ae3SPaolo Bonzini static inline bool kvm_available_flush_tlb_with_range(void)
272c50d8ae3SPaolo Bonzini {
273afaf0b2fSSean Christopherson 	return kvm_x86_ops.tlb_remote_flush_with_range;
274c50d8ae3SPaolo Bonzini }
275c50d8ae3SPaolo Bonzini 
276c50d8ae3SPaolo Bonzini static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
277c50d8ae3SPaolo Bonzini 		struct kvm_tlb_range *range)
278c50d8ae3SPaolo Bonzini {
279c50d8ae3SPaolo Bonzini 	int ret = -ENOTSUPP;
280c50d8ae3SPaolo Bonzini 
281afaf0b2fSSean Christopherson 	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
282b3646477SJason Baron 		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
283c50d8ae3SPaolo Bonzini 
284c50d8ae3SPaolo Bonzini 	if (ret)
285c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
286c50d8ae3SPaolo Bonzini }
287c50d8ae3SPaolo Bonzini 
2882f2fad08SBen Gardon void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
289c50d8ae3SPaolo Bonzini 		u64 start_gfn, u64 pages)
290c50d8ae3SPaolo Bonzini {
291c50d8ae3SPaolo Bonzini 	struct kvm_tlb_range range;
292c50d8ae3SPaolo Bonzini 
293c50d8ae3SPaolo Bonzini 	range.start_gfn = start_gfn;
294c50d8ae3SPaolo Bonzini 	range.pages = pages;
295c50d8ae3SPaolo Bonzini 
296c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_range(kvm, &range);
297c50d8ae3SPaolo Bonzini }
298c50d8ae3SPaolo Bonzini 
2998f79b064SBen Gardon static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
3008f79b064SBen Gardon 			   unsigned int access)
3018f79b064SBen Gardon {
302c236d962SSean Christopherson 	u64 spte = make_mmio_spte(vcpu, gfn, access);
3038f79b064SBen Gardon 
304c236d962SSean Christopherson 	trace_mark_mmio_spte(sptep, gfn, spte);
305c236d962SSean Christopherson 	mmu_spte_set(sptep, spte);
306c50d8ae3SPaolo Bonzini }
307c50d8ae3SPaolo Bonzini 
308c50d8ae3SPaolo Bonzini static gfn_t get_mmio_spte_gfn(u64 spte)
309c50d8ae3SPaolo Bonzini {
310c50d8ae3SPaolo Bonzini 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
311c50d8ae3SPaolo Bonzini 
3128a967d65SPaolo Bonzini 	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
313c50d8ae3SPaolo Bonzini 	       & shadow_nonpresent_or_rsvd_mask;
314c50d8ae3SPaolo Bonzini 
315c50d8ae3SPaolo Bonzini 	return gpa >> PAGE_SHIFT;
316c50d8ae3SPaolo Bonzini }
317c50d8ae3SPaolo Bonzini 
318c50d8ae3SPaolo Bonzini static unsigned get_mmio_spte_access(u64 spte)
319c50d8ae3SPaolo Bonzini {
320c50d8ae3SPaolo Bonzini 	return spte & shadow_mmio_access_mask;
321c50d8ae3SPaolo Bonzini }
322c50d8ae3SPaolo Bonzini 
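/*
 * Check that an MMIO SPTE is still valid, i.e. that its embedded memslot
 * generation matches the current generation and no memslot update is in
 * progress.
 */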
323c50d8ae3SPaolo Bonzini static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
324c50d8ae3SPaolo Bonzini {
325c50d8ae3SPaolo Bonzini 	u64 kvm_gen, spte_gen, gen;
326c50d8ae3SPaolo Bonzini 
327c50d8ae3SPaolo Bonzini 	gen = kvm_vcpu_memslots(vcpu)->generation;
328c50d8ae3SPaolo Bonzini 	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
329c50d8ae3SPaolo Bonzini 		return false;
330c50d8ae3SPaolo Bonzini 
331c50d8ae3SPaolo Bonzini 	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
332c50d8ae3SPaolo Bonzini 	spte_gen = get_mmio_spte_generation(spte);
333c50d8ae3SPaolo Bonzini 
334c50d8ae3SPaolo Bonzini 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
335c50d8ae3SPaolo Bonzini 	return likely(kvm_gen == spte_gen);
336c50d8ae3SPaolo Bonzini }
337c50d8ae3SPaolo Bonzini 
338cd313569SMohammed Gamal static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
339cd313569SMohammed Gamal                                   struct x86_exception *exception)
340cd313569SMohammed Gamal {
341cd313569SMohammed Gamal         return gpa;
342cd313569SMohammed Gamal }
343cd313569SMohammed Gamal 
344c50d8ae3SPaolo Bonzini static int is_cpuid_PSE36(void)
345c50d8ae3SPaolo Bonzini {
346c50d8ae3SPaolo Bonzini 	return 1;
347c50d8ae3SPaolo Bonzini }
348c50d8ae3SPaolo Bonzini 
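/*
 * Under the PSE-36 scheme, bits 13-16 of a large-page PDE hold physical
 * address bits 32-35; convert them into the gfn delta they contribute.
 */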
349c50d8ae3SPaolo Bonzini static gfn_t pse36_gfn_delta(u32 gpte)
350c50d8ae3SPaolo Bonzini {
351c50d8ae3SPaolo Bonzini 	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
352c50d8ae3SPaolo Bonzini 
353c50d8ae3SPaolo Bonzini 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
354c50d8ae3SPaolo Bonzini }
355c50d8ae3SPaolo Bonzini 
356c50d8ae3SPaolo Bonzini #ifdef CONFIG_X86_64
357c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
358c50d8ae3SPaolo Bonzini {
359c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
360c50d8ae3SPaolo Bonzini }
361c50d8ae3SPaolo Bonzini 
362c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
363c50d8ae3SPaolo Bonzini {
364c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
365c50d8ae3SPaolo Bonzini }
366c50d8ae3SPaolo Bonzini 
367c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
368c50d8ae3SPaolo Bonzini {
369c50d8ae3SPaolo Bonzini 	return xchg(sptep, spte);
370c50d8ae3SPaolo Bonzini }
371c50d8ae3SPaolo Bonzini 
372c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
373c50d8ae3SPaolo Bonzini {
374c50d8ae3SPaolo Bonzini 	return READ_ONCE(*sptep);
375c50d8ae3SPaolo Bonzini }
376c50d8ae3SPaolo Bonzini #else
377c50d8ae3SPaolo Bonzini union split_spte {
378c50d8ae3SPaolo Bonzini 	struct {
379c50d8ae3SPaolo Bonzini 		u32 spte_low;
380c50d8ae3SPaolo Bonzini 		u32 spte_high;
381c50d8ae3SPaolo Bonzini 	};
382c50d8ae3SPaolo Bonzini 	u64 spte;
383c50d8ae3SPaolo Bonzini };
384c50d8ae3SPaolo Bonzini 
385c50d8ae3SPaolo Bonzini static void count_spte_clear(u64 *sptep, u64 spte)
386c50d8ae3SPaolo Bonzini {
38757354682SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
388c50d8ae3SPaolo Bonzini 
389c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(spte))
390c50d8ae3SPaolo Bonzini 		return;
391c50d8ae3SPaolo Bonzini 
392c50d8ae3SPaolo Bonzini 	/* Ensure the spte is completely set before we increase the count */
393c50d8ae3SPaolo Bonzini 	smp_wmb();
394c50d8ae3SPaolo Bonzini 	sp->clear_spte_count++;
395c50d8ae3SPaolo Bonzini }
396c50d8ae3SPaolo Bonzini 
397c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
398c50d8ae3SPaolo Bonzini {
399c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
400c50d8ae3SPaolo Bonzini 
401c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
402c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
403c50d8ae3SPaolo Bonzini 
404c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
405c50d8ae3SPaolo Bonzini 
406c50d8ae3SPaolo Bonzini 	/*
407c50d8ae3SPaolo Bonzini 	 * If we map the spte from nonpresent to present, we should store
408c50d8ae3SPaolo Bonzini 	 * the high bits first and only then set the present bit, so the CPU
409c50d8ae3SPaolo Bonzini 	 * cannot fetch this spte while we are still setting it.
410c50d8ae3SPaolo Bonzini 	 */
411c50d8ae3SPaolo Bonzini 	smp_wmb();
412c50d8ae3SPaolo Bonzini 
413c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
414c50d8ae3SPaolo Bonzini }
415c50d8ae3SPaolo Bonzini 
416c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
417c50d8ae3SPaolo Bonzini {
418c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
419c50d8ae3SPaolo Bonzini 
420c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
421c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
422c50d8ae3SPaolo Bonzini 
423c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
424c50d8ae3SPaolo Bonzini 
425c50d8ae3SPaolo Bonzini 	/*
426c50d8ae3SPaolo Bonzini 	 * If we map the spte from present to nonpresent, we should clear the
427c50d8ae3SPaolo Bonzini 	 * present bit first so that the vCPU cannot fetch the stale high bits.
428c50d8ae3SPaolo Bonzini 	 */
429c50d8ae3SPaolo Bonzini 	smp_wmb();
430c50d8ae3SPaolo Bonzini 
431c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
432c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
433c50d8ae3SPaolo Bonzini }
434c50d8ae3SPaolo Bonzini 
435c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
436c50d8ae3SPaolo Bonzini {
437c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte, orig;
438c50d8ae3SPaolo Bonzini 
439c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
440c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
441c50d8ae3SPaolo Bonzini 
442c50d8ae3SPaolo Bonzini 	/* xchg acts as a barrier before the setting of the high bits */
443c50d8ae3SPaolo Bonzini 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
444c50d8ae3SPaolo Bonzini 	orig.spte_high = ssptep->spte_high;
445c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
446c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
447c50d8ae3SPaolo Bonzini 
448c50d8ae3SPaolo Bonzini 	return orig.spte;
449c50d8ae3SPaolo Bonzini }
450c50d8ae3SPaolo Bonzini 
451c50d8ae3SPaolo Bonzini /*
452c50d8ae3SPaolo Bonzini  * The idea of reading the spte the lightweight way on 32-bit hosts comes
453c50d8ae3SPaolo Bonzini  * from gup_get_pte (mm/gup.c).
454c50d8ae3SPaolo Bonzini  *
455c50d8ae3SPaolo Bonzini  * An spte TLB flush may be pending, because kvm_set_pte_rmapp
456c50d8ae3SPaolo Bonzini  * coalesces them and we are running outside of the MMU lock.  Therefore
457c50d8ae3SPaolo Bonzini  * we need to protect against in-progress updates of the spte.
458c50d8ae3SPaolo Bonzini  *
459c50d8ae3SPaolo Bonzini  * Reading the spte while an update is in progress may get the old value
460c50d8ae3SPaolo Bonzini  * for the high part of the spte.  The race is fine for a present->non-present
461c50d8ae3SPaolo Bonzini  * change (because the high part of the spte is ignored for non-present spte),
462c50d8ae3SPaolo Bonzini  * but for a present->present change we must reread the spte.
463c50d8ae3SPaolo Bonzini  *
464c50d8ae3SPaolo Bonzini  * All such changes are done in two steps (present->non-present and
465c50d8ae3SPaolo Bonzini  * non-present->present), hence it is enough to count the number of
466c50d8ae3SPaolo Bonzini  * present->non-present updates: if it changed while reading the spte,
467c50d8ae3SPaolo Bonzini  * we might have hit the race.  This is done using clear_spte_count.
468c50d8ae3SPaolo Bonzini  */
469c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
470c50d8ae3SPaolo Bonzini {
47157354682SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
472c50d8ae3SPaolo Bonzini 	union split_spte spte, *orig = (union split_spte *)sptep;
473c50d8ae3SPaolo Bonzini 	int count;
474c50d8ae3SPaolo Bonzini 
475c50d8ae3SPaolo Bonzini retry:
476c50d8ae3SPaolo Bonzini 	count = sp->clear_spte_count;
477c50d8ae3SPaolo Bonzini 	smp_rmb();
478c50d8ae3SPaolo Bonzini 
479c50d8ae3SPaolo Bonzini 	spte.spte_low = orig->spte_low;
480c50d8ae3SPaolo Bonzini 	smp_rmb();
481c50d8ae3SPaolo Bonzini 
482c50d8ae3SPaolo Bonzini 	spte.spte_high = orig->spte_high;
483c50d8ae3SPaolo Bonzini 	smp_rmb();
484c50d8ae3SPaolo Bonzini 
485c50d8ae3SPaolo Bonzini 	if (unlikely(spte.spte_low != orig->spte_low ||
486c50d8ae3SPaolo Bonzini 	      count != sp->clear_spte_count))
487c50d8ae3SPaolo Bonzini 		goto retry;
488c50d8ae3SPaolo Bonzini 
489c50d8ae3SPaolo Bonzini 	return spte.spte;
490c50d8ae3SPaolo Bonzini }
491c50d8ae3SPaolo Bonzini #endif
492c50d8ae3SPaolo Bonzini 
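/*
 * Returns true if bits of this shadow-present SPTE can change outside of
 * mmu_lock, e.g. via the lockless write-enable fast path, access tracking
 * updates, or hardware setting the Accessed/Dirty bits, in which case the
 * SPTE must be updated atomically so that those bits are not lost.
 */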
493c50d8ae3SPaolo Bonzini static bool spte_has_volatile_bits(u64 spte)
494c50d8ae3SPaolo Bonzini {
495c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(spte))
496c50d8ae3SPaolo Bonzini 		return false;
497c50d8ae3SPaolo Bonzini 
498c50d8ae3SPaolo Bonzini 	/*
499c50d8ae3SPaolo Bonzini 	 * Always update the spte atomically if it can be updated outside of
500c50d8ae3SPaolo Bonzini 	 * mmu-lock.  This ensures the dirty bit is not lost and also provides
501c50d8ae3SPaolo Bonzini 	 * a stable is_writable_pte(), which ensures a needed TLB flush is not
502c50d8ae3SPaolo Bonzini 	 * missed.
503c50d8ae3SPaolo Bonzini 	 */
504c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(spte) ||
505c50d8ae3SPaolo Bonzini 	    is_access_track_spte(spte))
506c50d8ae3SPaolo Bonzini 		return true;
507c50d8ae3SPaolo Bonzini 
508c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
509c50d8ae3SPaolo Bonzini 		if ((spte & shadow_accessed_mask) == 0 ||
510c50d8ae3SPaolo Bonzini 		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
511c50d8ae3SPaolo Bonzini 			return true;
512c50d8ae3SPaolo Bonzini 	}
513c50d8ae3SPaolo Bonzini 
514c50d8ae3SPaolo Bonzini 	return false;
515c50d8ae3SPaolo Bonzini }
516c50d8ae3SPaolo Bonzini 
517c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_set:
518c50d8ae3SPaolo Bonzini  * Set the sptep from nonpresent to present.
519c50d8ae3SPaolo Bonzini  * Note: the sptep being assigned *must* be either not present
520c50d8ae3SPaolo Bonzini  * or in a state where the hardware will not attempt to update
521c50d8ae3SPaolo Bonzini  * the spte.
522c50d8ae3SPaolo Bonzini  */
523c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 new_spte)
524c50d8ae3SPaolo Bonzini {
525c50d8ae3SPaolo Bonzini 	WARN_ON(is_shadow_present_pte(*sptep));
526c50d8ae3SPaolo Bonzini 	__set_spte(sptep, new_spte);
527c50d8ae3SPaolo Bonzini }
528c50d8ae3SPaolo Bonzini 
529c50d8ae3SPaolo Bonzini /*
530c50d8ae3SPaolo Bonzini  * Update the SPTE (excluding the PFN), but do not track changes in its
531c50d8ae3SPaolo Bonzini  * accessed/dirty status.
532c50d8ae3SPaolo Bonzini  */
533c50d8ae3SPaolo Bonzini static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
534c50d8ae3SPaolo Bonzini {
535c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
536c50d8ae3SPaolo Bonzini 
537c50d8ae3SPaolo Bonzini 	WARN_ON(!is_shadow_present_pte(new_spte));
538c50d8ae3SPaolo Bonzini 
539c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte)) {
540c50d8ae3SPaolo Bonzini 		mmu_spte_set(sptep, new_spte);
541c50d8ae3SPaolo Bonzini 		return old_spte;
542c50d8ae3SPaolo Bonzini 	}
543c50d8ae3SPaolo Bonzini 
544c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
545c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, new_spte);
546c50d8ae3SPaolo Bonzini 	else
547c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, new_spte);
548c50d8ae3SPaolo Bonzini 
549c50d8ae3SPaolo Bonzini 	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
550c50d8ae3SPaolo Bonzini 
551c50d8ae3SPaolo Bonzini 	return old_spte;
552c50d8ae3SPaolo Bonzini }
553c50d8ae3SPaolo Bonzini 
554c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_update:
555c50d8ae3SPaolo Bonzini  * Update the state bits; this implies the mapped pfn is not changed.
556c50d8ae3SPaolo Bonzini  *
557c50d8ae3SPaolo Bonzini  * Whenever we overwrite a writable spte with a read-only one, we
558c50d8ae3SPaolo Bonzini  * should flush remote TLBs.  Otherwise rmap_write_protect
559c50d8ae3SPaolo Bonzini  * will find a read-only spte, even though the writable spte
560c50d8ae3SPaolo Bonzini  * might still be cached in a CPU's TLB; the return value indicates
561c50d8ae3SPaolo Bonzini  * this case.
562c50d8ae3SPaolo Bonzini  *
563c50d8ae3SPaolo Bonzini  * Returns true if the TLB needs to be flushed
564c50d8ae3SPaolo Bonzini  */
565c50d8ae3SPaolo Bonzini static bool mmu_spte_update(u64 *sptep, u64 new_spte)
566c50d8ae3SPaolo Bonzini {
567c50d8ae3SPaolo Bonzini 	bool flush = false;
568c50d8ae3SPaolo Bonzini 	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
569c50d8ae3SPaolo Bonzini 
570c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
571c50d8ae3SPaolo Bonzini 		return false;
572c50d8ae3SPaolo Bonzini 
573c50d8ae3SPaolo Bonzini 	/*
574c50d8ae3SPaolo Bonzini 	 * Updating the spte outside of mmu-lock is safe, since
575c50d8ae3SPaolo Bonzini 	 * we always update it atomically; see the comments in
576c50d8ae3SPaolo Bonzini 	 * spte_has_volatile_bits().
577c50d8ae3SPaolo Bonzini 	 */
578c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(old_spte) &&
579c50d8ae3SPaolo Bonzini 	      !is_writable_pte(new_spte))
580c50d8ae3SPaolo Bonzini 		flush = true;
581c50d8ae3SPaolo Bonzini 
582c50d8ae3SPaolo Bonzini 	/*
583c50d8ae3SPaolo Bonzini 	 * Flush TLB when accessed/dirty states are changed in the page tables,
584c50d8ae3SPaolo Bonzini 	 * to guarantee consistency between TLB and page tables.
585c50d8ae3SPaolo Bonzini 	 */
586c50d8ae3SPaolo Bonzini 
587c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
588c50d8ae3SPaolo Bonzini 		flush = true;
589c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
590c50d8ae3SPaolo Bonzini 	}
591c50d8ae3SPaolo Bonzini 
592c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
593c50d8ae3SPaolo Bonzini 		flush = true;
594c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
595c50d8ae3SPaolo Bonzini 	}
596c50d8ae3SPaolo Bonzini 
597c50d8ae3SPaolo Bonzini 	return flush;
598c50d8ae3SPaolo Bonzini }
599c50d8ae3SPaolo Bonzini 
600c50d8ae3SPaolo Bonzini /*
601c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_track_bits:
602c50d8ae3SPaolo Bonzini  * It sets the sptep from present to nonpresent and tracks the
603c50d8ae3SPaolo Bonzini  * state bits; it is used to clear a last-level sptep.
6047fa2a347SSean Christopherson  * Returns the old PTE.
605c50d8ae3SPaolo Bonzini  */
60671f51d2cSMingwei Zhang static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
607c50d8ae3SPaolo Bonzini {
608c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
609c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
61071f51d2cSMingwei Zhang 	int level = sptep_to_sp(sptep)->role.level;
611c50d8ae3SPaolo Bonzini 
612c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
613c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, 0ull);
614c50d8ae3SPaolo Bonzini 	else
615c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, 0ull);
616c50d8ae3SPaolo Bonzini 
617c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
6187fa2a347SSean Christopherson 		return old_spte;
619c50d8ae3SPaolo Bonzini 
62071f51d2cSMingwei Zhang 	kvm_update_page_stats(kvm, level, -1);
62171f51d2cSMingwei Zhang 
622c50d8ae3SPaolo Bonzini 	pfn = spte_to_pfn(old_spte);
623c50d8ae3SPaolo Bonzini 
624c50d8ae3SPaolo Bonzini 	/*
625c50d8ae3SPaolo Bonzini 	 * KVM does not hold a refcount on the pages used by the
626c50d8ae3SPaolo Bonzini 	 * KVM MMU, so before such a page is reclaimed it should
627c50d8ae3SPaolo Bonzini 	 * first be unmapped from the MMU.
628c50d8ae3SPaolo Bonzini 	 */
629c50d8ae3SPaolo Bonzini 	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
630c50d8ae3SPaolo Bonzini 
631c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte))
632c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(pfn);
633c50d8ae3SPaolo Bonzini 
634c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte))
635c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(pfn);
636c50d8ae3SPaolo Bonzini 
6377fa2a347SSean Christopherson 	return old_spte;
638c50d8ae3SPaolo Bonzini }
639c50d8ae3SPaolo Bonzini 
640c50d8ae3SPaolo Bonzini /*
641c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_no_track:
642c50d8ae3SPaolo Bonzini  * Directly clear the spte without caring about the state bits of the sptep;
643c50d8ae3SPaolo Bonzini  * it is used to clear upper-level sptes.
644c50d8ae3SPaolo Bonzini  */
645c50d8ae3SPaolo Bonzini static void mmu_spte_clear_no_track(u64 *sptep)
646c50d8ae3SPaolo Bonzini {
647c50d8ae3SPaolo Bonzini 	__update_clear_spte_fast(sptep, 0ull);
648c50d8ae3SPaolo Bonzini }
649c50d8ae3SPaolo Bonzini 
650c50d8ae3SPaolo Bonzini static u64 mmu_spte_get_lockless(u64 *sptep)
651c50d8ae3SPaolo Bonzini {
652c50d8ae3SPaolo Bonzini 	return __get_spte_lockless(sptep);
653c50d8ae3SPaolo Bonzini }
654c50d8ae3SPaolo Bonzini 
655c50d8ae3SPaolo Bonzini /* Restore an acc-track PTE back to a regular PTE */
656c50d8ae3SPaolo Bonzini static u64 restore_acc_track_spte(u64 spte)
657c50d8ae3SPaolo Bonzini {
658c50d8ae3SPaolo Bonzini 	u64 new_spte = spte;
6598a967d65SPaolo Bonzini 	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
6608a967d65SPaolo Bonzini 			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
661c50d8ae3SPaolo Bonzini 
662c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(spte_ad_enabled(spte));
663c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!is_access_track_spte(spte));
664c50d8ae3SPaolo Bonzini 
665c50d8ae3SPaolo Bonzini 	new_spte &= ~shadow_acc_track_mask;
6668a967d65SPaolo Bonzini 	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
6678a967d65SPaolo Bonzini 		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
668c50d8ae3SPaolo Bonzini 	new_spte |= saved_bits;
669c50d8ae3SPaolo Bonzini 
670c50d8ae3SPaolo Bonzini 	return new_spte;
671c50d8ae3SPaolo Bonzini }
672c50d8ae3SPaolo Bonzini 
673c50d8ae3SPaolo Bonzini /* Returns the Accessed status of the PTE and resets it at the same time. */
674c50d8ae3SPaolo Bonzini static bool mmu_spte_age(u64 *sptep)
675c50d8ae3SPaolo Bonzini {
676c50d8ae3SPaolo Bonzini 	u64 spte = mmu_spte_get_lockless(sptep);
677c50d8ae3SPaolo Bonzini 
678c50d8ae3SPaolo Bonzini 	if (!is_accessed_spte(spte))
679c50d8ae3SPaolo Bonzini 		return false;
680c50d8ae3SPaolo Bonzini 
681c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
682c50d8ae3SPaolo Bonzini 		clear_bit((ffs(shadow_accessed_mask) - 1),
683c50d8ae3SPaolo Bonzini 			  (unsigned long *)sptep);
684c50d8ae3SPaolo Bonzini 	} else {
685c50d8ae3SPaolo Bonzini 		/*
686c50d8ae3SPaolo Bonzini 		 * Capture the dirty status of the page, so that it doesn't get
687c50d8ae3SPaolo Bonzini 		 * lost when the SPTE is marked for access tracking.
688c50d8ae3SPaolo Bonzini 		 */
689c50d8ae3SPaolo Bonzini 		if (is_writable_pte(spte))
690c50d8ae3SPaolo Bonzini 			kvm_set_pfn_dirty(spte_to_pfn(spte));
691c50d8ae3SPaolo Bonzini 
692c50d8ae3SPaolo Bonzini 		spte = mark_spte_for_access_track(spte);
693c50d8ae3SPaolo Bonzini 		mmu_spte_update_no_track(sptep, spte);
694c50d8ae3SPaolo Bonzini 	}
695c50d8ae3SPaolo Bonzini 
696c50d8ae3SPaolo Bonzini 	return true;
697c50d8ae3SPaolo Bonzini }
698c50d8ae3SPaolo Bonzini 
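/*
 * Begin a lockless walk of the shadow page tables.  The TDP MMU has its own
 * scheme (kvm_tdp_mmu_walk_lockless_begin()); the legacy MMU disables IRQs
 * and publishes READING_SHADOW_PAGE_TABLES so that remote TLB flushes, and
 * hence page table freeing, wait for the walker.
 */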
699c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
700c50d8ae3SPaolo Bonzini {
701c5c8c7c5SDavid Matlack 	if (is_tdp_mmu(vcpu->arch.mmu)) {
702c5c8c7c5SDavid Matlack 		kvm_tdp_mmu_walk_lockless_begin();
703c5c8c7c5SDavid Matlack 	} else {
704c50d8ae3SPaolo Bonzini 		/*
705c50d8ae3SPaolo Bonzini 		 * Prevent page table teardown by making anyone freeing page tables
706c50d8ae3SPaolo Bonzini 		 * wait during the kvm_flush_remote_tlbs() IPI to all active vCPUs.
707c50d8ae3SPaolo Bonzini 		 */
708c50d8ae3SPaolo Bonzini 		local_irq_disable();
709c50d8ae3SPaolo Bonzini 
710c50d8ae3SPaolo Bonzini 		/*
711c50d8ae3SPaolo Bonzini 		 * Make sure a following spte read is not reordered ahead of the write
712c50d8ae3SPaolo Bonzini 		 * to vcpu->mode.
713c50d8ae3SPaolo Bonzini 		 */
714c50d8ae3SPaolo Bonzini 		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
715c50d8ae3SPaolo Bonzini 	}
716c5c8c7c5SDavid Matlack }
717c50d8ae3SPaolo Bonzini 
718c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
719c50d8ae3SPaolo Bonzini {
720c5c8c7c5SDavid Matlack 	if (is_tdp_mmu(vcpu->arch.mmu)) {
721c5c8c7c5SDavid Matlack 		kvm_tdp_mmu_walk_lockless_end();
722c5c8c7c5SDavid Matlack 	} else {
723c50d8ae3SPaolo Bonzini 		/*
724c50d8ae3SPaolo Bonzini 		 * Make sure the write to vcpu->mode is not reordered in front of
725c50d8ae3SPaolo Bonzini 		 * reads of sptes.  If it is, kvm_mmu_commit_zap_page() can see us
726c50d8ae3SPaolo Bonzini 		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
727c50d8ae3SPaolo Bonzini 		 */
728c50d8ae3SPaolo Bonzini 		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
729c50d8ae3SPaolo Bonzini 		local_irq_enable();
730c50d8ae3SPaolo Bonzini 	}
731c5c8c7c5SDavid Matlack }
732c50d8ae3SPaolo Bonzini 
733378f5cd6SSean Christopherson static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
734c50d8ae3SPaolo Bonzini {
735c50d8ae3SPaolo Bonzini 	int r;
736c50d8ae3SPaolo Bonzini 
737531281adSSean Christopherson 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
73894ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
739531281adSSean Christopherson 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
740c50d8ae3SPaolo Bonzini 	if (r)
741c50d8ae3SPaolo Bonzini 		return r;
74294ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
743171a90d7SSean Christopherson 				       PT64_ROOT_MAX_LEVEL);
744171a90d7SSean Christopherson 	if (r)
745171a90d7SSean Christopherson 		return r;
746378f5cd6SSean Christopherson 	if (maybe_indirect) {
74794ce87efSSean Christopherson 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
748171a90d7SSean Christopherson 					       PT64_ROOT_MAX_LEVEL);
749c50d8ae3SPaolo Bonzini 		if (r)
750c50d8ae3SPaolo Bonzini 			return r;
751378f5cd6SSean Christopherson 	}
75294ce87efSSean Christopherson 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
753531281adSSean Christopherson 					  PT64_ROOT_MAX_LEVEL);
754c50d8ae3SPaolo Bonzini }
755c50d8ae3SPaolo Bonzini 
756c50d8ae3SPaolo Bonzini static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
757c50d8ae3SPaolo Bonzini {
75894ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
75994ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
76094ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
76194ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
762c50d8ae3SPaolo Bonzini }
763c50d8ae3SPaolo Bonzini 
764c50d8ae3SPaolo Bonzini static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
765c50d8ae3SPaolo Bonzini {
76694ce87efSSean Christopherson 	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
767c50d8ae3SPaolo Bonzini }
768c50d8ae3SPaolo Bonzini 
769c50d8ae3SPaolo Bonzini static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
770c50d8ae3SPaolo Bonzini {
771c50d8ae3SPaolo Bonzini 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
772c50d8ae3SPaolo Bonzini }
773c50d8ae3SPaolo Bonzini 
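/*
 * Return the gfn mapped by the SPTE at @index: for direct shadow pages it is
 * computed from sp->gfn and the index, for indirect pages it is read from the
 * sp->gfns array.
 */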
774c50d8ae3SPaolo Bonzini static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
775c50d8ae3SPaolo Bonzini {
776c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
777c50d8ae3SPaolo Bonzini 		return sp->gfns[index];
778c50d8ae3SPaolo Bonzini 
779c50d8ae3SPaolo Bonzini 	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
780c50d8ae3SPaolo Bonzini }
781c50d8ae3SPaolo Bonzini 
782c50d8ae3SPaolo Bonzini static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
783c50d8ae3SPaolo Bonzini {
784c50d8ae3SPaolo Bonzini 	if (!sp->role.direct) {
785c50d8ae3SPaolo Bonzini 		sp->gfns[index] = gfn;
786c50d8ae3SPaolo Bonzini 		return;
787c50d8ae3SPaolo Bonzini 	}
788c50d8ae3SPaolo Bonzini 
789c50d8ae3SPaolo Bonzini 	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
790c50d8ae3SPaolo Bonzini 		pr_err_ratelimited("gfn mismatch under direct page %llx "
791c50d8ae3SPaolo Bonzini 				   "(expected %llx, got %llx)\n",
792c50d8ae3SPaolo Bonzini 				   sp->gfn,
793c50d8ae3SPaolo Bonzini 				   kvm_mmu_page_get_gfn(sp, index), gfn);
794c50d8ae3SPaolo Bonzini }
795c50d8ae3SPaolo Bonzini 
796c50d8ae3SPaolo Bonzini /*
797c50d8ae3SPaolo Bonzini  * Return the pointer to the large page information for a given gfn,
798c50d8ae3SPaolo Bonzini  * handling slots that are not large page aligned.
799c50d8ae3SPaolo Bonzini  */
800c50d8ae3SPaolo Bonzini static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
8018ca6f063SBen Gardon 		const struct kvm_memory_slot *slot, int level)
802c50d8ae3SPaolo Bonzini {
803c50d8ae3SPaolo Bonzini 	unsigned long idx;
804c50d8ae3SPaolo Bonzini 
805c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
806c50d8ae3SPaolo Bonzini 	return &slot->arch.lpage_info[level - 2][idx];
807c50d8ae3SPaolo Bonzini }
808c50d8ae3SPaolo Bonzini 
809269e9552SHamza Mahfooz static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
810c50d8ae3SPaolo Bonzini 					    gfn_t gfn, int count)
811c50d8ae3SPaolo Bonzini {
812c50d8ae3SPaolo Bonzini 	struct kvm_lpage_info *linfo;
813c50d8ae3SPaolo Bonzini 	int i;
814c50d8ae3SPaolo Bonzini 
8153bae0459SSean Christopherson 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
816c50d8ae3SPaolo Bonzini 		linfo = lpage_info_slot(gfn, slot, i);
817c50d8ae3SPaolo Bonzini 		linfo->disallow_lpage += count;
818c50d8ae3SPaolo Bonzini 		WARN_ON(linfo->disallow_lpage < 0);
819c50d8ae3SPaolo Bonzini 	}
820c50d8ae3SPaolo Bonzini }
821c50d8ae3SPaolo Bonzini 
822269e9552SHamza Mahfooz void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
823c50d8ae3SPaolo Bonzini {
824c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, 1);
825c50d8ae3SPaolo Bonzini }
826c50d8ae3SPaolo Bonzini 
827269e9552SHamza Mahfooz void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
828c50d8ae3SPaolo Bonzini {
829c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, -1);
830c50d8ae3SPaolo Bonzini }
831c50d8ae3SPaolo Bonzini 
832c50d8ae3SPaolo Bonzini static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
833c50d8ae3SPaolo Bonzini {
834c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
835c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
836c50d8ae3SPaolo Bonzini 	gfn_t gfn;
837c50d8ae3SPaolo Bonzini 
838c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages++;
839c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
840c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
841c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
842c50d8ae3SPaolo Bonzini 
843c50d8ae3SPaolo Bonzini 	/* The non-leaf shadow pages are kept read-only. */
8443bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
845c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_add_page(kvm, slot, gfn,
846c50d8ae3SPaolo Bonzini 						    KVM_PAGE_TRACK_WRITE);
847c50d8ae3SPaolo Bonzini 
848c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
849c50d8ae3SPaolo Bonzini }
850c50d8ae3SPaolo Bonzini 
85129cf0f50SBen Gardon void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
852c50d8ae3SPaolo Bonzini {
853c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
854c50d8ae3SPaolo Bonzini 		return;
855c50d8ae3SPaolo Bonzini 
856c50d8ae3SPaolo Bonzini 	++kvm->stat.nx_lpage_splits;
857c50d8ae3SPaolo Bonzini 	list_add_tail(&sp->lpage_disallowed_link,
858c50d8ae3SPaolo Bonzini 		      &kvm->arch.lpage_disallowed_mmu_pages);
859c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = true;
860c50d8ae3SPaolo Bonzini }
861c50d8ae3SPaolo Bonzini 
862c50d8ae3SPaolo Bonzini static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
863c50d8ae3SPaolo Bonzini {
864c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
865c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
866c50d8ae3SPaolo Bonzini 	gfn_t gfn;
867c50d8ae3SPaolo Bonzini 
868c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages--;
869c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
870c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
871c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
8723bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
873c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
874c50d8ae3SPaolo Bonzini 						       KVM_PAGE_TRACK_WRITE);
875c50d8ae3SPaolo Bonzini 
876c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_allow_lpage(slot, gfn);
877c50d8ae3SPaolo Bonzini }
878c50d8ae3SPaolo Bonzini 
87929cf0f50SBen Gardon void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
880c50d8ae3SPaolo Bonzini {
881c50d8ae3SPaolo Bonzini 	--kvm->stat.nx_lpage_splits;
882c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = false;
883c50d8ae3SPaolo Bonzini 	list_del(&sp->lpage_disallowed_link);
884c50d8ae3SPaolo Bonzini }
885c50d8ae3SPaolo Bonzini 
886c50d8ae3SPaolo Bonzini static struct kvm_memory_slot *
887c50d8ae3SPaolo Bonzini gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
888c50d8ae3SPaolo Bonzini 			    bool no_dirty_log)
889c50d8ae3SPaolo Bonzini {
890c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
891c50d8ae3SPaolo Bonzini 
892c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
89391b0d268SPaolo Bonzini 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
89491b0d268SPaolo Bonzini 		return NULL;
895044c59c4SPeter Xu 	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
89691b0d268SPaolo Bonzini 		return NULL;
897c50d8ae3SPaolo Bonzini 
898c50d8ae3SPaolo Bonzini 	return slot;
899c50d8ae3SPaolo Bonzini }
900c50d8ae3SPaolo Bonzini 
901c50d8ae3SPaolo Bonzini /*
902c50d8ae3SPaolo Bonzini  * About rmap_head encoding:
903c50d8ae3SPaolo Bonzini  *
904c50d8ae3SPaolo Bonzini  * If bit zero of rmap_head->val is clear, then it points to the only spte
905c50d8ae3SPaolo Bonzini  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
906c50d8ae3SPaolo Bonzini  * pte_list_desc containing more mappings.
907c50d8ae3SPaolo Bonzini  */
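/*
 * A minimal decoding sketch (illustration only, mirroring rmap_get_first()
 * below): a single-spte head is read as
 *
 *	sptep = (u64 *)rmap_head->val;
 *
 * whereas a chained head is read as
 *
 *	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 */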
908c50d8ae3SPaolo Bonzini 
909c50d8ae3SPaolo Bonzini /*
910c50d8ae3SPaolo Bonzini  * Returns the number of pointers in the rmap chain, not counting the new one.
911c50d8ae3SPaolo Bonzini  */
912c50d8ae3SPaolo Bonzini static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
913c50d8ae3SPaolo Bonzini 			struct kvm_rmap_head *rmap_head)
914c50d8ae3SPaolo Bonzini {
915c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
91613236e25SPeter Xu 	int count = 0;
917c50d8ae3SPaolo Bonzini 
918c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
919805a0f83SStephen Zhang 		rmap_printk("%p %llx 0->1\n", spte, *spte);
920c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)spte;
921c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
922805a0f83SStephen Zhang 		rmap_printk("%p %llx 1->many\n", spte, *spte);
923c50d8ae3SPaolo Bonzini 		desc = mmu_alloc_pte_list_desc(vcpu);
924c50d8ae3SPaolo Bonzini 		desc->sptes[0] = (u64 *)rmap_head->val;
925c50d8ae3SPaolo Bonzini 		desc->sptes[1] = spte;
92613236e25SPeter Xu 		desc->spte_count = 2;
927c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)desc | 1;
928c50d8ae3SPaolo Bonzini 		++count;
929c50d8ae3SPaolo Bonzini 	} else {
930805a0f83SStephen Zhang 		rmap_printk("%p %llx many->many\n", spte, *spte);
931c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
93213236e25SPeter Xu 		while (desc->spte_count == PTE_LIST_EXT) {
933c50d8ae3SPaolo Bonzini 			count += PTE_LIST_EXT;
934c6c4f961SLi RongQing 			if (!desc->more) {
935c50d8ae3SPaolo Bonzini 				desc->more = mmu_alloc_pte_list_desc(vcpu);
936c50d8ae3SPaolo Bonzini 				desc = desc->more;
93713236e25SPeter Xu 				desc->spte_count = 0;
938c6c4f961SLi RongQing 				break;
939c6c4f961SLi RongQing 			}
940c6c4f961SLi RongQing 			desc = desc->more;
941c50d8ae3SPaolo Bonzini 		}
94213236e25SPeter Xu 		count += desc->spte_count;
94313236e25SPeter Xu 		desc->sptes[desc->spte_count++] = spte;
944c50d8ae3SPaolo Bonzini 	}
945c50d8ae3SPaolo Bonzini 	return count;
946c50d8ae3SPaolo Bonzini }
947c50d8ae3SPaolo Bonzini 
948c50d8ae3SPaolo Bonzini static void
949c50d8ae3SPaolo Bonzini pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
950c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *desc, int i,
951c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *prev_desc)
952c50d8ae3SPaolo Bonzini {
95313236e25SPeter Xu 	int j = desc->spte_count - 1;
954c50d8ae3SPaolo Bonzini 
955c50d8ae3SPaolo Bonzini 	desc->sptes[i] = desc->sptes[j];
956c50d8ae3SPaolo Bonzini 	desc->sptes[j] = NULL;
95713236e25SPeter Xu 	desc->spte_count--;
95813236e25SPeter Xu 	if (desc->spte_count)
959c50d8ae3SPaolo Bonzini 		return;
960c50d8ae3SPaolo Bonzini 	if (!prev_desc && !desc->more)
961fe3c2b4cSMiaohe Lin 		rmap_head->val = 0;
962c50d8ae3SPaolo Bonzini 	else
963c50d8ae3SPaolo Bonzini 		if (prev_desc)
964c50d8ae3SPaolo Bonzini 			prev_desc->more = desc->more;
965c50d8ae3SPaolo Bonzini 		else
966c50d8ae3SPaolo Bonzini 			rmap_head->val = (unsigned long)desc->more | 1;
967c50d8ae3SPaolo Bonzini 	mmu_free_pte_list_desc(desc);
968c50d8ae3SPaolo Bonzini }
969c50d8ae3SPaolo Bonzini 
970c50d8ae3SPaolo Bonzini static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
971c50d8ae3SPaolo Bonzini {
972c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
973c50d8ae3SPaolo Bonzini 	struct pte_list_desc *prev_desc;
974c50d8ae3SPaolo Bonzini 	int i;
975c50d8ae3SPaolo Bonzini 
976c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
977c50d8ae3SPaolo Bonzini 		pr_err("%s: %p 0->BUG\n", __func__, spte);
978c50d8ae3SPaolo Bonzini 		BUG();
979c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
980805a0f83SStephen Zhang 		rmap_printk("%p 1->0\n", spte);
981c50d8ae3SPaolo Bonzini 		if ((u64 *)rmap_head->val != spte) {
982c50d8ae3SPaolo Bonzini 			pr_err("%s:  %p 1->BUG\n", __func__, spte);
983c50d8ae3SPaolo Bonzini 			BUG();
984c50d8ae3SPaolo Bonzini 		}
985c50d8ae3SPaolo Bonzini 		rmap_head->val = 0;
986c50d8ae3SPaolo Bonzini 	} else {
987805a0f83SStephen Zhang 		rmap_printk("%p many->many\n", spte);
988c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
989c50d8ae3SPaolo Bonzini 		prev_desc = NULL;
990c50d8ae3SPaolo Bonzini 		while (desc) {
99113236e25SPeter Xu 			for (i = 0; i < desc->spte_count; ++i) {
992c50d8ae3SPaolo Bonzini 				if (desc->sptes[i] == spte) {
993c50d8ae3SPaolo Bonzini 					pte_list_desc_remove_entry(rmap_head,
994c50d8ae3SPaolo Bonzini 							desc, i, prev_desc);
995c50d8ae3SPaolo Bonzini 					return;
996c50d8ae3SPaolo Bonzini 				}
997c50d8ae3SPaolo Bonzini 			}
998c50d8ae3SPaolo Bonzini 			prev_desc = desc;
999c50d8ae3SPaolo Bonzini 			desc = desc->more;
1000c50d8ae3SPaolo Bonzini 		}
1001c50d8ae3SPaolo Bonzini 		pr_err("%s: %p many->many\n", __func__, spte);
1002c50d8ae3SPaolo Bonzini 		BUG();
1003c50d8ae3SPaolo Bonzini 	}
1004c50d8ae3SPaolo Bonzini }
1005c50d8ae3SPaolo Bonzini 
100671f51d2cSMingwei Zhang static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
100771f51d2cSMingwei Zhang 			    u64 *sptep)
1008c50d8ae3SPaolo Bonzini {
100971f51d2cSMingwei Zhang 	mmu_spte_clear_track_bits(kvm, sptep);
1010c50d8ae3SPaolo Bonzini 	__pte_list_remove(sptep, rmap_head);
1011c50d8ae3SPaolo Bonzini }
1012c50d8ae3SPaolo Bonzini 
1013a75b5404SPeter Xu /* Return true if the rmap existed, false otherwise */
101471f51d2cSMingwei Zhang static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1015a75b5404SPeter Xu {
1016a75b5404SPeter Xu 	struct pte_list_desc *desc, *next;
1017a75b5404SPeter Xu 	int i;
1018a75b5404SPeter Xu 
1019a75b5404SPeter Xu 	if (!rmap_head->val)
1020a75b5404SPeter Xu 		return false;
1021a75b5404SPeter Xu 
1022a75b5404SPeter Xu 	if (!(rmap_head->val & 1)) {
102371f51d2cSMingwei Zhang 		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
1024a75b5404SPeter Xu 		goto out;
1025a75b5404SPeter Xu 	}
1026a75b5404SPeter Xu 
1027a75b5404SPeter Xu 	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1028a75b5404SPeter Xu 
1029a75b5404SPeter Xu 	for (; desc; desc = next) {
1030a75b5404SPeter Xu 		for (i = 0; i < desc->spte_count; i++)
103171f51d2cSMingwei Zhang 			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
1032a75b5404SPeter Xu 		next = desc->more;
1033a75b5404SPeter Xu 		mmu_free_pte_list_desc(desc);
1034a75b5404SPeter Xu 	}
1035a75b5404SPeter Xu out:
1036a75b5404SPeter Xu 	/* rmap_head is meaningless now, remember to reset it */
1037a75b5404SPeter Xu 	rmap_head->val = 0;
1038a75b5404SPeter Xu 	return true;
1039a75b5404SPeter Xu }
1040a75b5404SPeter Xu 
10413bcd0662SPeter Xu unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
10423bcd0662SPeter Xu {
10433bcd0662SPeter Xu 	struct pte_list_desc *desc;
10443bcd0662SPeter Xu 	unsigned int count = 0;
10453bcd0662SPeter Xu 
10463bcd0662SPeter Xu 	if (!rmap_head->val)
10473bcd0662SPeter Xu 		return 0;
10483bcd0662SPeter Xu 	else if (!(rmap_head->val & 1))
10493bcd0662SPeter Xu 		return 1;
10503bcd0662SPeter Xu 
10513bcd0662SPeter Xu 	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
10523bcd0662SPeter Xu 
10533bcd0662SPeter Xu 	while (desc) {
10543bcd0662SPeter Xu 		count += desc->spte_count;
10553bcd0662SPeter Xu 		desc = desc->more;
10563bcd0662SPeter Xu 	}
10573bcd0662SPeter Xu 
10583bcd0662SPeter Xu 	return count;
10593bcd0662SPeter Xu }
10603bcd0662SPeter Xu 
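/* Return the rmap list head for @gfn at @level within @slot. */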
106193e083d4SDavid Matlack static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1062269e9552SHamza Mahfooz 					 const struct kvm_memory_slot *slot)
1063c50d8ae3SPaolo Bonzini {
1064c50d8ae3SPaolo Bonzini 	unsigned long idx;
1065c50d8ae3SPaolo Bonzini 
1066c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
10673bae0459SSean Christopherson 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1068c50d8ae3SPaolo Bonzini }
1069c50d8ae3SPaolo Bonzini 
1070c50d8ae3SPaolo Bonzini static bool rmap_can_add(struct kvm_vcpu *vcpu)
1071c50d8ae3SPaolo Bonzini {
1072356ec69aSSean Christopherson 	struct kvm_mmu_memory_cache *mc;
1073c50d8ae3SPaolo Bonzini 
1074356ec69aSSean Christopherson 	mc = &vcpu->arch.mmu_pte_list_desc_cache;
107594ce87efSSean Christopherson 	return kvm_mmu_memory_cache_nr_free_objects(mc);
1076c50d8ae3SPaolo Bonzini }
1077c50d8ae3SPaolo Bonzini 
1078c50d8ae3SPaolo Bonzini static void rmap_remove(struct kvm *kvm, u64 *spte)
1079c50d8ae3SPaolo Bonzini {
1080601f8af0SDavid Matlack 	struct kvm_memslots *slots;
1081601f8af0SDavid Matlack 	struct kvm_memory_slot *slot;
1082c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1083c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1084c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1085c50d8ae3SPaolo Bonzini 
108657354682SSean Christopherson 	sp = sptep_to_sp(spte);
1087c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1088601f8af0SDavid Matlack 
1089601f8af0SDavid Matlack 	/*
109068be1306SDavid Matlack 	 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
109168be1306SDavid Matlack 	 * so we have to determine which memslots to use based on context
109268be1306SDavid Matlack 	 * information in sp->role.
1093601f8af0SDavid Matlack 	 */
1094601f8af0SDavid Matlack 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
1095601f8af0SDavid Matlack 
1096601f8af0SDavid Matlack 	slot = __gfn_to_memslot(slots, gfn);
109793e083d4SDavid Matlack 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1098601f8af0SDavid Matlack 
1099c50d8ae3SPaolo Bonzini 	__pte_list_remove(spte, rmap_head);
1100c50d8ae3SPaolo Bonzini }
1101c50d8ae3SPaolo Bonzini 
1102c50d8ae3SPaolo Bonzini /*
1103c50d8ae3SPaolo Bonzini  * Used by the following functions to iterate through the sptes linked by an
1104c50d8ae3SPaolo Bonzini  * rmap.  All fields are private and must not be used outside.
1105c50d8ae3SPaolo Bonzini  */
1106c50d8ae3SPaolo Bonzini struct rmap_iterator {
1107c50d8ae3SPaolo Bonzini 	/* private fields */
1108c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
1109c50d8ae3SPaolo Bonzini 	int pos;			/* index of the sptep */
1110c50d8ae3SPaolo Bonzini };
1111c50d8ae3SPaolo Bonzini 
1112c50d8ae3SPaolo Bonzini /*
1113c50d8ae3SPaolo Bonzini  * Iteration must be started by this function.  This should also be used after
1114c50d8ae3SPaolo Bonzini  * removing/dropping sptes from the rmap link because in such cases the
11150a03cbdaSMiaohe Lin  * information in the iterator may not be valid.
1116c50d8ae3SPaolo Bonzini  *
1117c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1118c50d8ae3SPaolo Bonzini  */
1119c50d8ae3SPaolo Bonzini static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1120c50d8ae3SPaolo Bonzini 			   struct rmap_iterator *iter)
1121c50d8ae3SPaolo Bonzini {
1122c50d8ae3SPaolo Bonzini 	u64 *sptep;
1123c50d8ae3SPaolo Bonzini 
1124c50d8ae3SPaolo Bonzini 	if (!rmap_head->val)
1125c50d8ae3SPaolo Bonzini 		return NULL;
1126c50d8ae3SPaolo Bonzini 
1127c50d8ae3SPaolo Bonzini 	if (!(rmap_head->val & 1)) {
1128c50d8ae3SPaolo Bonzini 		iter->desc = NULL;
1129c50d8ae3SPaolo Bonzini 		sptep = (u64 *)rmap_head->val;
1130c50d8ae3SPaolo Bonzini 		goto out;
1131c50d8ae3SPaolo Bonzini 	}
1132c50d8ae3SPaolo Bonzini 
1133c50d8ae3SPaolo Bonzini 	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1134c50d8ae3SPaolo Bonzini 	iter->pos = 0;
1135c50d8ae3SPaolo Bonzini 	sptep = iter->desc->sptes[iter->pos];
1136c50d8ae3SPaolo Bonzini out:
1137c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1138c50d8ae3SPaolo Bonzini 	return sptep;
1139c50d8ae3SPaolo Bonzini }
1140c50d8ae3SPaolo Bonzini 
1141c50d8ae3SPaolo Bonzini /*
1142c50d8ae3SPaolo Bonzini  * Must be used with a valid iterator: e.g. after rmap_get_first().
1143c50d8ae3SPaolo Bonzini  *
1144c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1145c50d8ae3SPaolo Bonzini  */
1146c50d8ae3SPaolo Bonzini static u64 *rmap_get_next(struct rmap_iterator *iter)
1147c50d8ae3SPaolo Bonzini {
1148c50d8ae3SPaolo Bonzini 	u64 *sptep;
1149c50d8ae3SPaolo Bonzini 
1150c50d8ae3SPaolo Bonzini 	if (iter->desc) {
1151c50d8ae3SPaolo Bonzini 		if (iter->pos < PTE_LIST_EXT - 1) {
1152c50d8ae3SPaolo Bonzini 			++iter->pos;
1153c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1154c50d8ae3SPaolo Bonzini 			if (sptep)
1155c50d8ae3SPaolo Bonzini 				goto out;
1156c50d8ae3SPaolo Bonzini 		}
1157c50d8ae3SPaolo Bonzini 
1158c50d8ae3SPaolo Bonzini 		iter->desc = iter->desc->more;
1159c50d8ae3SPaolo Bonzini 
1160c50d8ae3SPaolo Bonzini 		if (iter->desc) {
1161c50d8ae3SPaolo Bonzini 			iter->pos = 0;
1162c50d8ae3SPaolo Bonzini 			/* desc->sptes[0] cannot be NULL */
1163c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1164c50d8ae3SPaolo Bonzini 			goto out;
1165c50d8ae3SPaolo Bonzini 		}
1166c50d8ae3SPaolo Bonzini 	}
1167c50d8ae3SPaolo Bonzini 
1168c50d8ae3SPaolo Bonzini 	return NULL;
1169c50d8ae3SPaolo Bonzini out:
1170c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1171c50d8ae3SPaolo Bonzini 	return sptep;
1172c50d8ae3SPaolo Bonzini }
1173c50d8ae3SPaolo Bonzini 
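/* Walk every spte tracked by the rmap at _rmap_head_ using _iter_. */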
1174c50d8ae3SPaolo Bonzini #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
1175c50d8ae3SPaolo Bonzini 	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
1176c50d8ae3SPaolo Bonzini 	     _spte_; _spte_ = rmap_get_next(_iter_))
1177c50d8ae3SPaolo Bonzini 
1178c50d8ae3SPaolo Bonzini static void drop_spte(struct kvm *kvm, u64 *sptep)
1179c50d8ae3SPaolo Bonzini {
118071f51d2cSMingwei Zhang 	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
11817fa2a347SSean Christopherson 
11827fa2a347SSean Christopherson 	if (is_shadow_present_pte(old_spte))
1183c50d8ae3SPaolo Bonzini 		rmap_remove(kvm, sptep);
1184c50d8ae3SPaolo Bonzini }
1185c50d8ae3SPaolo Bonzini 
1186c50d8ae3SPaolo Bonzini 
1187c50d8ae3SPaolo Bonzini static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1188c50d8ae3SPaolo Bonzini {
1189c50d8ae3SPaolo Bonzini 	if (is_large_pte(*sptep)) {
119057354682SSean Christopherson 		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1191c50d8ae3SPaolo Bonzini 		drop_spte(kvm, sptep);
1192c50d8ae3SPaolo Bonzini 		return true;
1193c50d8ae3SPaolo Bonzini 	}
1194c50d8ae3SPaolo Bonzini 
1195c50d8ae3SPaolo Bonzini 	return false;
1196c50d8ae3SPaolo Bonzini }
1197c50d8ae3SPaolo Bonzini 
1198c50d8ae3SPaolo Bonzini static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1199c50d8ae3SPaolo Bonzini {
1200c50d8ae3SPaolo Bonzini 	if (__drop_large_spte(vcpu->kvm, sptep)) {
120157354682SSean Christopherson 		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1202c50d8ae3SPaolo Bonzini 
1203c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1204c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1205c50d8ae3SPaolo Bonzini 	}
1206c50d8ae3SPaolo Bonzini }
1207c50d8ae3SPaolo Bonzini 
1208c50d8ae3SPaolo Bonzini /*
1209c50d8ae3SPaolo Bonzini  * Write-protect the specified @sptep; @pt_protect indicates whether the
1210c50d8ae3SPaolo Bonzini  * spte write-protection is caused by protecting a shadow page table.
1211c50d8ae3SPaolo Bonzini  *
1212c50d8ae3SPaolo Bonzini  * Note: the write protection requirement differs between dirty logging and
1213c50d8ae3SPaolo Bonzini  * spte protection:
1214c50d8ae3SPaolo Bonzini  * - for dirty logging, the spte can be set to writable at any time if
1215c50d8ae3SPaolo Bonzini  *   its dirty bitmap is properly set.
1216c50d8ae3SPaolo Bonzini  * - for spte protection, the spte can be made writable only after the
1217c50d8ae3SPaolo Bonzini  *   shadow page is unsync-ed.
1218c50d8ae3SPaolo Bonzini  *
1219c50d8ae3SPaolo Bonzini  * Return true if the TLB needs to be flushed.
1220c50d8ae3SPaolo Bonzini  */
1221c50d8ae3SPaolo Bonzini static bool spte_write_protect(u64 *sptep, bool pt_protect)
1222c50d8ae3SPaolo Bonzini {
1223c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1224c50d8ae3SPaolo Bonzini 
1225c50d8ae3SPaolo Bonzini 	if (!is_writable_pte(spte) &&
1226c50d8ae3SPaolo Bonzini 	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1227c50d8ae3SPaolo Bonzini 		return false;
1228c50d8ae3SPaolo Bonzini 
1229805a0f83SStephen Zhang 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1230c50d8ae3SPaolo Bonzini 
1231c50d8ae3SPaolo Bonzini 	if (pt_protect)
12325fc3424fSSean Christopherson 		spte &= ~shadow_mmu_writable_mask;
1233c50d8ae3SPaolo Bonzini 	spte = spte & ~PT_WRITABLE_MASK;
1234c50d8ae3SPaolo Bonzini 
1235c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1236c50d8ae3SPaolo Bonzini }
1237c50d8ae3SPaolo Bonzini 
1238c50d8ae3SPaolo Bonzini static bool __rmap_write_protect(struct kvm *kvm,
1239c50d8ae3SPaolo Bonzini 				 struct kvm_rmap_head *rmap_head,
1240c50d8ae3SPaolo Bonzini 				 bool pt_protect)
1241c50d8ae3SPaolo Bonzini {
1242c50d8ae3SPaolo Bonzini 	u64 *sptep;
1243c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1244c50d8ae3SPaolo Bonzini 	bool flush = false;
1245c50d8ae3SPaolo Bonzini 
1246c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1247c50d8ae3SPaolo Bonzini 		flush |= spte_write_protect(sptep, pt_protect);
1248c50d8ae3SPaolo Bonzini 
1249c50d8ae3SPaolo Bonzini 	return flush;
1250c50d8ae3SPaolo Bonzini }
1251c50d8ae3SPaolo Bonzini 
1252c50d8ae3SPaolo Bonzini static bool spte_clear_dirty(u64 *sptep)
1253c50d8ae3SPaolo Bonzini {
1254c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1255c50d8ae3SPaolo Bonzini 
1256805a0f83SStephen Zhang 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1257c50d8ae3SPaolo Bonzini 
1258c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!spte_ad_enabled(spte));
1259c50d8ae3SPaolo Bonzini 	spte &= ~shadow_dirty_mask;
1260c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1261c50d8ae3SPaolo Bonzini }
1262c50d8ae3SPaolo Bonzini 
1263c50d8ae3SPaolo Bonzini static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1264c50d8ae3SPaolo Bonzini {
1265c50d8ae3SPaolo Bonzini 	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1266c50d8ae3SPaolo Bonzini 					       (unsigned long *)sptep);
1267c50d8ae3SPaolo Bonzini 	if (was_writable && !spte_ad_enabled(*sptep))
1268c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1269c50d8ae3SPaolo Bonzini 
1270c50d8ae3SPaolo Bonzini 	return was_writable;
1271c50d8ae3SPaolo Bonzini }
1272c50d8ae3SPaolo Bonzini 
1273c50d8ae3SPaolo Bonzini /*
1274c50d8ae3SPaolo Bonzini  * Gets the GFN ready for another round of dirty logging by clearing the
1275c50d8ae3SPaolo Bonzini  *	- D bit on ad-enabled SPTEs, and
1276c50d8ae3SPaolo Bonzini  *	- W bit on ad-disabled SPTEs.
1277c50d8ae3SPaolo Bonzini  * Returns true iff any D or W bits were cleared.
1278c50d8ae3SPaolo Bonzini  */
12790a234f5dSSean Christopherson static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1280269e9552SHamza Mahfooz 			       const struct kvm_memory_slot *slot)
1281c50d8ae3SPaolo Bonzini {
1282c50d8ae3SPaolo Bonzini 	u64 *sptep;
1283c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1284c50d8ae3SPaolo Bonzini 	bool flush = false;
1285c50d8ae3SPaolo Bonzini 
1286c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1287c50d8ae3SPaolo Bonzini 		if (spte_ad_need_write_protect(*sptep))
1288c50d8ae3SPaolo Bonzini 			flush |= spte_wrprot_for_clear_dirty(sptep);
1289c50d8ae3SPaolo Bonzini 		else
1290c50d8ae3SPaolo Bonzini 			flush |= spte_clear_dirty(sptep);
1291c50d8ae3SPaolo Bonzini 
1292c50d8ae3SPaolo Bonzini 	return flush;
1293c50d8ae3SPaolo Bonzini }
1294c50d8ae3SPaolo Bonzini 
1295c50d8ae3SPaolo Bonzini /**
1296c50d8ae3SPaolo Bonzini  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1297c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1298c50d8ae3SPaolo Bonzini  * @slot: slot to protect
1299c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1300c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should protect
1301c50d8ae3SPaolo Bonzini  *
130289212919SKeqian Zhu  * Used when we do not need to care about huge page mappings.
1303c50d8ae3SPaolo Bonzini  */
1304c50d8ae3SPaolo Bonzini static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1305c50d8ae3SPaolo Bonzini 				     struct kvm_memory_slot *slot,
1306c50d8ae3SPaolo Bonzini 				     gfn_t gfn_offset, unsigned long mask)
1307c50d8ae3SPaolo Bonzini {
1308c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1309c50d8ae3SPaolo Bonzini 
1310897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1311a6a0b05dSBen Gardon 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1312a6a0b05dSBen Gardon 				slot->base_gfn + gfn_offset, mask, true);
1313e2209710SBen Gardon 
1314e2209710SBen Gardon 	if (!kvm_memslots_have_rmaps(kvm))
1315e2209710SBen Gardon 		return;
1316e2209710SBen Gardon 
1317c50d8ae3SPaolo Bonzini 	while (mask) {
131893e083d4SDavid Matlack 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
13193bae0459SSean Christopherson 					PG_LEVEL_4K, slot);
1320c50d8ae3SPaolo Bonzini 		__rmap_write_protect(kvm, rmap_head, false);
1321c50d8ae3SPaolo Bonzini 
1322c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1323c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1324c50d8ae3SPaolo Bonzini 	}
1325c50d8ae3SPaolo Bonzini }
1326c50d8ae3SPaolo Bonzini 
1327c50d8ae3SPaolo Bonzini /**
1328c50d8ae3SPaolo Bonzini  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1329c50d8ae3SPaolo Bonzini  * protect the page if the D-bit isn't supported.
1330c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1331c50d8ae3SPaolo Bonzini  * @slot: slot to clear D-bit
1332c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1333c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should clear D-bit
1334c50d8ae3SPaolo Bonzini  *
1335c50d8ae3SPaolo Bonzini  * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1336c50d8ae3SPaolo Bonzini  */
1337a018eba5SSean Christopherson static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1338c50d8ae3SPaolo Bonzini 					 struct kvm_memory_slot *slot,
1339c50d8ae3SPaolo Bonzini 					 gfn_t gfn_offset, unsigned long mask)
1340c50d8ae3SPaolo Bonzini {
1341c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1342c50d8ae3SPaolo Bonzini 
1343897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1344a6a0b05dSBen Gardon 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1345a6a0b05dSBen Gardon 				slot->base_gfn + gfn_offset, mask, false);
1346e2209710SBen Gardon 
1347e2209710SBen Gardon 	if (!kvm_memslots_have_rmaps(kvm))
1348e2209710SBen Gardon 		return;
1349e2209710SBen Gardon 
1350c50d8ae3SPaolo Bonzini 	while (mask) {
135193e083d4SDavid Matlack 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
13523bae0459SSean Christopherson 					PG_LEVEL_4K, slot);
13530a234f5dSSean Christopherson 		__rmap_clear_dirty(kvm, rmap_head, slot);
1354c50d8ae3SPaolo Bonzini 
1355c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1356c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1357c50d8ae3SPaolo Bonzini 	}
1358c50d8ae3SPaolo Bonzini }
1359c50d8ae3SPaolo Bonzini 
1360c50d8ae3SPaolo Bonzini /**
1361c50d8ae3SPaolo Bonzini  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1362c50d8ae3SPaolo Bonzini  * PT level pages.
1363c50d8ae3SPaolo Bonzini  *
1364c50d8ae3SPaolo Bonzini  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1365c50d8ae3SPaolo Bonzini  * enable dirty logging for them.
1366c50d8ae3SPaolo Bonzini  *
136789212919SKeqian Zhu  * We need to care about huge page mappings: e.g. during dirty logging we may
136889212919SKeqian Zhu  * have such mappings.
1369c50d8ae3SPaolo Bonzini  */
1370c50d8ae3SPaolo Bonzini void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1371c50d8ae3SPaolo Bonzini 				struct kvm_memory_slot *slot,
1372c50d8ae3SPaolo Bonzini 				gfn_t gfn_offset, unsigned long mask)
1373c50d8ae3SPaolo Bonzini {
137489212919SKeqian Zhu 	/*
137589212919SKeqian Zhu 	 * Huge pages are NOT write protected when we start dirty logging in
137689212919SKeqian Zhu 	 * initially-all-set mode; we must write protect them here so that they
137789212919SKeqian Zhu 	 * are split to 4K on the first write.
137889212919SKeqian Zhu 	 *
137989212919SKeqian Zhu 	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
138089212919SKeqian Zhu 	 * of memslot has no such restriction, so the range can cross two large
138189212919SKeqian Zhu 	 * pages.
138289212919SKeqian Zhu 	 */
138389212919SKeqian Zhu 	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
138489212919SKeqian Zhu 		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
138589212919SKeqian Zhu 		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
138689212919SKeqian Zhu 
138789212919SKeqian Zhu 		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
138889212919SKeqian Zhu 
138989212919SKeqian Zhu 		/* Cross two large pages? */
139089212919SKeqian Zhu 		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
139189212919SKeqian Zhu 		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
139289212919SKeqian Zhu 			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
139389212919SKeqian Zhu 						       PG_LEVEL_2M);
139489212919SKeqian Zhu 	}
139589212919SKeqian Zhu 
139689212919SKeqian Zhu 	/* Now handle 4K PTEs.  */
1397a018eba5SSean Christopherson 	if (kvm_x86_ops.cpu_dirty_log_size)
1398a018eba5SSean Christopherson 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1399c50d8ae3SPaolo Bonzini 	else
1400c50d8ae3SPaolo Bonzini 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1401c50d8ae3SPaolo Bonzini }
1402c50d8ae3SPaolo Bonzini 
1403fb04a1edSPeter Xu int kvm_cpu_dirty_log_size(void)
1404fb04a1edSPeter Xu {
14056dd03800SSean Christopherson 	return kvm_x86_ops.cpu_dirty_log_size;
1406fb04a1edSPeter Xu }
1407fb04a1edSPeter Xu 
1408c50d8ae3SPaolo Bonzini bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
14093ad93562SKeqian Zhu 				    struct kvm_memory_slot *slot, u64 gfn,
14103ad93562SKeqian Zhu 				    int min_level)
1411c50d8ae3SPaolo Bonzini {
1412c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1413c50d8ae3SPaolo Bonzini 	int i;
1414c50d8ae3SPaolo Bonzini 	bool write_protected = false;
1415c50d8ae3SPaolo Bonzini 
1416e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
14173ad93562SKeqian Zhu 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
141893e083d4SDavid Matlack 			rmap_head = gfn_to_rmap(gfn, i, slot);
1419c50d8ae3SPaolo Bonzini 			write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1420c50d8ae3SPaolo Bonzini 		}
1421e2209710SBen Gardon 	}
1422c50d8ae3SPaolo Bonzini 
1423897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
142446044f72SBen Gardon 		write_protected |=
14253ad93562SKeqian Zhu 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
142646044f72SBen Gardon 
1427c50d8ae3SPaolo Bonzini 	return write_protected;
1428c50d8ae3SPaolo Bonzini }
1429c50d8ae3SPaolo Bonzini 
1430c50d8ae3SPaolo Bonzini static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1431c50d8ae3SPaolo Bonzini {
1432c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1433c50d8ae3SPaolo Bonzini 
1434c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
14353ad93562SKeqian Zhu 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1436c50d8ae3SPaolo Bonzini }
1437c50d8ae3SPaolo Bonzini 
14380a234f5dSSean Christopherson static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1439269e9552SHamza Mahfooz 			  const struct kvm_memory_slot *slot)
1440c50d8ae3SPaolo Bonzini {
144171f51d2cSMingwei Zhang 	return pte_list_destroy(kvm, rmap_head);
1442c50d8ae3SPaolo Bonzini }
1443c50d8ae3SPaolo Bonzini 
14443039bcc7SSean Christopherson static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1445c50d8ae3SPaolo Bonzini 			    struct kvm_memory_slot *slot, gfn_t gfn, int level,
14463039bcc7SSean Christopherson 			    pte_t unused)
1447c50d8ae3SPaolo Bonzini {
14480a234f5dSSean Christopherson 	return kvm_zap_rmapp(kvm, rmap_head, slot);
1449c50d8ae3SPaolo Bonzini }
1450c50d8ae3SPaolo Bonzini 
14513039bcc7SSean Christopherson static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1452c50d8ae3SPaolo Bonzini 			      struct kvm_memory_slot *slot, gfn_t gfn, int level,
14533039bcc7SSean Christopherson 			      pte_t pte)
1454c50d8ae3SPaolo Bonzini {
1455c50d8ae3SPaolo Bonzini 	u64 *sptep;
1456c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1457c50d8ae3SPaolo Bonzini 	int need_flush = 0;
1458c50d8ae3SPaolo Bonzini 	u64 new_spte;
1459c50d8ae3SPaolo Bonzini 	kvm_pfn_t new_pfn;
1460c50d8ae3SPaolo Bonzini 
14613039bcc7SSean Christopherson 	WARN_ON(pte_huge(pte));
14623039bcc7SSean Christopherson 	new_pfn = pte_pfn(pte);
1463c50d8ae3SPaolo Bonzini 
1464c50d8ae3SPaolo Bonzini restart:
1465c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
1466805a0f83SStephen Zhang 		rmap_printk("spte %p %llx gfn %llx (%d)\n",
1467c50d8ae3SPaolo Bonzini 			    sptep, *sptep, gfn, level);
1468c50d8ae3SPaolo Bonzini 
1469c50d8ae3SPaolo Bonzini 		need_flush = 1;
1470c50d8ae3SPaolo Bonzini 
14713039bcc7SSean Christopherson 		if (pte_write(pte)) {
147271f51d2cSMingwei Zhang 			pte_list_remove(kvm, rmap_head, sptep);
1473c50d8ae3SPaolo Bonzini 			goto restart;
1474c50d8ae3SPaolo Bonzini 		} else {
1475cb3eedabSPaolo Bonzini 			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
1476cb3eedabSPaolo Bonzini 					*sptep, new_pfn);
1477c50d8ae3SPaolo Bonzini 
147871f51d2cSMingwei Zhang 			mmu_spte_clear_track_bits(kvm, sptep);
1479c50d8ae3SPaolo Bonzini 			mmu_spte_set(sptep, new_spte);
1480c50d8ae3SPaolo Bonzini 		}
1481c50d8ae3SPaolo Bonzini 	}
1482c50d8ae3SPaolo Bonzini 
1483c50d8ae3SPaolo Bonzini 	if (need_flush && kvm_available_flush_tlb_with_range()) {
1484c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1485c50d8ae3SPaolo Bonzini 		return 0;
1486c50d8ae3SPaolo Bonzini 	}
1487c50d8ae3SPaolo Bonzini 
1488c50d8ae3SPaolo Bonzini 	return need_flush;
1489c50d8ae3SPaolo Bonzini }
1490c50d8ae3SPaolo Bonzini 
1491c50d8ae3SPaolo Bonzini struct slot_rmap_walk_iterator {
1492c50d8ae3SPaolo Bonzini 	/* input fields. */
1493269e9552SHamza Mahfooz 	const struct kvm_memory_slot *slot;
1494c50d8ae3SPaolo Bonzini 	gfn_t start_gfn;
1495c50d8ae3SPaolo Bonzini 	gfn_t end_gfn;
1496c50d8ae3SPaolo Bonzini 	int start_level;
1497c50d8ae3SPaolo Bonzini 	int end_level;
1498c50d8ae3SPaolo Bonzini 
1499c50d8ae3SPaolo Bonzini 	/* output fields. */
1500c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1501c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap;
1502c50d8ae3SPaolo Bonzini 	int level;
1503c50d8ae3SPaolo Bonzini 
1504c50d8ae3SPaolo Bonzini 	/* private field. */
1505c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *end_rmap;
1506c50d8ae3SPaolo Bonzini };
1507c50d8ae3SPaolo Bonzini 
1508c50d8ae3SPaolo Bonzini static void
1509c50d8ae3SPaolo Bonzini rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1510c50d8ae3SPaolo Bonzini {
1511c50d8ae3SPaolo Bonzini 	iterator->level = level;
1512c50d8ae3SPaolo Bonzini 	iterator->gfn = iterator->start_gfn;
151393e083d4SDavid Matlack 	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
151493e083d4SDavid Matlack 	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1515c50d8ae3SPaolo Bonzini }
1516c50d8ae3SPaolo Bonzini 
1517c50d8ae3SPaolo Bonzini static void
1518c50d8ae3SPaolo Bonzini slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1519269e9552SHamza Mahfooz 		    const struct kvm_memory_slot *slot, int start_level,
1520c50d8ae3SPaolo Bonzini 		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
1521c50d8ae3SPaolo Bonzini {
1522c50d8ae3SPaolo Bonzini 	iterator->slot = slot;
1523c50d8ae3SPaolo Bonzini 	iterator->start_level = start_level;
1524c50d8ae3SPaolo Bonzini 	iterator->end_level = end_level;
1525c50d8ae3SPaolo Bonzini 	iterator->start_gfn = start_gfn;
1526c50d8ae3SPaolo Bonzini 	iterator->end_gfn = end_gfn;
1527c50d8ae3SPaolo Bonzini 
1528c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->start_level);
1529c50d8ae3SPaolo Bonzini }
1530c50d8ae3SPaolo Bonzini 
1531c50d8ae3SPaolo Bonzini static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1532c50d8ae3SPaolo Bonzini {
1533c50d8ae3SPaolo Bonzini 	return !!iterator->rmap;
1534c50d8ae3SPaolo Bonzini }
1535c50d8ae3SPaolo Bonzini 
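/*
 * Advance to the next rmap head at the current level; once the gfn range is
 * exhausted, move up to the next (larger) page level, or terminate the walk
 * by clearing iterator->rmap.
 */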
1536c50d8ae3SPaolo Bonzini static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1537c50d8ae3SPaolo Bonzini {
1538c50d8ae3SPaolo Bonzini 	if (++iterator->rmap <= iterator->end_rmap) {
1539c50d8ae3SPaolo Bonzini 		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1540c50d8ae3SPaolo Bonzini 		return;
1541c50d8ae3SPaolo Bonzini 	}
1542c50d8ae3SPaolo Bonzini 
1543c50d8ae3SPaolo Bonzini 	if (++iterator->level > iterator->end_level) {
1544c50d8ae3SPaolo Bonzini 		iterator->rmap = NULL;
1545c50d8ae3SPaolo Bonzini 		return;
1546c50d8ae3SPaolo Bonzini 	}
1547c50d8ae3SPaolo Bonzini 
1548c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->level);
1549c50d8ae3SPaolo Bonzini }
1550c50d8ae3SPaolo Bonzini 
1551c50d8ae3SPaolo Bonzini #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
1552c50d8ae3SPaolo Bonzini 	   _start_gfn, _end_gfn, _iter_)				\
1553c50d8ae3SPaolo Bonzini 	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
1554c50d8ae3SPaolo Bonzini 				 _end_level_, _start_gfn, _end_gfn);	\
1555c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_okay(_iter_);				\
1556c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_next(_iter_))
1557c50d8ae3SPaolo Bonzini 
15583039bcc7SSean Christopherson typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1559c1b91493SSean Christopherson 			       struct kvm_memory_slot *slot, gfn_t gfn,
15603039bcc7SSean Christopherson 			       int level, pte_t pte);
1561c1b91493SSean Christopherson 
15623039bcc7SSean Christopherson static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
15633039bcc7SSean Christopherson 						 struct kvm_gfn_range *range,
1564c1b91493SSean Christopherson 						 rmap_handler_t handler)
1565c50d8ae3SPaolo Bonzini {
1566c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
15673039bcc7SSean Christopherson 	bool ret = false;
1568c50d8ae3SPaolo Bonzini 
15693039bcc7SSean Christopherson 	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
15703039bcc7SSean Christopherson 				 range->start, range->end - 1, &iterator)
15713039bcc7SSean Christopherson 		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
15723039bcc7SSean Christopherson 			       iterator.level, range->pte);
1573c50d8ae3SPaolo Bonzini 
1574c50d8ae3SPaolo Bonzini 	return ret;
1575c50d8ae3SPaolo Bonzini }
1576c50d8ae3SPaolo Bonzini 
15773039bcc7SSean Christopherson bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1578c50d8ae3SPaolo Bonzini {
1579e2209710SBen Gardon 	bool flush = false;
1580c50d8ae3SPaolo Bonzini 
1581e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
15823039bcc7SSean Christopherson 		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
1583063afacdSBen Gardon 
1584897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
15853039bcc7SSean Christopherson 		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1586063afacdSBen Gardon 
15873039bcc7SSean Christopherson 	return flush;
1588c50d8ae3SPaolo Bonzini }
1589c50d8ae3SPaolo Bonzini 
15903039bcc7SSean Christopherson bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1591c50d8ae3SPaolo Bonzini {
1592e2209710SBen Gardon 	bool flush = false;
15931d8dd6b3SBen Gardon 
1594e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
15953039bcc7SSean Christopherson 		flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
15961d8dd6b3SBen Gardon 
1597897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
15983039bcc7SSean Christopherson 		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
15991d8dd6b3SBen Gardon 
16003039bcc7SSean Christopherson 	return flush;
1601c50d8ae3SPaolo Bonzini }
1602c50d8ae3SPaolo Bonzini 
16033039bcc7SSean Christopherson static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1604c50d8ae3SPaolo Bonzini 			  struct kvm_memory_slot *slot, gfn_t gfn, int level,
16053039bcc7SSean Christopherson 			  pte_t unused)
1606c50d8ae3SPaolo Bonzini {
1607c50d8ae3SPaolo Bonzini 	u64 *sptep;
16083f649ab7SKees Cook 	struct rmap_iterator iter;
1609c50d8ae3SPaolo Bonzini 	int young = 0;
1610c50d8ae3SPaolo Bonzini 
1611c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1612c50d8ae3SPaolo Bonzini 		young |= mmu_spte_age(sptep);
1613c50d8ae3SPaolo Bonzini 
1614c50d8ae3SPaolo Bonzini 	return young;
1615c50d8ae3SPaolo Bonzini }
1616c50d8ae3SPaolo Bonzini 
16173039bcc7SSean Christopherson static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1618c50d8ae3SPaolo Bonzini 			       struct kvm_memory_slot *slot, gfn_t gfn,
16193039bcc7SSean Christopherson 			       int level, pte_t unused)
1620c50d8ae3SPaolo Bonzini {
1621c50d8ae3SPaolo Bonzini 	u64 *sptep;
1622c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1623c50d8ae3SPaolo Bonzini 
1624c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1625c50d8ae3SPaolo Bonzini 		if (is_accessed_spte(*sptep))
1626c50d8ae3SPaolo Bonzini 			return 1;
1627c50d8ae3SPaolo Bonzini 	return 0;
1628c50d8ae3SPaolo Bonzini }
1629c50d8ae3SPaolo Bonzini 
1630c50d8ae3SPaolo Bonzini #define RMAP_RECYCLE_THRESHOLD 1000
1631c50d8ae3SPaolo Bonzini 
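/*
 * Track @spte in the rmap for @gfn.  If the rmap chain grows beyond
 * RMAP_RECYCLE_THRESHOLD, zap the existing sptes and flush remote TLBs so
 * that the chain stays bounded.
 */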
16328a9f566aSDavid Matlack static void rmap_add(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
16338a9f566aSDavid Matlack 		     u64 *spte, gfn_t gfn)
1634c50d8ae3SPaolo Bonzini {
1635c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
163668be1306SDavid Matlack 	struct kvm_rmap_head *rmap_head;
163768be1306SDavid Matlack 	int rmap_count;
1638c50d8ae3SPaolo Bonzini 
163957354682SSean Christopherson 	sp = sptep_to_sp(spte);
164068be1306SDavid Matlack 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
164193e083d4SDavid Matlack 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
164268be1306SDavid Matlack 	rmap_count = pte_list_add(vcpu, spte, rmap_head);
1643c50d8ae3SPaolo Bonzini 
164468be1306SDavid Matlack 	if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
16453039bcc7SSean Christopherson 		kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
164668be1306SDavid Matlack 		kvm_flush_remote_tlbs_with_address(
164768be1306SDavid Matlack 				vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
164868be1306SDavid Matlack 	}
1649c50d8ae3SPaolo Bonzini }
1650c50d8ae3SPaolo Bonzini 
16513039bcc7SSean Christopherson bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1652c50d8ae3SPaolo Bonzini {
1653e2209710SBen Gardon 	bool young = false;
1654f8e14497SBen Gardon 
1655e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
16563039bcc7SSean Christopherson 		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
16573039bcc7SSean Christopherson 
1658897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
16593039bcc7SSean Christopherson 		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1660f8e14497SBen Gardon 
1661f8e14497SBen Gardon 	return young;
1662c50d8ae3SPaolo Bonzini }
1663c50d8ae3SPaolo Bonzini 
16643039bcc7SSean Christopherson bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1665c50d8ae3SPaolo Bonzini {
1666e2209710SBen Gardon 	bool young = false;
1667f8e14497SBen Gardon 
1668e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
16693039bcc7SSean Christopherson 		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
16703039bcc7SSean Christopherson 
1671897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
16723039bcc7SSean Christopherson 		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1673f8e14497SBen Gardon 
1674f8e14497SBen Gardon 	return young;
1675c50d8ae3SPaolo Bonzini }
1676c50d8ae3SPaolo Bonzini 
1677c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1678c50d8ae3SPaolo Bonzini static int is_empty_shadow_page(u64 *spt)
1679c50d8ae3SPaolo Bonzini {
1680c50d8ae3SPaolo Bonzini 	u64 *pos;
1681c50d8ae3SPaolo Bonzini 	u64 *end;
1682c50d8ae3SPaolo Bonzini 
1683c50d8ae3SPaolo Bonzini 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1684c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*pos)) {
1685c50d8ae3SPaolo Bonzini 			printk(KERN_ERR "%s: %p %llx\n", __func__,
1686c50d8ae3SPaolo Bonzini 			       pos, *pos);
1687c50d8ae3SPaolo Bonzini 			return 0;
1688c50d8ae3SPaolo Bonzini 		}
1689c50d8ae3SPaolo Bonzini 	return 1;
1690c50d8ae3SPaolo Bonzini }
1691c50d8ae3SPaolo Bonzini #endif
1692c50d8ae3SPaolo Bonzini 
1693c50d8ae3SPaolo Bonzini /*
1694c50d8ae3SPaolo Bonzini  * This value is the sum of all of the kvm instances'
1695c50d8ae3SPaolo Bonzini  * kvm->arch.n_used_mmu_pages values.  We need a global,
1696c50d8ae3SPaolo Bonzini  * aggregate version in order to make the slab shrinker
1697c50d8ae3SPaolo Bonzini  * faster.
1698c50d8ae3SPaolo Bonzini  */
1699d5aaad6fSSean Christopherson static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
1700c50d8ae3SPaolo Bonzini {
1701c50d8ae3SPaolo Bonzini 	kvm->arch.n_used_mmu_pages += nr;
1702c50d8ae3SPaolo Bonzini 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1703c50d8ae3SPaolo Bonzini }
1704c50d8ae3SPaolo Bonzini 
1705c50d8ae3SPaolo Bonzini static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1706c50d8ae3SPaolo Bonzini {
1707c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1708c50d8ae3SPaolo Bonzini 	hlist_del(&sp->hash_link);
1709c50d8ae3SPaolo Bonzini 	list_del(&sp->link);
1710c50d8ae3SPaolo Bonzini 	free_page((unsigned long)sp->spt);
1711c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
1712c50d8ae3SPaolo Bonzini 		free_page((unsigned long)sp->gfns);
1713c50d8ae3SPaolo Bonzini 	kmem_cache_free(mmu_page_header_cache, sp);
1714c50d8ae3SPaolo Bonzini }
1715c50d8ae3SPaolo Bonzini 
1716c50d8ae3SPaolo Bonzini static unsigned kvm_page_table_hashfn(gfn_t gfn)
1717c50d8ae3SPaolo Bonzini {
1718c50d8ae3SPaolo Bonzini 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1719c50d8ae3SPaolo Bonzini }
1720c50d8ae3SPaolo Bonzini 
1721c50d8ae3SPaolo Bonzini static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1722c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp, u64 *parent_pte)
1723c50d8ae3SPaolo Bonzini {
1724c50d8ae3SPaolo Bonzini 	if (!parent_pte)
1725c50d8ae3SPaolo Bonzini 		return;
1726c50d8ae3SPaolo Bonzini 
1727c50d8ae3SPaolo Bonzini 	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1728c50d8ae3SPaolo Bonzini }
1729c50d8ae3SPaolo Bonzini 
1730c50d8ae3SPaolo Bonzini static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1731c50d8ae3SPaolo Bonzini 				       u64 *parent_pte)
1732c50d8ae3SPaolo Bonzini {
1733c50d8ae3SPaolo Bonzini 	__pte_list_remove(parent_pte, &sp->parent_ptes);
1734c50d8ae3SPaolo Bonzini }
1735c50d8ae3SPaolo Bonzini 
1736c50d8ae3SPaolo Bonzini static void drop_parent_pte(struct kvm_mmu_page *sp,
1737c50d8ae3SPaolo Bonzini 			    u64 *parent_pte)
1738c50d8ae3SPaolo Bonzini {
1739c50d8ae3SPaolo Bonzini 	mmu_page_remove_parent_pte(sp, parent_pte);
1740c50d8ae3SPaolo Bonzini 	mmu_spte_clear_no_track(parent_pte);
1741c50d8ae3SPaolo Bonzini }
1742c50d8ae3SPaolo Bonzini 
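/*
 * Allocate a new shadow page (plus its gfns array for indirect pages) from
 * the vCPU's memory caches and add it to the head of active_mmu_pages.
 */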
1743c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1744c50d8ae3SPaolo Bonzini {
1745c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1746c50d8ae3SPaolo Bonzini 
174794ce87efSSean Christopherson 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
174894ce87efSSean Christopherson 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1749c50d8ae3SPaolo Bonzini 	if (!direct)
175094ce87efSSean Christopherson 		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1751c50d8ae3SPaolo Bonzini 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1752c50d8ae3SPaolo Bonzini 
1753c50d8ae3SPaolo Bonzini 	/*
1754c50d8ae3SPaolo Bonzini 	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
1755c50d8ae3SPaolo Bonzini 	 * depends on valid pages being added to the head of the list.  See
1756c50d8ae3SPaolo Bonzini 	 * comments in kvm_zap_obsolete_pages().
1757c50d8ae3SPaolo Bonzini 	 */
1758c50d8ae3SPaolo Bonzini 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1759c50d8ae3SPaolo Bonzini 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1760c50d8ae3SPaolo Bonzini 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1761c50d8ae3SPaolo Bonzini 	return sp;
1762c50d8ae3SPaolo Bonzini }
1763c50d8ae3SPaolo Bonzini 
1764c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte);
1765c50d8ae3SPaolo Bonzini static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1766c50d8ae3SPaolo Bonzini {
1767c50d8ae3SPaolo Bonzini 	u64 *sptep;
1768c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1769c50d8ae3SPaolo Bonzini 
1770c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1771c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
1772c50d8ae3SPaolo Bonzini 	}
1773c50d8ae3SPaolo Bonzini }
1774c50d8ae3SPaolo Bonzini 
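/*
 * Record in the parent shadow page that the child mapped by @spte is unsync,
 * and propagate the unsync_children state up through all ancestors the first
 * time it is set for a given page.
 */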
1775c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte)
1776c50d8ae3SPaolo Bonzini {
1777c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1778c50d8ae3SPaolo Bonzini 	unsigned int index;
1779c50d8ae3SPaolo Bonzini 
178057354682SSean Christopherson 	sp = sptep_to_sp(spte);
1781c50d8ae3SPaolo Bonzini 	index = spte - sp->spt;
1782c50d8ae3SPaolo Bonzini 	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1783c50d8ae3SPaolo Bonzini 		return;
1784c50d8ae3SPaolo Bonzini 	if (sp->unsync_children++)
1785c50d8ae3SPaolo Bonzini 		return;
1786c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
1787c50d8ae3SPaolo Bonzini }
1788c50d8ae3SPaolo Bonzini 
1789c50d8ae3SPaolo Bonzini static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1790c50d8ae3SPaolo Bonzini 			       struct kvm_mmu_page *sp)
1791c50d8ae3SPaolo Bonzini {
1792c3e5e415SLai Jiangshan 	return -1;
1793c50d8ae3SPaolo Bonzini }
1794c50d8ae3SPaolo Bonzini 
1795c50d8ae3SPaolo Bonzini #define KVM_PAGE_ARRAY_NR 16
1796c50d8ae3SPaolo Bonzini 
1797c50d8ae3SPaolo Bonzini struct kvm_mmu_pages {
1798c50d8ae3SPaolo Bonzini 	struct mmu_page_and_offset {
1799c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
1800c50d8ae3SPaolo Bonzini 		unsigned int idx;
1801c50d8ae3SPaolo Bonzini 	} page[KVM_PAGE_ARRAY_NR];
1802c50d8ae3SPaolo Bonzini 	unsigned int nr;
1803c50d8ae3SPaolo Bonzini };
1804c50d8ae3SPaolo Bonzini 
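/*
 * Add @sp at index @idx to the page vector, skipping duplicates of unsync
 * pages already present.  Returns nonzero when the vector becomes full.
 */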
1805c50d8ae3SPaolo Bonzini static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1806c50d8ae3SPaolo Bonzini 			 int idx)
1807c50d8ae3SPaolo Bonzini {
1808c50d8ae3SPaolo Bonzini 	int i;
1809c50d8ae3SPaolo Bonzini 
1810c50d8ae3SPaolo Bonzini 	if (sp->unsync)
1811c50d8ae3SPaolo Bonzini 		for (i = 0; i < pvec->nr; i++)
1812c50d8ae3SPaolo Bonzini 			if (pvec->page[i].sp == sp)
1813c50d8ae3SPaolo Bonzini 				return 0;
1814c50d8ae3SPaolo Bonzini 
1815c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].sp = sp;
1816c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].idx = idx;
1817c50d8ae3SPaolo Bonzini 	pvec->nr++;
1818c50d8ae3SPaolo Bonzini 	return (pvec->nr == KVM_PAGE_ARRAY_NR);
1819c50d8ae3SPaolo Bonzini }
1820c50d8ae3SPaolo Bonzini 
1821c50d8ae3SPaolo Bonzini static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1822c50d8ae3SPaolo Bonzini {
1823c50d8ae3SPaolo Bonzini 	--sp->unsync_children;
1824c50d8ae3SPaolo Bonzini 	WARN_ON((int)sp->unsync_children < 0);
1825c50d8ae3SPaolo Bonzini 	__clear_bit(idx, sp->unsync_child_bitmap);
1826c50d8ae3SPaolo Bonzini }
1827c50d8ae3SPaolo Bonzini 
1828c50d8ae3SPaolo Bonzini static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1829c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
1830c50d8ae3SPaolo Bonzini {
1831c50d8ae3SPaolo Bonzini 	int i, ret, nr_unsync_leaf = 0;
1832c50d8ae3SPaolo Bonzini 
1833c50d8ae3SPaolo Bonzini 	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1834c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
1835c50d8ae3SPaolo Bonzini 		u64 ent = sp->spt[i];
1836c50d8ae3SPaolo Bonzini 
1837c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1838c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
1839c50d8ae3SPaolo Bonzini 			continue;
1840c50d8ae3SPaolo Bonzini 		}
1841c50d8ae3SPaolo Bonzini 
1842e47c4aeeSSean Christopherson 		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1843c50d8ae3SPaolo Bonzini 
1844c50d8ae3SPaolo Bonzini 		if (child->unsync_children) {
1845c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
1846c50d8ae3SPaolo Bonzini 				return -ENOSPC;
1847c50d8ae3SPaolo Bonzini 
1848c50d8ae3SPaolo Bonzini 			ret = __mmu_unsync_walk(child, pvec);
1849c50d8ae3SPaolo Bonzini 			if (!ret) {
1850c50d8ae3SPaolo Bonzini 				clear_unsync_child_bit(sp, i);
1851c50d8ae3SPaolo Bonzini 				continue;
1852c50d8ae3SPaolo Bonzini 			} else if (ret > 0) {
1853c50d8ae3SPaolo Bonzini 				nr_unsync_leaf += ret;
1854c50d8ae3SPaolo Bonzini 			} else
1855c50d8ae3SPaolo Bonzini 				return ret;
1856c50d8ae3SPaolo Bonzini 		} else if (child->unsync) {
1857c50d8ae3SPaolo Bonzini 			nr_unsync_leaf++;
1858c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
1859c50d8ae3SPaolo Bonzini 				return -ENOSPC;
1860c50d8ae3SPaolo Bonzini 		} else
1861c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
1862c50d8ae3SPaolo Bonzini 	}
1863c50d8ae3SPaolo Bonzini 
1864c50d8ae3SPaolo Bonzini 	return nr_unsync_leaf;
1865c50d8ae3SPaolo Bonzini }
1866c50d8ae3SPaolo Bonzini 
1867c50d8ae3SPaolo Bonzini #define INVALID_INDEX (-1)
1868c50d8ae3SPaolo Bonzini 
1869c50d8ae3SPaolo Bonzini static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1870c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
1871c50d8ae3SPaolo Bonzini {
1872c50d8ae3SPaolo Bonzini 	pvec->nr = 0;
1873c50d8ae3SPaolo Bonzini 	if (!sp->unsync_children)
1874c50d8ae3SPaolo Bonzini 		return 0;
1875c50d8ae3SPaolo Bonzini 
1876c50d8ae3SPaolo Bonzini 	mmu_pages_add(pvec, sp, INVALID_INDEX);
1877c50d8ae3SPaolo Bonzini 	return __mmu_unsync_walk(sp, pvec);
1878c50d8ae3SPaolo Bonzini }
1879c50d8ae3SPaolo Bonzini 
1880c50d8ae3SPaolo Bonzini static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1881c50d8ae3SPaolo Bonzini {
1882c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->unsync);
1883c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_sync_page(sp);
1884c50d8ae3SPaolo Bonzini 	sp->unsync = 0;
1885c50d8ae3SPaolo Bonzini 	--kvm->stat.mmu_unsync;
1886c50d8ae3SPaolo Bonzini }
1887c50d8ae3SPaolo Bonzini 
1888c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1889c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list);
1890c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1891c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list);
1892c50d8ae3SPaolo Bonzini 
1893ac101b7cSSean Christopherson #define for_each_valid_sp(_kvm, _sp, _list)				\
1894ac101b7cSSean Christopherson 	hlist_for_each_entry(_sp, _list, hash_link)			\
1895c50d8ae3SPaolo Bonzini 		if (is_obsolete_sp((_kvm), (_sp))) {			\
1896c50d8ae3SPaolo Bonzini 		} else
1897c50d8ae3SPaolo Bonzini 
1898c50d8ae3SPaolo Bonzini #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
1899ac101b7cSSean Christopherson 	for_each_valid_sp(_kvm, _sp,					\
1900ac101b7cSSean Christopherson 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
1901c50d8ae3SPaolo Bonzini 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
1902c50d8ae3SPaolo Bonzini 
1903479a1efcSSean Christopherson static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1904c50d8ae3SPaolo Bonzini 			 struct list_head *invalid_list)
1905c50d8ae3SPaolo Bonzini {
1906c3e5e415SLai Jiangshan 	int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
1907c3e5e415SLai Jiangshan 
1908c3e5e415SLai Jiangshan 	if (ret < 0) {
1909c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1910c50d8ae3SPaolo Bonzini 		return false;
1911c50d8ae3SPaolo Bonzini 	}
1912c50d8ae3SPaolo Bonzini 
1913c3e5e415SLai Jiangshan 	return !!ret;
1914c50d8ae3SPaolo Bonzini }
1915c50d8ae3SPaolo Bonzini 
1916c50d8ae3SPaolo Bonzini static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1917c50d8ae3SPaolo Bonzini 					struct list_head *invalid_list,
1918c50d8ae3SPaolo Bonzini 					bool remote_flush)
1919c50d8ae3SPaolo Bonzini {
1920c50d8ae3SPaolo Bonzini 	if (!remote_flush && list_empty(invalid_list))
1921c50d8ae3SPaolo Bonzini 		return false;
1922c50d8ae3SPaolo Bonzini 
1923c50d8ae3SPaolo Bonzini 	if (!list_empty(invalid_list))
1924c50d8ae3SPaolo Bonzini 		kvm_mmu_commit_zap_page(kvm, invalid_list);
1925c50d8ae3SPaolo Bonzini 	else
1926c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
1927c50d8ae3SPaolo Bonzini 	return true;
1928c50d8ae3SPaolo Bonzini }
1929c50d8ae3SPaolo Bonzini 
1930c50d8ae3SPaolo Bonzini #ifdef CONFIG_KVM_MMU_AUDIT
1931c50d8ae3SPaolo Bonzini #include "mmu_audit.c"
1932c50d8ae3SPaolo Bonzini #else
1933c50d8ae3SPaolo Bonzini static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
1934c50d8ae3SPaolo Bonzini static void mmu_audit_disable(void) { }
1935c50d8ae3SPaolo Bonzini #endif
1936c50d8ae3SPaolo Bonzini 
1937c50d8ae3SPaolo Bonzini static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1938c50d8ae3SPaolo Bonzini {
1939c50d8ae3SPaolo Bonzini 	return sp->role.invalid ||
1940c50d8ae3SPaolo Bonzini 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1941c50d8ae3SPaolo Bonzini }
1942c50d8ae3SPaolo Bonzini 
1943c50d8ae3SPaolo Bonzini struct mmu_page_path {
1944c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1945c50d8ae3SPaolo Bonzini 	unsigned int idx[PT64_ROOT_MAX_LEVEL];
1946c50d8ae3SPaolo Bonzini };
1947c50d8ae3SPaolo Bonzini 
1948c50d8ae3SPaolo Bonzini #define for_each_sp(pvec, sp, parents, i)			\
1949c50d8ae3SPaolo Bonzini 		for (i = mmu_pages_first(&pvec, &parents);	\
1950c50d8ae3SPaolo Bonzini 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
1951c50d8ae3SPaolo Bonzini 			i = mmu_pages_next(&pvec, &parents, i))
1952c50d8ae3SPaolo Bonzini 
1953c50d8ae3SPaolo Bonzini static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1954c50d8ae3SPaolo Bonzini 			  struct mmu_page_path *parents,
1955c50d8ae3SPaolo Bonzini 			  int i)
1956c50d8ae3SPaolo Bonzini {
1957c50d8ae3SPaolo Bonzini 	int n;
1958c50d8ae3SPaolo Bonzini 
1959c50d8ae3SPaolo Bonzini 	for (n = i+1; n < pvec->nr; n++) {
1960c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp = pvec->page[n].sp;
1961c50d8ae3SPaolo Bonzini 		unsigned idx = pvec->page[n].idx;
1962c50d8ae3SPaolo Bonzini 		int level = sp->role.level;
1963c50d8ae3SPaolo Bonzini 
1964c50d8ae3SPaolo Bonzini 		parents->idx[level-1] = idx;
19653bae0459SSean Christopherson 		if (level == PG_LEVEL_4K)
1966c50d8ae3SPaolo Bonzini 			break;
1967c50d8ae3SPaolo Bonzini 
1968c50d8ae3SPaolo Bonzini 		parents->parent[level-2] = sp;
1969c50d8ae3SPaolo Bonzini 	}
1970c50d8ae3SPaolo Bonzini 
1971c50d8ae3SPaolo Bonzini 	return n;
1972c50d8ae3SPaolo Bonzini }
1973c50d8ae3SPaolo Bonzini 
1974c50d8ae3SPaolo Bonzini static int mmu_pages_first(struct kvm_mmu_pages *pvec,
1975c50d8ae3SPaolo Bonzini 			   struct mmu_page_path *parents)
1976c50d8ae3SPaolo Bonzini {
1977c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1978c50d8ae3SPaolo Bonzini 	int level;
1979c50d8ae3SPaolo Bonzini 
1980c50d8ae3SPaolo Bonzini 	if (pvec->nr == 0)
1981c50d8ae3SPaolo Bonzini 		return 0;
1982c50d8ae3SPaolo Bonzini 
1983c50d8ae3SPaolo Bonzini 	WARN_ON(pvec->page[0].idx != INVALID_INDEX);
1984c50d8ae3SPaolo Bonzini 
1985c50d8ae3SPaolo Bonzini 	sp = pvec->page[0].sp;
1986c50d8ae3SPaolo Bonzini 	level = sp->role.level;
19873bae0459SSean Christopherson 	WARN_ON(level == PG_LEVEL_4K);
1988c50d8ae3SPaolo Bonzini 
1989c50d8ae3SPaolo Bonzini 	parents->parent[level-2] = sp;
1990c50d8ae3SPaolo Bonzini 
1991c50d8ae3SPaolo Bonzini 	/* Also set up a sentinel.  Further entries in pvec are all
1992c50d8ae3SPaolo Bonzini 	 * children of sp, so this element is never overwritten.
1993c50d8ae3SPaolo Bonzini 	 */
1994c50d8ae3SPaolo Bonzini 	parents->parent[level-1] = NULL;
1995c50d8ae3SPaolo Bonzini 	return mmu_pages_next(pvec, parents, 0);
1996c50d8ae3SPaolo Bonzini }
1997c50d8ae3SPaolo Bonzini 
1998c50d8ae3SPaolo Bonzini static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1999c50d8ae3SPaolo Bonzini {
2000c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2001c50d8ae3SPaolo Bonzini 	unsigned int level = 0;
2002c50d8ae3SPaolo Bonzini 
2003c50d8ae3SPaolo Bonzini 	do {
2004c50d8ae3SPaolo Bonzini 		unsigned int idx = parents->idx[level];
2005c50d8ae3SPaolo Bonzini 		sp = parents->parent[level];
2006c50d8ae3SPaolo Bonzini 		if (!sp)
2007c50d8ae3SPaolo Bonzini 			return;
2008c50d8ae3SPaolo Bonzini 
2009c50d8ae3SPaolo Bonzini 		WARN_ON(idx == INVALID_INDEX);
2010c50d8ae3SPaolo Bonzini 		clear_unsync_child_bit(sp, idx);
2011c50d8ae3SPaolo Bonzini 		level++;
2012c50d8ae3SPaolo Bonzini 	} while (!sp->unsync_children);
2013c50d8ae3SPaolo Bonzini }
2014c50d8ae3SPaolo Bonzini 
201565855ed8SLai Jiangshan static int mmu_sync_children(struct kvm_vcpu *vcpu,
201665855ed8SLai Jiangshan 			     struct kvm_mmu_page *parent, bool can_yield)
2017c50d8ae3SPaolo Bonzini {
2018c50d8ae3SPaolo Bonzini 	int i;
2019c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2020c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
2021c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
2022c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2023c3e5e415SLai Jiangshan 	bool flush = false;
2024c50d8ae3SPaolo Bonzini 
2025c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
2026c50d8ae3SPaolo Bonzini 		bool protected = false;
2027c50d8ae3SPaolo Bonzini 
2028c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i)
2029c50d8ae3SPaolo Bonzini 			protected |= rmap_write_protect(vcpu, sp->gfn);
2030c50d8ae3SPaolo Bonzini 
2031c50d8ae3SPaolo Bonzini 		if (protected) {
20325591c069SLai Jiangshan 			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
2033c3e5e415SLai Jiangshan 			flush = false;
2034c50d8ae3SPaolo Bonzini 		}
2035c50d8ae3SPaolo Bonzini 
2036c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
2037479a1efcSSean Christopherson 			kvm_unlink_unsync_page(vcpu->kvm, sp);
2038c3e5e415SLai Jiangshan 			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
2039c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
2040c50d8ae3SPaolo Bonzini 		}
2041531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2042c3e5e415SLai Jiangshan 			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
204365855ed8SLai Jiangshan 			if (!can_yield) {
204465855ed8SLai Jiangshan 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
204565855ed8SLai Jiangshan 				return -EINTR;
204665855ed8SLai Jiangshan 			}
204765855ed8SLai Jiangshan 
2048531810caSBen Gardon 			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2049c3e5e415SLai Jiangshan 			flush = false;
2050c50d8ae3SPaolo Bonzini 		}
2051c50d8ae3SPaolo Bonzini 	}
2052c50d8ae3SPaolo Bonzini 
2053c3e5e415SLai Jiangshan 	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
205465855ed8SLai Jiangshan 	return 0;
2055c50d8ae3SPaolo Bonzini }
2056c50d8ae3SPaolo Bonzini 
2057c50d8ae3SPaolo Bonzini static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2058c50d8ae3SPaolo Bonzini {
2059c50d8ae3SPaolo Bonzini 	atomic_set(&sp->write_flooding_count,  0);
2060c50d8ae3SPaolo Bonzini }
2061c50d8ae3SPaolo Bonzini 
2062c50d8ae3SPaolo Bonzini static void clear_sp_write_flooding_count(u64 *spte)
2063c50d8ae3SPaolo Bonzini {
206457354682SSean Christopherson 	__clear_sp_write_flooding_count(sptep_to_sp(spte));
2065c50d8ae3SPaolo Bonzini }
2066c50d8ae3SPaolo Bonzini 
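/*
 * Look up a shadow page for @gfn with the required role in the MMU page hash,
 * syncing or zapping stale unsync pages along the way; on a miss, allocate a
 * new shadow page, add it to the hash, and account/write-protect the gfn as
 * needed for indirect pages.
 */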
2067c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2068c50d8ae3SPaolo Bonzini 					     gfn_t gfn,
2069c50d8ae3SPaolo Bonzini 					     gva_t gaddr,
2070c50d8ae3SPaolo Bonzini 					     unsigned level,
2071c50d8ae3SPaolo Bonzini 					     int direct,
20720a2b64c5SBen Gardon 					     unsigned int access)
2073c50d8ae3SPaolo Bonzini {
2074fb58a9c3SSean Christopherson 	bool direct_mmu = vcpu->arch.mmu->direct_map;
2075c50d8ae3SPaolo Bonzini 	union kvm_mmu_page_role role;
2076ac101b7cSSean Christopherson 	struct hlist_head *sp_list;
2077c50d8ae3SPaolo Bonzini 	unsigned quadrant;
2078c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2079c50d8ae3SPaolo Bonzini 	int collisions = 0;
2080c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2081c50d8ae3SPaolo Bonzini 
2082c50d8ae3SPaolo Bonzini 	role = vcpu->arch.mmu->mmu_role.base;
2083c50d8ae3SPaolo Bonzini 	role.level = level;
2084c50d8ae3SPaolo Bonzini 	role.direct = direct;
2085c50d8ae3SPaolo Bonzini 	if (role.direct)
2086c50d8ae3SPaolo Bonzini 		role.gpte_is_8_bytes = true;
2087c50d8ae3SPaolo Bonzini 	role.access = access;
2088fb58a9c3SSean Christopherson 	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2089c50d8ae3SPaolo Bonzini 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2090c50d8ae3SPaolo Bonzini 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2091c50d8ae3SPaolo Bonzini 		role.quadrant = quadrant;
2092c50d8ae3SPaolo Bonzini 	}
2093ac101b7cSSean Christopherson 
2094ac101b7cSSean Christopherson 	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2095ac101b7cSSean Christopherson 	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
2096c50d8ae3SPaolo Bonzini 		if (sp->gfn != gfn) {
2097c50d8ae3SPaolo Bonzini 			collisions++;
2098c50d8ae3SPaolo Bonzini 			continue;
2099c50d8ae3SPaolo Bonzini 		}
2100c50d8ae3SPaolo Bonzini 
2101ddc16abbSSean Christopherson 		if (sp->role.word != role.word) {
2102ddc16abbSSean Christopherson 			/*
2103ddc16abbSSean Christopherson 			 * If the guest is creating an upper-level page, zap
2104ddc16abbSSean Christopherson 			 * unsync pages for the same gfn.  While it's possible
2105ddc16abbSSean Christopherson 			 * the guest is using recursive page tables, in all
2106ddc16abbSSean Christopherson 			 * likelihood the guest has stopped using the unsync
2107ddc16abbSSean Christopherson 			 * page and is installing a completely unrelated page.
2108ddc16abbSSean Christopherson 			 * Unsync pages must not be left as is, because the new
2109ddc16abbSSean Christopherson 			 * upper-level page will be write-protected.
2110ddc16abbSSean Christopherson 			 */
2111ddc16abbSSean Christopherson 			if (level > PG_LEVEL_4K && sp->unsync)
2112ddc16abbSSean Christopherson 				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2113ddc16abbSSean Christopherson 							 &invalid_list);
2114c50d8ae3SPaolo Bonzini 			continue;
2115ddc16abbSSean Christopherson 		}
2116c50d8ae3SPaolo Bonzini 
2117fb58a9c3SSean Christopherson 		if (direct_mmu)
2118fb58a9c3SSean Christopherson 			goto trace_get_page;
2119fb58a9c3SSean Christopherson 
2120c50d8ae3SPaolo Bonzini 		if (sp->unsync) {
212107dc4f35SSean Christopherson 			/*
2122479a1efcSSean Christopherson 			 * The page is good, but is stale.  kvm_sync_page does
212307dc4f35SSean Christopherson 			 * get the latest guest state, but (unlike mmu_unsync_children)
212407dc4f35SSean Christopherson 			 * it doesn't write-protect the page or mark it synchronized!
212507dc4f35SSean Christopherson 			 * This way the validity of the mapping is ensured, but the
212607dc4f35SSean Christopherson 			 * overhead of write protection is not incurred until the
212707dc4f35SSean Christopherson 			 * guest invalidates the TLB mapping.  This allows multiple
212807dc4f35SSean Christopherson 			 * SPs for a single gfn to be unsync.
212907dc4f35SSean Christopherson 			 *
213007dc4f35SSean Christopherson 			 * If the sync fails, the page is zapped.  If so, break
213107dc4f35SSean Christopherson 			 * in order to rebuild it.
2132c50d8ae3SPaolo Bonzini 			 */
2133479a1efcSSean Christopherson 			if (!kvm_sync_page(vcpu, sp, &invalid_list))
2134c50d8ae3SPaolo Bonzini 				break;
2135c50d8ae3SPaolo Bonzini 
2136c50d8ae3SPaolo Bonzini 			WARN_ON(!list_empty(&invalid_list));
2137c3e5e415SLai Jiangshan 			kvm_flush_remote_tlbs(vcpu->kvm);
2138c50d8ae3SPaolo Bonzini 		}
2139c50d8ae3SPaolo Bonzini 
2140c50d8ae3SPaolo Bonzini 		__clear_sp_write_flooding_count(sp);
2141fb58a9c3SSean Christopherson 
2142fb58a9c3SSean Christopherson trace_get_page:
2143c50d8ae3SPaolo Bonzini 		trace_kvm_mmu_get_page(sp, false);
2144c50d8ae3SPaolo Bonzini 		goto out;
2145c50d8ae3SPaolo Bonzini 	}
2146c50d8ae3SPaolo Bonzini 
2147c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_cache_miss;
2148c50d8ae3SPaolo Bonzini 
2149c50d8ae3SPaolo Bonzini 	sp = kvm_mmu_alloc_page(vcpu, direct);
2150c50d8ae3SPaolo Bonzini 
2151c50d8ae3SPaolo Bonzini 	sp->gfn = gfn;
2152c50d8ae3SPaolo Bonzini 	sp->role = role;
2153ac101b7cSSean Christopherson 	hlist_add_head(&sp->hash_link, sp_list);
2154c50d8ae3SPaolo Bonzini 	if (!direct) {
2155c50d8ae3SPaolo Bonzini 		account_shadowed(vcpu->kvm, sp);
21563bae0459SSean Christopherson 		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
2157c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2158c50d8ae3SPaolo Bonzini 	}
2159c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_get_page(sp, true);
2160c50d8ae3SPaolo Bonzini out:
2161ddc16abbSSean Christopherson 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2162ddc16abbSSean Christopherson 
2163c50d8ae3SPaolo Bonzini 	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2164c50d8ae3SPaolo Bonzini 		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2165c50d8ae3SPaolo Bonzini 	return sp;
2166c50d8ae3SPaolo Bonzini }
2167c50d8ae3SPaolo Bonzini 
2168c50d8ae3SPaolo Bonzini static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2169c50d8ae3SPaolo Bonzini 					struct kvm_vcpu *vcpu, hpa_t root,
2170c50d8ae3SPaolo Bonzini 					u64 addr)
2171c50d8ae3SPaolo Bonzini {
2172c50d8ae3SPaolo Bonzini 	iterator->addr = addr;
2173c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = root;
2174c50d8ae3SPaolo Bonzini 	iterator->level = vcpu->arch.mmu->shadow_root_level;
2175c50d8ae3SPaolo Bonzini 
2176c50d8ae3SPaolo Bonzini 	if (iterator->level == PT64_ROOT_4LEVEL &&
2177c50d8ae3SPaolo Bonzini 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2178c50d8ae3SPaolo Bonzini 	    !vcpu->arch.mmu->direct_map)
2179c50d8ae3SPaolo Bonzini 		--iterator->level;
2180c50d8ae3SPaolo Bonzini 
2181c50d8ae3SPaolo Bonzini 	if (iterator->level == PT32E_ROOT_LEVEL) {
2182c50d8ae3SPaolo Bonzini 		/*
2183c50d8ae3SPaolo Bonzini 		 * prev_root is currently only used for 64-bit hosts. So only
2184c50d8ae3SPaolo Bonzini 		 * the active root_hpa is valid here.
2185c50d8ae3SPaolo Bonzini 		 */
2186c50d8ae3SPaolo Bonzini 		BUG_ON(root != vcpu->arch.mmu->root_hpa);
2187c50d8ae3SPaolo Bonzini 
2188c50d8ae3SPaolo Bonzini 		iterator->shadow_addr
2189c50d8ae3SPaolo Bonzini 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2190c50d8ae3SPaolo Bonzini 		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2191c50d8ae3SPaolo Bonzini 		--iterator->level;
2192c50d8ae3SPaolo Bonzini 		if (!iterator->shadow_addr)
2193c50d8ae3SPaolo Bonzini 			iterator->level = 0;
2194c50d8ae3SPaolo Bonzini 	}
2195c50d8ae3SPaolo Bonzini }
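
/*
 * Illustration of the PAE special case above: for a 32-bit guest
 * address such as addr = 0xC0000000, (addr >> 30) & 3 == 3 selects the
 * fourth pae_root entry, and the walk then starts at the page directory
 * level below the nominal root.
 */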
2196c50d8ae3SPaolo Bonzini 
2197c50d8ae3SPaolo Bonzini static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2198c50d8ae3SPaolo Bonzini 			     struct kvm_vcpu *vcpu, u64 addr)
2199c50d8ae3SPaolo Bonzini {
2200c50d8ae3SPaolo Bonzini 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2201c50d8ae3SPaolo Bonzini 				    addr);
2202c50d8ae3SPaolo Bonzini }
2203c50d8ae3SPaolo Bonzini 
2204c50d8ae3SPaolo Bonzini static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2205c50d8ae3SPaolo Bonzini {
22063bae0459SSean Christopherson 	if (iterator->level < PG_LEVEL_4K)
2207c50d8ae3SPaolo Bonzini 		return false;
2208c50d8ae3SPaolo Bonzini 
2209c50d8ae3SPaolo Bonzini 	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2210c50d8ae3SPaolo Bonzini 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2211c50d8ae3SPaolo Bonzini 	return true;
2212c50d8ae3SPaolo Bonzini }
2213c50d8ae3SPaolo Bonzini 
2214c50d8ae3SPaolo Bonzini static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2215c50d8ae3SPaolo Bonzini 			       u64 spte)
2216c50d8ae3SPaolo Bonzini {
22173e44dce4SLai Jiangshan 	if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2218c50d8ae3SPaolo Bonzini 		iterator->level = 0;
2219c50d8ae3SPaolo Bonzini 		return;
2220c50d8ae3SPaolo Bonzini 	}
2221c50d8ae3SPaolo Bonzini 
2222c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2223c50d8ae3SPaolo Bonzini 	--iterator->level;
2224c50d8ae3SPaolo Bonzini }
2225c50d8ae3SPaolo Bonzini 
2226c50d8ae3SPaolo Bonzini static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2227c50d8ae3SPaolo Bonzini {
2228c50d8ae3SPaolo Bonzini 	__shadow_walk_next(iterator, *iterator->sptep);
2229c50d8ae3SPaolo Bonzini }
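
/*
 * A minimal sketch of how the three iterator helpers above are chained;
 * this mirrors the for_each_shadow_entry() macro defined earlier in this
 * file (paraphrased here, not quoted verbatim):
 *
 *	struct kvm_shadow_walk_iterator it;
 *
 *	for (shadow_walk_init(&it, vcpu, addr);
 *	     shadow_walk_okay(&it);
 *	     shadow_walk_next(&it)) {
 *		// it.sptep points at the SPTE covering 'addr' at it.level
 *	}
 */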
2230c50d8ae3SPaolo Bonzini 
2231c50d8ae3SPaolo Bonzini static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2232c50d8ae3SPaolo Bonzini 			     struct kvm_mmu_page *sp)
2233c50d8ae3SPaolo Bonzini {
2234c50d8ae3SPaolo Bonzini 	u64 spte;
2235c50d8ae3SPaolo Bonzini 
2236c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2237c50d8ae3SPaolo Bonzini 
2238cc4674d0SBen Gardon 	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2239c50d8ae3SPaolo Bonzini 
2240c50d8ae3SPaolo Bonzini 	mmu_spte_set(sptep, spte);
2241c50d8ae3SPaolo Bonzini 
2242c50d8ae3SPaolo Bonzini 	mmu_page_add_parent_pte(vcpu, sp, sptep);
2243c50d8ae3SPaolo Bonzini 
2244c50d8ae3SPaolo Bonzini 	if (sp->unsync_children || sp->unsync)
2245c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
2246c50d8ae3SPaolo Bonzini }
2247c50d8ae3SPaolo Bonzini 
2248c50d8ae3SPaolo Bonzini static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2249c50d8ae3SPaolo Bonzini 				   unsigned direct_access)
2250c50d8ae3SPaolo Bonzini {
2251c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2252c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
2253c50d8ae3SPaolo Bonzini 
2254c50d8ae3SPaolo Bonzini 		/*
2255c50d8ae3SPaolo Bonzini 		 * For the direct sp, if the guest pte's dirty bit
2256c50d8ae3SPaolo Bonzini 		 * changes from clean to dirty, it will corrupt the
2257c50d8ae3SPaolo Bonzini 		 * sp's access: it would allow writes via a read-only sp,
2258c50d8ae3SPaolo Bonzini 		 * so we should update the spte at this point to get
2259c50d8ae3SPaolo Bonzini 		 * a new sp with the correct access.
2260c50d8ae3SPaolo Bonzini 		 */
2261e47c4aeeSSean Christopherson 		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2262c50d8ae3SPaolo Bonzini 		if (child->role.access == direct_access)
2263c50d8ae3SPaolo Bonzini 			return;
2264c50d8ae3SPaolo Bonzini 
2265c50d8ae3SPaolo Bonzini 		drop_parent_pte(child, sptep);
2266c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2267c50d8ae3SPaolo Bonzini 	}
2268c50d8ae3SPaolo Bonzini }
2269c50d8ae3SPaolo Bonzini 
22702de4085cSBen Gardon /* Returns the number of zapped non-leaf child shadow pages. */
22712de4085cSBen Gardon static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
22722de4085cSBen Gardon 			    u64 *spte, struct list_head *invalid_list)
2273c50d8ae3SPaolo Bonzini {
2274c50d8ae3SPaolo Bonzini 	u64 pte;
2275c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *child;
2276c50d8ae3SPaolo Bonzini 
2277c50d8ae3SPaolo Bonzini 	pte = *spte;
2278c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(pte)) {
2279c50d8ae3SPaolo Bonzini 		if (is_last_spte(pte, sp->role.level)) {
2280c50d8ae3SPaolo Bonzini 			drop_spte(kvm, spte);
2281c50d8ae3SPaolo Bonzini 		} else {
2282e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2283c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, spte);
22842de4085cSBen Gardon 
22852de4085cSBen Gardon 			/*
22862de4085cSBen Gardon 			 * Recursively zap nested TDP SPs; parentless SPs are
22872de4085cSBen Gardon 			 * unlikely to be used again in the near future.  This
22882de4085cSBen Gardon 			 * avoids retaining a large number of stale nested SPs.
22892de4085cSBen Gardon 			 */
22902de4085cSBen Gardon 			if (tdp_enabled && invalid_list &&
22912de4085cSBen Gardon 			    child->role.guest_mode && !child->parent_ptes.val)
22922de4085cSBen Gardon 				return kvm_mmu_prepare_zap_page(kvm, child,
22932de4085cSBen Gardon 								invalid_list);
2294c50d8ae3SPaolo Bonzini 		}
2295ace569e0SSean Christopherson 	} else if (is_mmio_spte(pte)) {
2296c50d8ae3SPaolo Bonzini 		mmu_spte_clear_no_track(spte);
2297ace569e0SSean Christopherson 	}
22982de4085cSBen Gardon 	return 0;
2299c50d8ae3SPaolo Bonzini }
2300c50d8ae3SPaolo Bonzini 
23012de4085cSBen Gardon static int kvm_mmu_page_unlink_children(struct kvm *kvm,
23022de4085cSBen Gardon 					struct kvm_mmu_page *sp,
23032de4085cSBen Gardon 					struct list_head *invalid_list)
2304c50d8ae3SPaolo Bonzini {
23052de4085cSBen Gardon 	int zapped = 0;
2306c50d8ae3SPaolo Bonzini 	unsigned i;
2307c50d8ae3SPaolo Bonzini 
2308c50d8ae3SPaolo Bonzini 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
23092de4085cSBen Gardon 		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
23102de4085cSBen Gardon 
23112de4085cSBen Gardon 	return zapped;
2312c50d8ae3SPaolo Bonzini }
2313c50d8ae3SPaolo Bonzini 
2314c50d8ae3SPaolo Bonzini static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2315c50d8ae3SPaolo Bonzini {
2316c50d8ae3SPaolo Bonzini 	u64 *sptep;
2317c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
2318c50d8ae3SPaolo Bonzini 
2319c50d8ae3SPaolo Bonzini 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2320c50d8ae3SPaolo Bonzini 		drop_parent_pte(sp, sptep);
2321c50d8ae3SPaolo Bonzini }
2322c50d8ae3SPaolo Bonzini 
2323c50d8ae3SPaolo Bonzini static int mmu_zap_unsync_children(struct kvm *kvm,
2324c50d8ae3SPaolo Bonzini 				   struct kvm_mmu_page *parent,
2325c50d8ae3SPaolo Bonzini 				   struct list_head *invalid_list)
2326c50d8ae3SPaolo Bonzini {
2327c50d8ae3SPaolo Bonzini 	int i, zapped = 0;
2328c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
2329c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
2330c50d8ae3SPaolo Bonzini 
23313bae0459SSean Christopherson 	if (parent->role.level == PG_LEVEL_4K)
2332c50d8ae3SPaolo Bonzini 		return 0;
2333c50d8ae3SPaolo Bonzini 
2334c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
2335c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
2336c50d8ae3SPaolo Bonzini 
2337c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
2338c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2339c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
2340c50d8ae3SPaolo Bonzini 			zapped++;
2341c50d8ae3SPaolo Bonzini 		}
2342c50d8ae3SPaolo Bonzini 	}
2343c50d8ae3SPaolo Bonzini 
2344c50d8ae3SPaolo Bonzini 	return zapped;
2345c50d8ae3SPaolo Bonzini }
2346c50d8ae3SPaolo Bonzini 
2347c50d8ae3SPaolo Bonzini static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2348c50d8ae3SPaolo Bonzini 				       struct kvm_mmu_page *sp,
2349c50d8ae3SPaolo Bonzini 				       struct list_head *invalid_list,
2350c50d8ae3SPaolo Bonzini 				       int *nr_zapped)
2351c50d8ae3SPaolo Bonzini {
2352c50d8ae3SPaolo Bonzini 	bool list_unstable;
2353c50d8ae3SPaolo Bonzini 
2354c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_prepare_zap_page(sp);
2355c50d8ae3SPaolo Bonzini 	++kvm->stat.mmu_shadow_zapped;
2356c50d8ae3SPaolo Bonzini 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
23572de4085cSBen Gardon 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2358c50d8ae3SPaolo Bonzini 	kvm_mmu_unlink_parents(kvm, sp);
2359c50d8ae3SPaolo Bonzini 
2360c50d8ae3SPaolo Bonzini 	/* Zapping children means active_mmu_pages has become unstable. */
2361c50d8ae3SPaolo Bonzini 	list_unstable = *nr_zapped;
2362c50d8ae3SPaolo Bonzini 
2363c50d8ae3SPaolo Bonzini 	if (!sp->role.invalid && !sp->role.direct)
2364c50d8ae3SPaolo Bonzini 		unaccount_shadowed(kvm, sp);
2365c50d8ae3SPaolo Bonzini 
2366c50d8ae3SPaolo Bonzini 	if (sp->unsync)
2367c50d8ae3SPaolo Bonzini 		kvm_unlink_unsync_page(kvm, sp);
2368c50d8ae3SPaolo Bonzini 	if (!sp->root_count) {
2369c50d8ae3SPaolo Bonzini 		/* Count self */
2370c50d8ae3SPaolo Bonzini 		(*nr_zapped)++;
2371f95eec9bSSean Christopherson 
2372f95eec9bSSean Christopherson 		/*
2373f95eec9bSSean Christopherson 		 * Already invalid pages (previously active roots) are not on
2374f95eec9bSSean Christopherson 		 * the active page list.  See list_del() in the "else" case of
2375f95eec9bSSean Christopherson 		 * !sp->root_count.
2376f95eec9bSSean Christopherson 		 */
2377f95eec9bSSean Christopherson 		if (sp->role.invalid)
2378f95eec9bSSean Christopherson 			list_add(&sp->link, invalid_list);
2379f95eec9bSSean Christopherson 		else
2380c50d8ae3SPaolo Bonzini 			list_move(&sp->link, invalid_list);
2381c50d8ae3SPaolo Bonzini 		kvm_mod_used_mmu_pages(kvm, -1);
2382c50d8ae3SPaolo Bonzini 	} else {
2383f95eec9bSSean Christopherson 		/*
2384f95eec9bSSean Christopherson 		 * Remove the active root from the active page list, the root
2385f95eec9bSSean Christopherson 		 * will be explicitly freed when the root_count hits zero.
2386f95eec9bSSean Christopherson 		 */
2387f95eec9bSSean Christopherson 		list_del(&sp->link);
2388c50d8ae3SPaolo Bonzini 
2389c50d8ae3SPaolo Bonzini 		/*
2390c50d8ae3SPaolo Bonzini 		 * Obsolete pages cannot be used on any vCPUs, see the comment
2391c50d8ae3SPaolo Bonzini 		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2392c50d8ae3SPaolo Bonzini 		 * treats invalid shadow pages as being obsolete.
2393c50d8ae3SPaolo Bonzini 		 */
2394c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
2395c50d8ae3SPaolo Bonzini 			kvm_reload_remote_mmus(kvm);
2396c50d8ae3SPaolo Bonzini 	}
2397c50d8ae3SPaolo Bonzini 
2398c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
2399c50d8ae3SPaolo Bonzini 		unaccount_huge_nx_page(kvm, sp);
2400c50d8ae3SPaolo Bonzini 
2401c50d8ae3SPaolo Bonzini 	sp->role.invalid = 1;
2402c50d8ae3SPaolo Bonzini 	return list_unstable;
2403c50d8ae3SPaolo Bonzini }
2404c50d8ae3SPaolo Bonzini 
2405c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2406c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list)
2407c50d8ae3SPaolo Bonzini {
2408c50d8ae3SPaolo Bonzini 	int nr_zapped;
2409c50d8ae3SPaolo Bonzini 
2410c50d8ae3SPaolo Bonzini 	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2411c50d8ae3SPaolo Bonzini 	return nr_zapped;
2412c50d8ae3SPaolo Bonzini }
2413c50d8ae3SPaolo Bonzini 
2414c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2415c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list)
2416c50d8ae3SPaolo Bonzini {
2417c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *nsp;
2418c50d8ae3SPaolo Bonzini 
2419c50d8ae3SPaolo Bonzini 	if (list_empty(invalid_list))
2420c50d8ae3SPaolo Bonzini 		return;
2421c50d8ae3SPaolo Bonzini 
2422c50d8ae3SPaolo Bonzini 	/*
2423c50d8ae3SPaolo Bonzini 	 * We need to make sure everyone sees our modifications to
2424c50d8ae3SPaolo Bonzini 	 * the page tables and sees changes to vcpu->mode here. The barrier
2425c50d8ae3SPaolo Bonzini 	 * in kvm_flush_remote_tlbs() achieves this. This pairs
2426c50d8ae3SPaolo Bonzini 	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2427c50d8ae3SPaolo Bonzini 	 *
2428c50d8ae3SPaolo Bonzini 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2429c50d8ae3SPaolo Bonzini 	 * guest mode and/or lockless shadow page table walks.
2430c50d8ae3SPaolo Bonzini 	 */
2431c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs(kvm);
2432c50d8ae3SPaolo Bonzini 
2433c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2434c50d8ae3SPaolo Bonzini 		WARN_ON(!sp->role.invalid || sp->root_count);
2435c50d8ae3SPaolo Bonzini 		kvm_mmu_free_page(sp);
2436c50d8ae3SPaolo Bonzini 	}
2437c50d8ae3SPaolo Bonzini }
2438c50d8ae3SPaolo Bonzini 
24396b82ef2cSSean Christopherson static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
24406b82ef2cSSean Christopherson 						  unsigned long nr_to_zap)
2441c50d8ae3SPaolo Bonzini {
24426b82ef2cSSean Christopherson 	unsigned long total_zapped = 0;
24436b82ef2cSSean Christopherson 	struct kvm_mmu_page *sp, *tmp;
2444ba7888ddSSean Christopherson 	LIST_HEAD(invalid_list);
24456b82ef2cSSean Christopherson 	bool unstable;
24466b82ef2cSSean Christopherson 	int nr_zapped;
2447c50d8ae3SPaolo Bonzini 
2448c50d8ae3SPaolo Bonzini 	if (list_empty(&kvm->arch.active_mmu_pages))
2449ba7888ddSSean Christopherson 		return 0;
2450c50d8ae3SPaolo Bonzini 
24516b82ef2cSSean Christopherson restart:
24528fc51726SSean Christopherson 	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
24536b82ef2cSSean Christopherson 		/*
24546b82ef2cSSean Christopherson 		 * Don't zap active root pages; the page itself can't be freed
24556b82ef2cSSean Christopherson 		 * and zapping it will just force vCPUs to realloc and reload.
24566b82ef2cSSean Christopherson 		 */
24576b82ef2cSSean Christopherson 		if (sp->root_count)
24586b82ef2cSSean Christopherson 			continue;
24596b82ef2cSSean Christopherson 
24606b82ef2cSSean Christopherson 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
24616b82ef2cSSean Christopherson 						      &nr_zapped);
24626b82ef2cSSean Christopherson 		total_zapped += nr_zapped;
24636b82ef2cSSean Christopherson 		if (total_zapped >= nr_to_zap)
2464ba7888ddSSean Christopherson 			break;
2465ba7888ddSSean Christopherson 
24666b82ef2cSSean Christopherson 		if (unstable)
24676b82ef2cSSean Christopherson 			goto restart;
2468ba7888ddSSean Christopherson 	}
24696b82ef2cSSean Christopherson 
24706b82ef2cSSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
24716b82ef2cSSean Christopherson 
24726b82ef2cSSean Christopherson 	kvm->stat.mmu_recycled += total_zapped;
24736b82ef2cSSean Christopherson 	return total_zapped;
24746b82ef2cSSean Christopherson }
24756b82ef2cSSean Christopherson 
2476afe8d7e6SSean Christopherson static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2477afe8d7e6SSean Christopherson {
2478afe8d7e6SSean Christopherson 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2479afe8d7e6SSean Christopherson 		return kvm->arch.n_max_mmu_pages -
2480afe8d7e6SSean Christopherson 			kvm->arch.n_used_mmu_pages;
2481afe8d7e6SSean Christopherson 
2482afe8d7e6SSean Christopherson 	return 0;
2483c50d8ae3SPaolo Bonzini }
2484c50d8ae3SPaolo Bonzini 
2485ba7888ddSSean Christopherson static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2486ba7888ddSSean Christopherson {
24876b82ef2cSSean Christopherson 	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2488ba7888ddSSean Christopherson 
24896b82ef2cSSean Christopherson 	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2490ba7888ddSSean Christopherson 		return 0;
2491ba7888ddSSean Christopherson 
24926b82ef2cSSean Christopherson 	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2493ba7888ddSSean Christopherson 
24946e6ec584SSean Christopherson 	/*
24956e6ec584SSean Christopherson 	 * Note, this check is intentionally soft, it only guarantees that one
24966e6ec584SSean Christopherson 	 * page is available, while the caller may end up allocating as many as
24976e6ec584SSean Christopherson 	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
24986e6ec584SSean Christopherson 	 * exceeding the (arbitrary by default) limit will not harm the host,
2499c4342633SIngo Molnar 	 * being too aggressive may unnecessarily kill the guest, and getting an
25006e6ec584SSean Christopherson 	 * exact count is far more trouble than it's worth, especially in the
25016e6ec584SSean Christopherson 	 * page fault paths.
25026e6ec584SSean Christopherson 	 */
2503ba7888ddSSean Christopherson 	if (!kvm_mmu_available_pages(vcpu->kvm))
2504ba7888ddSSean Christopherson 		return -ENOSPC;
2505ba7888ddSSean Christopherson 	return 0;
2506ba7888ddSSean Christopherson }
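
/*
 * A worked example of the refill logic above, assuming the usual
 * KVM_MIN_FREE_MMU_PAGES = 5 and KVM_REFILL_PAGES = 25 (defined in
 * kvm_host.h; the values here are illustrative): with n_max_mmu_pages =
 * 2048 and n_used_mmu_pages = 2045, kvm_mmu_available_pages() returns 3,
 * which is below the minimum, so kvm_mmu_zap_oldest_mmu_pages() is asked
 * to zap 25 - 3 = 22 pages before the page fault handler proceeds.
 */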
2507ba7888ddSSean Christopherson 
2508c50d8ae3SPaolo Bonzini /*
2509c50d8ae3SPaolo Bonzini  * Changing the number of mmu pages allocated to the vm.
2510c50d8ae3SPaolo Bonzini  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2511c50d8ae3SPaolo Bonzini  */
2512c50d8ae3SPaolo Bonzini void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2513c50d8ae3SPaolo Bonzini {
2514531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
2515c50d8ae3SPaolo Bonzini 
2516c50d8ae3SPaolo Bonzini 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
25176b82ef2cSSean Christopherson 		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
25186b82ef2cSSean Christopherson 						  goal_nr_mmu_pages);
2519c50d8ae3SPaolo Bonzini 
2520c50d8ae3SPaolo Bonzini 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2521c50d8ae3SPaolo Bonzini 	}
2522c50d8ae3SPaolo Bonzini 
2523c50d8ae3SPaolo Bonzini 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2524c50d8ae3SPaolo Bonzini 
2525531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
2526c50d8ae3SPaolo Bonzini }
2527c50d8ae3SPaolo Bonzini 
2528c50d8ae3SPaolo Bonzini int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2529c50d8ae3SPaolo Bonzini {
2530c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2531c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2532c50d8ae3SPaolo Bonzini 	int r;
2533c50d8ae3SPaolo Bonzini 
2534c50d8ae3SPaolo Bonzini 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2535c50d8ae3SPaolo Bonzini 	r = 0;
2536531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
2537c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2538c50d8ae3SPaolo Bonzini 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2539c50d8ae3SPaolo Bonzini 			 sp->role.word);
2540c50d8ae3SPaolo Bonzini 		r = 1;
2541c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2542c50d8ae3SPaolo Bonzini 	}
2543c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2544531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
2545c50d8ae3SPaolo Bonzini 
2546c50d8ae3SPaolo Bonzini 	return r;
2547c50d8ae3SPaolo Bonzini }
254896ad91aeSSean Christopherson 
254996ad91aeSSean Christopherson static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
255096ad91aeSSean Christopherson {
255196ad91aeSSean Christopherson 	gpa_t gpa;
255296ad91aeSSean Christopherson 	int r;
255396ad91aeSSean Christopherson 
255496ad91aeSSean Christopherson 	if (vcpu->arch.mmu->direct_map)
255596ad91aeSSean Christopherson 		return 0;
255696ad91aeSSean Christopherson 
255796ad91aeSSean Christopherson 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
255896ad91aeSSean Christopherson 
255996ad91aeSSean Christopherson 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
256096ad91aeSSean Christopherson 
256196ad91aeSSean Christopherson 	return r;
256296ad91aeSSean Christopherson }
2563c50d8ae3SPaolo Bonzini 
2564c50d8ae3SPaolo Bonzini static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2565c50d8ae3SPaolo Bonzini {
2566c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_unsync_page(sp);
2567c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_unsync;
2568c50d8ae3SPaolo Bonzini 	sp->unsync = 1;
2569c50d8ae3SPaolo Bonzini 
2570c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
2571c50d8ae3SPaolo Bonzini }
2572c50d8ae3SPaolo Bonzini 
25730337f585SSean Christopherson /*
25740337f585SSean Christopherson  * Attempt to unsync any shadow pages that can be reached by the specified gfn,
25750337f585SSean Christopherson  * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
25760337f585SSean Christopherson  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
25770337f585SSean Christopherson  * be write-protected.
25780337f585SSean Christopherson  */
257953597858SDavid Matlack int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
25802839180cSPaolo Bonzini 			    gfn_t gfn, bool can_unsync, bool prefetch)
2581c50d8ae3SPaolo Bonzini {
2582c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2583ce25681dSSean Christopherson 	bool locked = false;
2584c50d8ae3SPaolo Bonzini 
25850337f585SSean Christopherson 	/*
25860337f585SSean Christopherson 	 * Force write-protection if the page is being tracked.  Note, the page
25870337f585SSean Christopherson 	 * track machinery is used to write-protect upper-level shadow pages,
25880337f585SSean Christopherson 	 * i.e. this guards the role.level == 4K assertion below!
25890337f585SSean Christopherson 	 */
2590deae4a10SDavid Stevens 	if (kvm_slot_page_track_is_active(vcpu, slot, gfn, KVM_PAGE_TRACK_WRITE))
25910337f585SSean Christopherson 		return -EPERM;
2592c50d8ae3SPaolo Bonzini 
25930337f585SSean Christopherson 	/*
25940337f585SSean Christopherson 	 * The page is not write-tracked, mark existing shadow pages unsync
25950337f585SSean Christopherson 	 * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
25960337f585SSean Christopherson 	 * that case, KVM must complete emulation of the guest TLB flush before
25970337f585SSean Christopherson 	 * allowing shadow pages to become unsync (writable by the guest).
25980337f585SSean Christopherson 	 */
2599c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2600c50d8ae3SPaolo Bonzini 		if (!can_unsync)
26010337f585SSean Christopherson 			return -EPERM;
2602c50d8ae3SPaolo Bonzini 
2603c50d8ae3SPaolo Bonzini 		if (sp->unsync)
2604c50d8ae3SPaolo Bonzini 			continue;
2605c50d8ae3SPaolo Bonzini 
26062839180cSPaolo Bonzini 		if (prefetch)
2607f1c4a88cSLai Jiangshan 			return -EEXIST;
2608f1c4a88cSLai Jiangshan 
2609ce25681dSSean Christopherson 		/*
2610ce25681dSSean Christopherson 		 * TDP MMU page faults require an additional spinlock as they
2611ce25681dSSean Christopherson 		 * run with mmu_lock held for read, not write, and the unsync
2612ce25681dSSean Christopherson 		 * logic is not thread-safe.  Take the spinlock regardless of
2613ce25681dSSean Christopherson 		 * the MMU type to avoid extra conditionals/parameters; there's
2614ce25681dSSean Christopherson 		 * no meaningful penalty if mmu_lock is held for write.
2615ce25681dSSean Christopherson 		 */
2616ce25681dSSean Christopherson 		if (!locked) {
2617ce25681dSSean Christopherson 			locked = true;
2618ce25681dSSean Christopherson 			spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
2619ce25681dSSean Christopherson 
2620ce25681dSSean Christopherson 			/*
2621ce25681dSSean Christopherson 			 * Recheck after taking the spinlock, a different vCPU
2622ce25681dSSean Christopherson 			 * may have since marked the page unsync.  A false
2623ce25681dSSean Christopherson 			 * positive on the unprotected check above is not
2624ce25681dSSean Christopherson 			 * possible as clearing sp->unsync _must_ hold mmu_lock
2625ce25681dSSean Christopherson 			 * for write, i.e. unsync cannot transition from 0->1
2626ce25681dSSean Christopherson 			 * while this CPU holds mmu_lock for read (or write).
2627ce25681dSSean Christopherson 			 */
2628ce25681dSSean Christopherson 			if (READ_ONCE(sp->unsync))
2629ce25681dSSean Christopherson 				continue;
2630ce25681dSSean Christopherson 		}
2631ce25681dSSean Christopherson 
26323bae0459SSean Christopherson 		WARN_ON(sp->role.level != PG_LEVEL_4K);
2633c50d8ae3SPaolo Bonzini 		kvm_unsync_page(vcpu, sp);
2634c50d8ae3SPaolo Bonzini 	}
2635ce25681dSSean Christopherson 	if (locked)
2636ce25681dSSean Christopherson 		spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
2637c50d8ae3SPaolo Bonzini 
2638c50d8ae3SPaolo Bonzini 	/*
2639c50d8ae3SPaolo Bonzini 	 * We need to ensure that the marking of unsync pages is visible
2640c50d8ae3SPaolo Bonzini 	 * before the SPTE is updated to allow writes because
2641c50d8ae3SPaolo Bonzini 	 * kvm_mmu_sync_roots() checks the unsync flags without holding
2642c50d8ae3SPaolo Bonzini 	 * the MMU lock and so can race with this. If the SPTE was updated
2643c50d8ae3SPaolo Bonzini 	 * before the page had been marked as unsync-ed, something like the
2644c50d8ae3SPaolo Bonzini 	 * following could happen:
2645c50d8ae3SPaolo Bonzini 	 *
2646c50d8ae3SPaolo Bonzini 	 * CPU 1                    CPU 2
2647c50d8ae3SPaolo Bonzini 	 * ---------------------------------------------------------------------
2648c50d8ae3SPaolo Bonzini 	 * 1.2 Host updates SPTE
2649c50d8ae3SPaolo Bonzini 	 *     to be writable
2650c50d8ae3SPaolo Bonzini 	 *                      2.1 Guest writes a GPTE for GVA X.
2651c50d8ae3SPaolo Bonzini 	 *                          (GPTE being in the guest page table shadowed
2652c50d8ae3SPaolo Bonzini 	 *                           by the SP from CPU 1.)
2653c50d8ae3SPaolo Bonzini 	 *                          This reads SPTE during the page table walk.
2654c50d8ae3SPaolo Bonzini 	 *                          Since SPTE.W is read as 1, there is no
2655c50d8ae3SPaolo Bonzini 	 *                          fault.
2656c50d8ae3SPaolo Bonzini 	 *
2657c50d8ae3SPaolo Bonzini 	 *                      2.2 Guest issues TLB flush.
2658c50d8ae3SPaolo Bonzini 	 *                          That causes a VM Exit.
2659c50d8ae3SPaolo Bonzini 	 *
26600337f585SSean Christopherson 	 *                      2.3 Walking of unsync pages sees sp->unsync is
26610337f585SSean Christopherson 	 *                          false and skips the page.
2662c50d8ae3SPaolo Bonzini 	 *
2663c50d8ae3SPaolo Bonzini 	 *                      2.4 Guest accesses GVA X.
2664c50d8ae3SPaolo Bonzini 	 *                          Since the mapping in the SP was not updated,
2665c50d8ae3SPaolo Bonzini 	 *                          the old mapping for GVA X is incorrectly
2666c50d8ae3SPaolo Bonzini 	 *                          used.
2667c50d8ae3SPaolo Bonzini 	 * 1.1 Host marks SP
2668c50d8ae3SPaolo Bonzini 	 *     as unsync
2669c50d8ae3SPaolo Bonzini 	 *     (sp->unsync = true)
2670c50d8ae3SPaolo Bonzini 	 *
2671c50d8ae3SPaolo Bonzini 	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
2672*264d3dc1SLai Jiangshan 	 * the situation in 2.4 does not arise.  It pairs with the read barrier
2673*264d3dc1SLai Jiangshan 	 * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3.
2674c50d8ae3SPaolo Bonzini 	 */
2675c50d8ae3SPaolo Bonzini 	smp_wmb();
2676c50d8ae3SPaolo Bonzini 
26770337f585SSean Christopherson 	return 0;
2678c50d8ae3SPaolo Bonzini }
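
/*
 * Roughly how the return value above is consumed by make_spte() in
 * spte.c (a paraphrased sketch, not the verbatim code): any non-zero
 * return forces the new SPTE to be write-protected, while 0 lets it be
 * made writable because every reachable shadow page for the gfn is now
 * unsync:
 *
 *	if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch)) {
 *		wrprot = true;
 *		pte_access &= ~ACC_WRITE_MASK;
 *		spte &= ~PT_WRITABLE_MASK;
 *	}
 */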
2679c50d8ae3SPaolo Bonzini 
26808a9f566aSDavid Matlack static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
26818a9f566aSDavid Matlack 			u64 *sptep, unsigned int pte_access, gfn_t gfn,
2682a12f4381SPaolo Bonzini 			kvm_pfn_t pfn, struct kvm_page_fault *fault)
2683c50d8ae3SPaolo Bonzini {
2684d786c778SPaolo Bonzini 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
2685eb5cd7ffSPaolo Bonzini 	int level = sp->role.level;
2686c50d8ae3SPaolo Bonzini 	int was_rmapped = 0;
2687c4371c2aSSean Christopherson 	int ret = RET_PF_FIXED;
2688c50d8ae3SPaolo Bonzini 	bool flush = false;
2689ad67e480SPaolo Bonzini 	bool wrprot;
2690d786c778SPaolo Bonzini 	u64 spte;
2691c50d8ae3SPaolo Bonzini 
2692a12f4381SPaolo Bonzini 	/* Prefetching always gets a writable pfn.  */
2693a12f4381SPaolo Bonzini 	bool host_writable = !fault || fault->map_writable;
26942839180cSPaolo Bonzini 	bool prefetch = !fault || fault->prefetch;
2695a12f4381SPaolo Bonzini 	bool write_fault = fault && fault->write;
2696a12f4381SPaolo Bonzini 
2697c50d8ae3SPaolo Bonzini 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2698c50d8ae3SPaolo Bonzini 		 *sptep, write_fault, gfn);
2699c50d8ae3SPaolo Bonzini 
2700a54aa15cSSean Christopherson 	if (unlikely(is_noslot_pfn(pfn))) {
2701a54aa15cSSean Christopherson 		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2702a54aa15cSSean Christopherson 		return RET_PF_EMULATE;
2703a54aa15cSSean Christopherson 	}
2704a54aa15cSSean Christopherson 
2705c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep)) {
2706c50d8ae3SPaolo Bonzini 		/*
2707c50d8ae3SPaolo Bonzini 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2708c50d8ae3SPaolo Bonzini 		 * the parent of the now unreachable PTE.
2709c50d8ae3SPaolo Bonzini 		 */
27103bae0459SSean Christopherson 		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2711c50d8ae3SPaolo Bonzini 			struct kvm_mmu_page *child;
2712c50d8ae3SPaolo Bonzini 			u64 pte = *sptep;
2713c50d8ae3SPaolo Bonzini 
2714e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2715c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, sptep);
2716c50d8ae3SPaolo Bonzini 			flush = true;
2717c50d8ae3SPaolo Bonzini 		} else if (pfn != spte_to_pfn(*sptep)) {
2718c50d8ae3SPaolo Bonzini 			pgprintk("hfn old %llx new %llx\n",
2719c50d8ae3SPaolo Bonzini 				 spte_to_pfn(*sptep), pfn);
2720c50d8ae3SPaolo Bonzini 			drop_spte(vcpu->kvm, sptep);
2721c50d8ae3SPaolo Bonzini 			flush = true;
2722c50d8ae3SPaolo Bonzini 		} else
2723c50d8ae3SPaolo Bonzini 			was_rmapped = 1;
2724c50d8ae3SPaolo Bonzini 	}
2725c50d8ae3SPaolo Bonzini 
27262839180cSPaolo Bonzini 	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
27277158bee4SPaolo Bonzini 			   true, host_writable, &spte);
2728d786c778SPaolo Bonzini 
2729d786c778SPaolo Bonzini 	if (*sptep == spte) {
2730d786c778SPaolo Bonzini 		ret = RET_PF_SPURIOUS;
2731d786c778SPaolo Bonzini 	} else {
2732d786c778SPaolo Bonzini 		trace_kvm_mmu_set_spte(level, gfn, sptep);
2733d786c778SPaolo Bonzini 		flush |= mmu_spte_update(sptep, spte);
2734d786c778SPaolo Bonzini 	}
2735d786c778SPaolo Bonzini 
2736ad67e480SPaolo Bonzini 	if (wrprot) {
2737c50d8ae3SPaolo Bonzini 		if (write_fault)
2738c50d8ae3SPaolo Bonzini 			ret = RET_PF_EMULATE;
2739c50d8ae3SPaolo Bonzini 	}
2740c50d8ae3SPaolo Bonzini 
2741d786c778SPaolo Bonzini 	if (flush)
2742c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
2743c50d8ae3SPaolo Bonzini 				KVM_PAGES_PER_HPAGE(level));
2744c50d8ae3SPaolo Bonzini 
2745c50d8ae3SPaolo Bonzini 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2746c50d8ae3SPaolo Bonzini 
2747c50d8ae3SPaolo Bonzini 	if (!was_rmapped) {
2748d786c778SPaolo Bonzini 		WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
274971f51d2cSMingwei Zhang 		kvm_update_page_stats(vcpu->kvm, level, 1);
27508a9f566aSDavid Matlack 		rmap_add(vcpu, slot, sptep, gfn);
2751c50d8ae3SPaolo Bonzini 	}
2752c50d8ae3SPaolo Bonzini 
2753c50d8ae3SPaolo Bonzini 	return ret;
2754c50d8ae3SPaolo Bonzini }
2755c50d8ae3SPaolo Bonzini 
2756c50d8ae3SPaolo Bonzini static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2757c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp,
2758c50d8ae3SPaolo Bonzini 				    u64 *start, u64 *end)
2759c50d8ae3SPaolo Bonzini {
2760c50d8ae3SPaolo Bonzini 	struct page *pages[PTE_PREFETCH_NUM];
2761c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
27620a2b64c5SBen Gardon 	unsigned int access = sp->role.access;
2763c50d8ae3SPaolo Bonzini 	int i, ret;
2764c50d8ae3SPaolo Bonzini 	gfn_t gfn;
2765c50d8ae3SPaolo Bonzini 
2766c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2767c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2768c50d8ae3SPaolo Bonzini 	if (!slot)
2769c50d8ae3SPaolo Bonzini 		return -1;
2770c50d8ae3SPaolo Bonzini 
2771c50d8ae3SPaolo Bonzini 	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2772c50d8ae3SPaolo Bonzini 	if (ret <= 0)
2773c50d8ae3SPaolo Bonzini 		return -1;
2774c50d8ae3SPaolo Bonzini 
2775c50d8ae3SPaolo Bonzini 	for (i = 0; i < ret; i++, gfn++, start++) {
27768a9f566aSDavid Matlack 		mmu_set_spte(vcpu, slot, start, access, gfn,
2777a12f4381SPaolo Bonzini 			     page_to_pfn(pages[i]), NULL);
2778c50d8ae3SPaolo Bonzini 		put_page(pages[i]);
2779c50d8ae3SPaolo Bonzini 	}
2780c50d8ae3SPaolo Bonzini 
2781c50d8ae3SPaolo Bonzini 	return 0;
2782c50d8ae3SPaolo Bonzini }
2783c50d8ae3SPaolo Bonzini 
2784c50d8ae3SPaolo Bonzini static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2785c50d8ae3SPaolo Bonzini 				  struct kvm_mmu_page *sp, u64 *sptep)
2786c50d8ae3SPaolo Bonzini {
2787c50d8ae3SPaolo Bonzini 	u64 *spte, *start = NULL;
2788c50d8ae3SPaolo Bonzini 	int i;
2789c50d8ae3SPaolo Bonzini 
2790c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
2791c50d8ae3SPaolo Bonzini 
2792c50d8ae3SPaolo Bonzini 	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2793c50d8ae3SPaolo Bonzini 	spte = sp->spt + i;
2794c50d8ae3SPaolo Bonzini 
2795c50d8ae3SPaolo Bonzini 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2796c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*spte) || spte == sptep) {
2797c50d8ae3SPaolo Bonzini 			if (!start)
2798c50d8ae3SPaolo Bonzini 				continue;
2799c50d8ae3SPaolo Bonzini 			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2800c6cecc4bSSean Christopherson 				return;
2801c50d8ae3SPaolo Bonzini 			start = NULL;
2802c50d8ae3SPaolo Bonzini 		} else if (!start)
2803c50d8ae3SPaolo Bonzini 			start = spte;
2804c50d8ae3SPaolo Bonzini 	}
2805c6cecc4bSSean Christopherson 	if (start)
2806c6cecc4bSSean Christopherson 		direct_pte_prefetch_many(vcpu, sp, start, spte);
2807c50d8ae3SPaolo Bonzini }
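
/*
 * Worked example of the windowing above, assuming PTE_PREFETCH_NUM == 8
 * (its usual value, defined earlier in this file): if the faulting sptep
 * is entry 13 of the shadow page, i is rounded down to 8 and the loop
 * scans entries 8..15, prefetching each run of not-yet-present SPTEs
 * around the faulting one.
 */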
2808c50d8ae3SPaolo Bonzini 
2809c50d8ae3SPaolo Bonzini static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2810c50d8ae3SPaolo Bonzini {
2811c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2812c50d8ae3SPaolo Bonzini 
281357354682SSean Christopherson 	sp = sptep_to_sp(sptep);
2814c50d8ae3SPaolo Bonzini 
2815c50d8ae3SPaolo Bonzini 	/*
2816c50d8ae3SPaolo Bonzini 	 * Without accessed bits, there's no way to distinguish between
2817c50d8ae3SPaolo Bonzini 	 * actually accessed translations and prefetched ones, so disable pte
2818c50d8ae3SPaolo Bonzini 	 * prefetch if accessed bits aren't available.
2819c50d8ae3SPaolo Bonzini 	 */
2820c50d8ae3SPaolo Bonzini 	if (sp_ad_disabled(sp))
2821c50d8ae3SPaolo Bonzini 		return;
2822c50d8ae3SPaolo Bonzini 
28233bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
2824c50d8ae3SPaolo Bonzini 		return;
2825c50d8ae3SPaolo Bonzini 
28264a42d848SDavid Stevens 	/*
28274a42d848SDavid Stevens 	 * If addresses are being invalidated, skip prefetching to avoid
28284a42d848SDavid Stevens 	 * accidentally prefetching those addresses.
28294a42d848SDavid Stevens 	 */
28304a42d848SDavid Stevens 	if (unlikely(vcpu->kvm->mmu_notifier_count))
28314a42d848SDavid Stevens 		return;
28324a42d848SDavid Stevens 
2833c50d8ae3SPaolo Bonzini 	__direct_pte_prefetch(vcpu, sp, sptep);
2834c50d8ae3SPaolo Bonzini }
2835c50d8ae3SPaolo Bonzini 
28361b6d9d9eSSean Christopherson static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
28378ca6f063SBen Gardon 				  const struct kvm_memory_slot *slot)
2838db543216SSean Christopherson {
2839db543216SSean Christopherson 	unsigned long hva;
2840db543216SSean Christopherson 	pte_t *pte;
2841db543216SSean Christopherson 	int level;
2842db543216SSean Christopherson 
2843e851265aSSean Christopherson 	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
28443bae0459SSean Christopherson 		return PG_LEVEL_4K;
2845db543216SSean Christopherson 
2846293e306eSSean Christopherson 	/*
2847293e306eSSean Christopherson 	 * is not solely for performance; it's also necessary to avoid the
2848293e306eSSean Christopherson 	 * is not solely for performance, it's also necessary to avoid the
2849293e306eSSean Christopherson 	 * "writable" check in __gfn_to_hva_many(), which will always fail on
2850293e306eSSean Christopherson 	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
2851293e306eSSean Christopherson 	 * page fault steps have already verified the guest isn't writing a
2852293e306eSSean Christopherson 	 * read-only memslot.
2853293e306eSSean Christopherson 	 */
2854db543216SSean Christopherson 	hva = __gfn_to_hva_memslot(slot, gfn);
2855db543216SSean Christopherson 
28561b6d9d9eSSean Christopherson 	pte = lookup_address_in_mm(kvm->mm, hva, &level);
2857db543216SSean Christopherson 	if (unlikely(!pte))
28583bae0459SSean Christopherson 		return PG_LEVEL_4K;
2859db543216SSean Christopherson 
2860db543216SSean Christopherson 	return level;
2861db543216SSean Christopherson }
2862db543216SSean Christopherson 
28638ca6f063SBen Gardon int kvm_mmu_max_mapping_level(struct kvm *kvm,
28648ca6f063SBen Gardon 			      const struct kvm_memory_slot *slot, gfn_t gfn,
28658ca6f063SBen Gardon 			      kvm_pfn_t pfn, int max_level)
28661b6d9d9eSSean Christopherson {
28671b6d9d9eSSean Christopherson 	struct kvm_lpage_info *linfo;
2868ec607a56SPaolo Bonzini 	int host_level;
28691b6d9d9eSSean Christopherson 
28701b6d9d9eSSean Christopherson 	max_level = min(max_level, max_huge_page_level);
28711b6d9d9eSSean Christopherson 	for ( ; max_level > PG_LEVEL_4K; max_level--) {
28721b6d9d9eSSean Christopherson 		linfo = lpage_info_slot(gfn, slot, max_level);
28731b6d9d9eSSean Christopherson 		if (!linfo->disallow_lpage)
28741b6d9d9eSSean Christopherson 			break;
28751b6d9d9eSSean Christopherson 	}
28761b6d9d9eSSean Christopherson 
28771b6d9d9eSSean Christopherson 	if (max_level == PG_LEVEL_4K)
28781b6d9d9eSSean Christopherson 		return PG_LEVEL_4K;
28791b6d9d9eSSean Christopherson 
2880ec607a56SPaolo Bonzini 	host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
2881ec607a56SPaolo Bonzini 	return min(host_level, max_level);
28821b6d9d9eSSean Christopherson }
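
/*
 * Example of the interaction above: if the memslot's lpage_info
 * disallows a 1GiB mapping at this gfn but allows 2MiB, and the host
 * backs the page with a 2MiB huge page, host_pfn_mapping_level() reports
 * PG_LEVEL_2M and the result is min(2M, 2M) == PG_LEVEL_2M; a 4KiB
 * host mapping would instead cap the result at PG_LEVEL_4K.
 */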
28831b6d9d9eSSean Christopherson 
288473a3c659SPaolo Bonzini void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
28850885904dSSean Christopherson {
2886e710c5f6SDavid Matlack 	struct kvm_memory_slot *slot = fault->slot;
288717eff019SSean Christopherson 	kvm_pfn_t mask;
28880885904dSSean Christopherson 
288973a3c659SPaolo Bonzini 	fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;
28903cf06612SSean Christopherson 
289173a3c659SPaolo Bonzini 	if (unlikely(fault->max_level == PG_LEVEL_4K))
289273a3c659SPaolo Bonzini 		return;
289317eff019SSean Christopherson 
289473a3c659SPaolo Bonzini 	if (is_error_noslot_pfn(fault->pfn) || kvm_is_reserved_pfn(fault->pfn))
289573a3c659SPaolo Bonzini 		return;
289617eff019SSean Christopherson 
2897e710c5f6SDavid Matlack 	if (kvm_slot_dirty_track_enabled(slot))
289873a3c659SPaolo Bonzini 		return;
2899293e306eSSean Christopherson 
29003cf06612SSean Christopherson 	/*
29013cf06612SSean Christopherson 	 * Enforce the iTLB multihit workaround after capturing the requested
29023cf06612SSean Christopherson 	 * level, which will be used to do precise, accurate accounting.
29033cf06612SSean Christopherson 	 */
290473a3c659SPaolo Bonzini 	fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
290573a3c659SPaolo Bonzini 						     fault->gfn, fault->pfn,
290673a3c659SPaolo Bonzini 						     fault->max_level);
290773a3c659SPaolo Bonzini 	if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
290873a3c659SPaolo Bonzini 		return;
29094cd071d1SSean Christopherson 
29100885904dSSean Christopherson 	/*
29114cd071d1SSean Christopherson 	 * mmu_notifier_retry() was successful and mmu_lock is held, so
29124cd071d1SSean Christopherson 	 * the pmd can't be split from under us.
29130885904dSSean Christopherson 	 */
291473a3c659SPaolo Bonzini 	fault->goal_level = fault->req_level;
291573a3c659SPaolo Bonzini 	mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
291673a3c659SPaolo Bonzini 	VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
291773a3c659SPaolo Bonzini 	fault->pfn &= ~mask;
29180885904dSSean Christopherson }
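
/*
 * Worked example of the final alignment above: for a 2MiB mapping,
 * fault->goal_level == PG_LEVEL_2M and mask == 511 (0x1ff).  The
 * VM_BUG_ON checks that gfn and pfn share the same offset within the
 * 512-page region, and clearing the low 9 bits of fault->pfn yields the
 * head pfn of the huge page that the large SPTE will map.
 */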
29190885904dSSean Christopherson 
2920536f0e6aSPaolo Bonzini void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
2921c50d8ae3SPaolo Bonzini {
2922536f0e6aSPaolo Bonzini 	if (cur_level > PG_LEVEL_4K &&
2923536f0e6aSPaolo Bonzini 	    cur_level == fault->goal_level &&
2924c50d8ae3SPaolo Bonzini 	    is_shadow_present_pte(spte) &&
2925c50d8ae3SPaolo Bonzini 	    !is_large_pte(spte)) {
2926c50d8ae3SPaolo Bonzini 		/*
2927c50d8ae3SPaolo Bonzini 		 * A small SPTE exists for this pfn, but FNAME(fetch)
2928c50d8ae3SPaolo Bonzini 		 * and __direct_map would like to create a large PTE
2929c50d8ae3SPaolo Bonzini 		 * instead: just force them to go down another level,
2930c50d8ae3SPaolo Bonzini 		 * patching the next 9 bits of the address back into
2931c50d8ae3SPaolo Bonzini 		 * pfn for them.
2932c50d8ae3SPaolo Bonzini 		 */
2933536f0e6aSPaolo Bonzini 		u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
2934536f0e6aSPaolo Bonzini 				KVM_PAGES_PER_HPAGE(cur_level - 1);
2935536f0e6aSPaolo Bonzini 		fault->pfn |= fault->gfn & page_mask;
2936536f0e6aSPaolo Bonzini 		fault->goal_level--;
2937c50d8ae3SPaolo Bonzini 	}
2938c50d8ae3SPaolo Bonzini }
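
/*
 * Worked example of the demotion above: with cur_level == PG_LEVEL_2M,
 * page_mask == 512 - 1 == 0x1ff, so fault->pfn picks up the next 9 bits
 * of the faulting gfn and fault->goal_level drops to PG_LEVEL_4K,
 * making FNAME(fetch) / __direct_map install a 4KiB SPTE under the
 * existing non-leaf entry instead of a large one.
 */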
2939c50d8ae3SPaolo Bonzini 
294043b74355SPaolo Bonzini static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
2941c50d8ae3SPaolo Bonzini {
2942c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator it;
2943c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
294473a3c659SPaolo Bonzini 	int ret;
294543b74355SPaolo Bonzini 	gfn_t base_gfn = fault->gfn;
2946c50d8ae3SPaolo Bonzini 
294773a3c659SPaolo Bonzini 	kvm_mmu_hugepage_adjust(vcpu, fault);
29484cd071d1SSean Christopherson 
2949f0066d94SPaolo Bonzini 	trace_kvm_mmu_spte_requested(fault);
295043b74355SPaolo Bonzini 	for_each_shadow_entry(vcpu, fault->addr, it) {
2951c50d8ae3SPaolo Bonzini 		/*
2952c50d8ae3SPaolo Bonzini 		 * We cannot overwrite existing page tables with an NX
2953c50d8ae3SPaolo Bonzini 		 * large page, as the leaf could be executable.
2954c50d8ae3SPaolo Bonzini 		 */
295573a3c659SPaolo Bonzini 		if (fault->nx_huge_page_workaround_enabled)
2956536f0e6aSPaolo Bonzini 			disallowed_hugepage_adjust(fault, *it.sptep, it.level);
2957c50d8ae3SPaolo Bonzini 
295843b74355SPaolo Bonzini 		base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
295973a3c659SPaolo Bonzini 		if (it.level == fault->goal_level)
2960c50d8ae3SPaolo Bonzini 			break;
2961c50d8ae3SPaolo Bonzini 
2962c50d8ae3SPaolo Bonzini 		drop_large_spte(vcpu, it.sptep);
296303fffc54SSean Christopherson 		if (is_shadow_present_pte(*it.sptep))
296403fffc54SSean Christopherson 			continue;
296503fffc54SSean Christopherson 
2966c50d8ae3SPaolo Bonzini 		sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
2967c50d8ae3SPaolo Bonzini 				      it.level - 1, true, ACC_ALL);
2968c50d8ae3SPaolo Bonzini 
2969c50d8ae3SPaolo Bonzini 		link_shadow_page(vcpu, it.sptep, sp);
297073a3c659SPaolo Bonzini 		if (fault->is_tdp && fault->huge_page_disallowed &&
297173a3c659SPaolo Bonzini 		    fault->req_level >= it.level)
2972c50d8ae3SPaolo Bonzini 			account_huge_nx_page(vcpu->kvm, sp);
2973c50d8ae3SPaolo Bonzini 	}
2974c50d8ae3SPaolo Bonzini 
2975b1a429fbSSean Christopherson 	if (WARN_ON_ONCE(it.level != fault->goal_level))
2976b1a429fbSSean Christopherson 		return -EFAULT;
2977b1a429fbSSean Christopherson 
29788a9f566aSDavid Matlack 	ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
2979a12f4381SPaolo Bonzini 			   base_gfn, fault->pfn, fault);
298012703759SSean Christopherson 	if (ret == RET_PF_SPURIOUS)
298112703759SSean Christopherson 		return ret;
298212703759SSean Christopherson 
2983c50d8ae3SPaolo Bonzini 	direct_pte_prefetch(vcpu, it.sptep);
2984c50d8ae3SPaolo Bonzini 	++vcpu->stat.pf_fixed;
2985c50d8ae3SPaolo Bonzini 	return ret;
2986c50d8ae3SPaolo Bonzini }
2987c50d8ae3SPaolo Bonzini 
2988c50d8ae3SPaolo Bonzini static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2989c50d8ae3SPaolo Bonzini {
2990c50d8ae3SPaolo Bonzini 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
2991c50d8ae3SPaolo Bonzini }
2992c50d8ae3SPaolo Bonzini 
2993c50d8ae3SPaolo Bonzini static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2994c50d8ae3SPaolo Bonzini {
2995c50d8ae3SPaolo Bonzini 	/*
2996c50d8ae3SPaolo Bonzini 	 * Do not cache the mmio info caused by writing the readonly gfn
2997c50d8ae3SPaolo Bonzini 	 * into the spte; otherwise a read access to the readonly gfn can
2998c50d8ae3SPaolo Bonzini 	 * also cause an mmio page fault and be treated as mmio access.
2999c50d8ae3SPaolo Bonzini 	 */
3000c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_RO_FAULT)
3001c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3002c50d8ae3SPaolo Bonzini 
3003c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_HWPOISON) {
3004c50d8ae3SPaolo Bonzini 		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
3005c50d8ae3SPaolo Bonzini 		return RET_PF_RETRY;
3006c50d8ae3SPaolo Bonzini 	}
3007c50d8ae3SPaolo Bonzini 
3008c50d8ae3SPaolo Bonzini 	return -EFAULT;
3009c50d8ae3SPaolo Bonzini }
3010c50d8ae3SPaolo Bonzini 
30113a13f4feSPaolo Bonzini static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
30123a13f4feSPaolo Bonzini 				unsigned int access, int *ret_val)
3013c50d8ae3SPaolo Bonzini {
3014c50d8ae3SPaolo Bonzini 	/* The pfn is invalid, report the error! */
30153a13f4feSPaolo Bonzini 	if (unlikely(is_error_pfn(fault->pfn))) {
30163a13f4feSPaolo Bonzini 		*ret_val = kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
3017c50d8ae3SPaolo Bonzini 		return true;
3018c50d8ae3SPaolo Bonzini 	}
3019c50d8ae3SPaolo Bonzini 
3020e710c5f6SDavid Matlack 	if (unlikely(!fault->slot)) {
30213a13f4feSPaolo Bonzini 		gva_t gva = fault->is_tdp ? 0 : fault->addr;
30223a13f4feSPaolo Bonzini 
30233a13f4feSPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3024c50d8ae3SPaolo Bonzini 				     access & shadow_mmio_access_mask);
302530ab5901SSean Christopherson 		/*
302630ab5901SSean Christopherson 		 * If MMIO caching is disabled, emulate immediately without
302730ab5901SSean Christopherson 		 * touching the shadow page tables as attempting to install an
302830ab5901SSean Christopherson 		 * MMIO SPTE will just be an expensive nop.
302930ab5901SSean Christopherson 		 */
303030ab5901SSean Christopherson 		if (unlikely(!shadow_mmio_value)) {
303130ab5901SSean Christopherson 			*ret_val = RET_PF_EMULATE;
303230ab5901SSean Christopherson 			return true;
303330ab5901SSean Christopherson 		}
303430ab5901SSean Christopherson 	}
3035c50d8ae3SPaolo Bonzini 
3036c50d8ae3SPaolo Bonzini 	return false;
3037c50d8ae3SPaolo Bonzini }
3038c50d8ae3SPaolo Bonzini 
30393c8ad5a6SPaolo Bonzini static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
3040c50d8ae3SPaolo Bonzini {
3041c50d8ae3SPaolo Bonzini 	/*
3042c50d8ae3SPaolo Bonzini 	 * Do not fix an mmio spte with an invalid generation number; it
3043c50d8ae3SPaolo Bonzini 	 * needs to be updated by the slow page fault path.
3044c50d8ae3SPaolo Bonzini 	 */
30453c8ad5a6SPaolo Bonzini 	if (fault->rsvd)
3046c50d8ae3SPaolo Bonzini 		return false;
3047c50d8ae3SPaolo Bonzini 
3048c50d8ae3SPaolo Bonzini 	/* See if the page fault is due to an NX violation */
30493c8ad5a6SPaolo Bonzini 	if (unlikely(fault->exec && fault->present))
3050c50d8ae3SPaolo Bonzini 		return false;
3051c50d8ae3SPaolo Bonzini 
3052c50d8ae3SPaolo Bonzini 	/*
3053c50d8ae3SPaolo Bonzini 	 * #PF can be fast if:
3054c50d8ae3SPaolo Bonzini 	 * 1. The shadow page table entry is not present, which could mean that
3055c50d8ae3SPaolo Bonzini 	 *    the fault is potentially caused by access tracking (if enabled).
3056c50d8ae3SPaolo Bonzini 	 * 2. The shadow page table entry is present and the fault
3057c50d8ae3SPaolo Bonzini 	 *    is caused by write-protect; that means we just need to change
3058c50d8ae3SPaolo Bonzini 	 *    the W bit of the spte, which can be done out of mmu-lock.
3059c50d8ae3SPaolo Bonzini 	 *
3060c50d8ae3SPaolo Bonzini 	 * However, if access tracking is disabled we know that a non-present
3061c50d8ae3SPaolo Bonzini 	 * page must be a genuine page fault where we have to create a new SPTE.
3062c50d8ae3SPaolo Bonzini 	 * So, if access tracking is disabled, we return true only for write
3063c50d8ae3SPaolo Bonzini 	 * accesses to a present page.
3064c50d8ae3SPaolo Bonzini 	 */
3065c50d8ae3SPaolo Bonzini 
30663c8ad5a6SPaolo Bonzini 	return shadow_acc_track_mask != 0 || (fault->write && fault->present);
3067c50d8ae3SPaolo Bonzini }
3068c50d8ae3SPaolo Bonzini 
3069c50d8ae3SPaolo Bonzini /*
3070c50d8ae3SPaolo Bonzini  * Returns true if the SPTE was fixed successfully. Otherwise,
3071c50d8ae3SPaolo Bonzini  * someone else modified the SPTE from its original value.
3072c50d8ae3SPaolo Bonzini  */
3073c50d8ae3SPaolo Bonzini static bool
3074e710c5f6SDavid Matlack fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
3075c50d8ae3SPaolo Bonzini 			u64 *sptep, u64 old_spte, u64 new_spte)
3076c50d8ae3SPaolo Bonzini {
3077c50d8ae3SPaolo Bonzini 	/*
3078c50d8ae3SPaolo Bonzini 	 * Theoretically we could also set the dirty bit (and flush the TLB)
3079c50d8ae3SPaolo Bonzini 	 * here in order to eliminate unnecessary PML logging. See comments in
3080c50d8ae3SPaolo Bonzini 	 * set_spte. But fast_page_fault is very unlikely to happen with PML
3081c50d8ae3SPaolo Bonzini 	 * enabled, so we do not do this. This might result in the same GPA
3082c50d8ae3SPaolo Bonzini 	 * being logged in the PML buffer again when the write really happens,
3083c50d8ae3SPaolo Bonzini 	 * and mark_page_dirty eventually being called twice for it. But that
3084c50d8ae3SPaolo Bonzini 	 * does no harm. This also avoids the TLB flush needed after setting
3085c50d8ae3SPaolo Bonzini 	 * the dirty bit, so non-PML cases won't be impacted.
3086c50d8ae3SPaolo Bonzini 	 *
3087c50d8ae3SPaolo Bonzini 	 * Compare with set_spte where instead shadow_dirty_mask is set.
3088c50d8ae3SPaolo Bonzini 	 */
3089c50d8ae3SPaolo Bonzini 	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3090c50d8ae3SPaolo Bonzini 		return false;
3091c50d8ae3SPaolo Bonzini 
3092e710c5f6SDavid Matlack 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
3093e710c5f6SDavid Matlack 		mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3094c50d8ae3SPaolo Bonzini 
3095c50d8ae3SPaolo Bonzini 	return true;
3096c50d8ae3SPaolo Bonzini }
3097c50d8ae3SPaolo Bonzini 
30983c8ad5a6SPaolo Bonzini static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
3099c50d8ae3SPaolo Bonzini {
31003c8ad5a6SPaolo Bonzini 	if (fault->exec)
3101c50d8ae3SPaolo Bonzini 		return is_executable_pte(spte);
3102c50d8ae3SPaolo Bonzini 
31033c8ad5a6SPaolo Bonzini 	if (fault->write)
3104c50d8ae3SPaolo Bonzini 		return is_writable_pte(spte);
3105c50d8ae3SPaolo Bonzini 
3106c50d8ae3SPaolo Bonzini 	/* Fault was on Read access */
3107c50d8ae3SPaolo Bonzini 	return spte & PT_PRESENT_MASK;
3108c50d8ae3SPaolo Bonzini }
3109c50d8ae3SPaolo Bonzini 
3110c50d8ae3SPaolo Bonzini /*
31116e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
31126e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
31136e8eb206SDavid Matlack  * walk could be performed, returns NULL and *spte does not contain valid data.
31146e8eb206SDavid Matlack  *
31156e8eb206SDavid Matlack  * Contract:
31166e8eb206SDavid Matlack  *  - Must be called between walk_shadow_page_lockless_{begin,end}.
31176e8eb206SDavid Matlack  *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
31186e8eb206SDavid Matlack  */
31196e8eb206SDavid Matlack static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
31206e8eb206SDavid Matlack {
31216e8eb206SDavid Matlack 	struct kvm_shadow_walk_iterator iterator;
31226e8eb206SDavid Matlack 	u64 old_spte;
31236e8eb206SDavid Matlack 	u64 *sptep = NULL;
31246e8eb206SDavid Matlack 
31256e8eb206SDavid Matlack 	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
31266e8eb206SDavid Matlack 		sptep = iterator.sptep;
31276e8eb206SDavid Matlack 		*spte = old_spte;
31286e8eb206SDavid Matlack 	}
31296e8eb206SDavid Matlack 
31306e8eb206SDavid Matlack 	return sptep;
31316e8eb206SDavid Matlack }
31326e8eb206SDavid Matlack 
31336e8eb206SDavid Matlack /*
3134c4371c2aSSean Christopherson  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3135c50d8ae3SPaolo Bonzini  */
31363c8ad5a6SPaolo Bonzini static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3137c50d8ae3SPaolo Bonzini {
3138c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3139c4371c2aSSean Christopherson 	int ret = RET_PF_INVALID;
3140c50d8ae3SPaolo Bonzini 	u64 spte = 0ull;
31416e8eb206SDavid Matlack 	u64 *sptep = NULL;
3142c50d8ae3SPaolo Bonzini 	uint retry_count = 0;
3143c50d8ae3SPaolo Bonzini 
31443c8ad5a6SPaolo Bonzini 	if (!page_fault_can_be_fast(fault))
3145c4371c2aSSean Christopherson 		return ret;
3146c50d8ae3SPaolo Bonzini 
3147c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3148c50d8ae3SPaolo Bonzini 
3149c50d8ae3SPaolo Bonzini 	do {
3150c50d8ae3SPaolo Bonzini 		u64 new_spte;
3151c50d8ae3SPaolo Bonzini 
31526e8eb206SDavid Matlack 		if (is_tdp_mmu(vcpu->arch.mmu))
31533c8ad5a6SPaolo Bonzini 			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
31546e8eb206SDavid Matlack 		else
31553c8ad5a6SPaolo Bonzini 			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3156c50d8ae3SPaolo Bonzini 
3157ec89e643SSean Christopherson 		if (!is_shadow_present_pte(spte))
3158ec89e643SSean Christopherson 			break;
3159ec89e643SSean Christopherson 
31606e8eb206SDavid Matlack 		sp = sptep_to_sp(sptep);
3161c50d8ae3SPaolo Bonzini 		if (!is_last_spte(spte, sp->role.level))
3162c50d8ae3SPaolo Bonzini 			break;
3163c50d8ae3SPaolo Bonzini 
3164c50d8ae3SPaolo Bonzini 		/*
3165c50d8ae3SPaolo Bonzini 		 * Check whether the memory access that caused the fault would
3166c50d8ae3SPaolo Bonzini 		 * still cause it if it were to be performed right now. If not,
3167c50d8ae3SPaolo Bonzini 		 * then this is a spurious fault caused by a lazily flushed TLB,
3168c50d8ae3SPaolo Bonzini 		 * or some other CPU has already fixed the PTE after the
3169c50d8ae3SPaolo Bonzini 		 * current CPU took the fault.
3170c50d8ae3SPaolo Bonzini 		 *
3171c50d8ae3SPaolo Bonzini 		 * Need not check the access of upper level table entries since
3172c50d8ae3SPaolo Bonzini 		 * they are always ACC_ALL.
3173c50d8ae3SPaolo Bonzini 		 */
31743c8ad5a6SPaolo Bonzini 		if (is_access_allowed(fault, spte)) {
3175c4371c2aSSean Christopherson 			ret = RET_PF_SPURIOUS;
3176c50d8ae3SPaolo Bonzini 			break;
3177c50d8ae3SPaolo Bonzini 		}
3178c50d8ae3SPaolo Bonzini 
3179c50d8ae3SPaolo Bonzini 		new_spte = spte;
3180c50d8ae3SPaolo Bonzini 
3181c50d8ae3SPaolo Bonzini 		if (is_access_track_spte(spte))
3182c50d8ae3SPaolo Bonzini 			new_spte = restore_acc_track_spte(new_spte);
3183c50d8ae3SPaolo Bonzini 
3184c50d8ae3SPaolo Bonzini 		/*
3185c50d8ae3SPaolo Bonzini 		 * Currently, to simplify the code, write-protection can
3186c50d8ae3SPaolo Bonzini 		 * be removed in the fast path only if the SPTE was
3187c50d8ae3SPaolo Bonzini 		 * write-protected for dirty-logging or access tracking.
3188c50d8ae3SPaolo Bonzini 		 */
31893c8ad5a6SPaolo Bonzini 		if (fault->write &&
3190e6302698SMiaohe Lin 		    spte_can_locklessly_be_made_writable(spte)) {
3191c50d8ae3SPaolo Bonzini 			new_spte |= PT_WRITABLE_MASK;
3192c50d8ae3SPaolo Bonzini 
3193c50d8ae3SPaolo Bonzini 			/*
3194c50d8ae3SPaolo Bonzini 			 * Do not fix write-permission on the large spte.  Since
3195c50d8ae3SPaolo Bonzini 			 * we only mark the first page dirty in the dirty bitmap in
3196c50d8ae3SPaolo Bonzini 			 * fast_pf_fix_direct_spte(), the other pages covered by the
3197c50d8ae3SPaolo Bonzini 			 * large spte would be missed if the slot has dirty logging enabled.
3198c50d8ae3SPaolo Bonzini 			 *
3199c50d8ae3SPaolo Bonzini 			 * Instead, we let the slow page fault path create a
3200c50d8ae3SPaolo Bonzini 			 * normal spte to fix the access.
3201c50d8ae3SPaolo Bonzini 			 *
3202c50d8ae3SPaolo Bonzini 			 * See the comments in kvm_arch_commit_memory_region().
3203c50d8ae3SPaolo Bonzini 			 */
32043bae0459SSean Christopherson 			if (sp->role.level > PG_LEVEL_4K)
3205c50d8ae3SPaolo Bonzini 				break;
3206c50d8ae3SPaolo Bonzini 		}
3207c50d8ae3SPaolo Bonzini 
3208c50d8ae3SPaolo Bonzini 		/* Verify that the fault can be handled in the fast path */
3209c50d8ae3SPaolo Bonzini 		if (new_spte == spte ||
32103c8ad5a6SPaolo Bonzini 		    !is_access_allowed(fault, new_spte))
3211c50d8ae3SPaolo Bonzini 			break;
3212c50d8ae3SPaolo Bonzini 
3213c50d8ae3SPaolo Bonzini 		/*
3214c50d8ae3SPaolo Bonzini 		 * Currently, fast page fault only works for direct mapping
3215c50d8ae3SPaolo Bonzini 		 * since the gfn is not stable for indirect shadow page. See
32163ecad8c2SMauro Carvalho Chehab 		 * Documentation/virt/kvm/locking.rst to get more detail.
3217c50d8ae3SPaolo Bonzini 		 */
3218e710c5f6SDavid Matlack 		if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3219c4371c2aSSean Christopherson 			ret = RET_PF_FIXED;
3220c50d8ae3SPaolo Bonzini 			break;
3221c4371c2aSSean Christopherson 		}
3222c50d8ae3SPaolo Bonzini 
3223c50d8ae3SPaolo Bonzini 		if (++retry_count > 4) {
3224c50d8ae3SPaolo Bonzini 			printk_once(KERN_WARNING
3225c50d8ae3SPaolo Bonzini 				"kvm: Fast #PF retrying more than 4 times.\n");
3226c50d8ae3SPaolo Bonzini 			break;
3227c50d8ae3SPaolo Bonzini 		}
3228c50d8ae3SPaolo Bonzini 
3229c50d8ae3SPaolo Bonzini 	} while (true);
3230c50d8ae3SPaolo Bonzini 
3231f0066d94SPaolo Bonzini 	trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3232c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3233c50d8ae3SPaolo Bonzini 
3234c4371c2aSSean Christopherson 	return ret;
3235c50d8ae3SPaolo Bonzini }
3236c50d8ae3SPaolo Bonzini 
3237c50d8ae3SPaolo Bonzini static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3238c50d8ae3SPaolo Bonzini 			       struct list_head *invalid_list)
3239c50d8ae3SPaolo Bonzini {
3240c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3241c50d8ae3SPaolo Bonzini 
3242c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(*root_hpa))
3243c50d8ae3SPaolo Bonzini 		return;
3244c50d8ae3SPaolo Bonzini 
3245e47c4aeeSSean Christopherson 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
324602c00b3aSBen Gardon 
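	/*
	 * TDP MMU root pages are reference counted via kvm_tdp_mmu_put_root();
	 * shadow MMU roots drop root_count directly and are zapped once the
	 * last reference is gone and the root has been invalidated.
	 */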
3247897218ffSPaolo Bonzini 	if (is_tdp_mmu_page(sp))
32486103bc07SBen Gardon 		kvm_tdp_mmu_put_root(kvm, sp, false);
324976eb54e7SBen Gardon 	else if (!--sp->root_count && sp->role.invalid)
3250c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3251c50d8ae3SPaolo Bonzini 
3252c50d8ae3SPaolo Bonzini 	*root_hpa = INVALID_PAGE;
3253c50d8ae3SPaolo Bonzini }
3254c50d8ae3SPaolo Bonzini 
3255c50d8ae3SPaolo Bonzini /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3256c50d8ae3SPaolo Bonzini void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3257c50d8ae3SPaolo Bonzini 			ulong roots_to_free)
3258c50d8ae3SPaolo Bonzini {
32594d710de9SSean Christopherson 	struct kvm *kvm = vcpu->kvm;
3260c50d8ae3SPaolo Bonzini 	int i;
3261c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
3262c50d8ae3SPaolo Bonzini 	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3263c50d8ae3SPaolo Bonzini 
3264c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3265c50d8ae3SPaolo Bonzini 
3266c50d8ae3SPaolo Bonzini 	/* Before acquiring the MMU lock, see if we need to do any real work. */
3267c50d8ae3SPaolo Bonzini 	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
3268c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3269c50d8ae3SPaolo Bonzini 			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3270c50d8ae3SPaolo Bonzini 			    VALID_PAGE(mmu->prev_roots[i].hpa))
3271c50d8ae3SPaolo Bonzini 				break;
3272c50d8ae3SPaolo Bonzini 
3273c50d8ae3SPaolo Bonzini 		if (i == KVM_MMU_NUM_PREV_ROOTS)
3274c50d8ae3SPaolo Bonzini 			return;
3275c50d8ae3SPaolo Bonzini 	}
3276c50d8ae3SPaolo Bonzini 
3277531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
3278c50d8ae3SPaolo Bonzini 
3279c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3280c50d8ae3SPaolo Bonzini 		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
32814d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3282c50d8ae3SPaolo Bonzini 					   &invalid_list);
3283c50d8ae3SPaolo Bonzini 
3284c50d8ae3SPaolo Bonzini 	if (free_active_root) {
3285c50d8ae3SPaolo Bonzini 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3286c50d8ae3SPaolo Bonzini 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
32874d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
328804d45551SSean Christopherson 		} else if (mmu->pae_root) {
3289c834e5e4SSean Christopherson 			for (i = 0; i < 4; ++i) {
3290c834e5e4SSean Christopherson 				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3291c834e5e4SSean Christopherson 					continue;
3292c834e5e4SSean Christopherson 
3293c834e5e4SSean Christopherson 				mmu_free_root_page(kvm, &mmu->pae_root[i],
3294c50d8ae3SPaolo Bonzini 						   &invalid_list);
3295c834e5e4SSean Christopherson 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3296c50d8ae3SPaolo Bonzini 			}
3297c50d8ae3SPaolo Bonzini 		}
329804d45551SSean Christopherson 		mmu->root_hpa = INVALID_PAGE;
3299be01e8e2SSean Christopherson 		mmu->root_pgd = 0;
3300c50d8ae3SPaolo Bonzini 	}
3301c50d8ae3SPaolo Bonzini 
33024d710de9SSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
3303531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
3304c50d8ae3SPaolo Bonzini }
3305c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3306c50d8ae3SPaolo Bonzini 
330725b62c62SSean Christopherson void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
330825b62c62SSean Christopherson {
330925b62c62SSean Christopherson 	unsigned long roots_to_free = 0;
331025b62c62SSean Christopherson 	hpa_t root_hpa;
331125b62c62SSean Christopherson 	int i;
331225b62c62SSean Christopherson 
331325b62c62SSean Christopherson 	/*
331425b62c62SSean Christopherson 	 * This should not be called while L2 is active, L2 can't invalidate
331525b62c62SSean Christopherson 	 * This should not be called while L2 is active; L2 can't invalidate
331625b62c62SSean Christopherson 	 */
331725b62c62SSean Christopherson 	WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
331825b62c62SSean Christopherson 
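	/*
	 * Free any cached previous roots that were created for L2 (guest_mode)
	 * or that no longer have a backing shadow page.
	 */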
331925b62c62SSean Christopherson 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
332025b62c62SSean Christopherson 		root_hpa = mmu->prev_roots[i].hpa;
332125b62c62SSean Christopherson 		if (!VALID_PAGE(root_hpa))
332225b62c62SSean Christopherson 			continue;
332325b62c62SSean Christopherson 
332425b62c62SSean Christopherson 		if (!to_shadow_page(root_hpa) ||
332525b62c62SSean Christopherson 			to_shadow_page(root_hpa)->role.guest_mode)
332625b62c62SSean Christopherson 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
332725b62c62SSean Christopherson 	}
332825b62c62SSean Christopherson 
332925b62c62SSean Christopherson 	kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
333025b62c62SSean Christopherson }
333125b62c62SSean Christopherson EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
333225b62c62SSean Christopherson 
333325b62c62SSean Christopherson 
3334c50d8ae3SPaolo Bonzini static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3335c50d8ae3SPaolo Bonzini {
3336c50d8ae3SPaolo Bonzini 	int ret = 0;
3337c50d8ae3SPaolo Bonzini 
3338995decb6SVitaly Kuznetsov 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3339c50d8ae3SPaolo Bonzini 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3340c50d8ae3SPaolo Bonzini 		ret = 1;
3341c50d8ae3SPaolo Bonzini 	}
3342c50d8ae3SPaolo Bonzini 
3343c50d8ae3SPaolo Bonzini 	return ret;
3344c50d8ae3SPaolo Bonzini }
3345c50d8ae3SPaolo Bonzini 
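/*
 * Get or create the shadow page for a new root, pin it by taking a root
 * reference, and return the physical address of its page table.
 */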
33468123f265SSean Christopherson static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
33478123f265SSean Christopherson 			    u8 level, bool direct)
3348c50d8ae3SPaolo Bonzini {
3349c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
33508123f265SSean Christopherson 
33518123f265SSean Christopherson 	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
33528123f265SSean Christopherson 	++sp->root_count;
33538123f265SSean Christopherson 
33548123f265SSean Christopherson 	return __pa(sp->spt);
33558123f265SSean Christopherson }
33568123f265SSean Christopherson 
33578123f265SSean Christopherson static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
33588123f265SSean Christopherson {
3359b37233c9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3360b37233c9SSean Christopherson 	u8 shadow_root_level = mmu->shadow_root_level;
33618123f265SSean Christopherson 	hpa_t root;
3362c50d8ae3SPaolo Bonzini 	unsigned i;
33634a38162eSPaolo Bonzini 	int r;
33644a38162eSPaolo Bonzini 
33654a38162eSPaolo Bonzini 	write_lock(&vcpu->kvm->mmu_lock);
33664a38162eSPaolo Bonzini 	r = make_mmu_pages_available(vcpu);
33674a38162eSPaolo Bonzini 	if (r < 0)
33684a38162eSPaolo Bonzini 		goto out_unlock;
3369c50d8ae3SPaolo Bonzini 
3370897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(vcpu->kvm)) {
337102c00b3aSBen Gardon 		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3372b37233c9SSean Christopherson 		mmu->root_hpa = root;
337302c00b3aSBen Gardon 	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
33746e6ec584SSean Christopherson 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3375b37233c9SSean Christopherson 		mmu->root_hpa = root;
33768123f265SSean Christopherson 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
33774a38162eSPaolo Bonzini 		if (WARN_ON_ONCE(!mmu->pae_root)) {
33784a38162eSPaolo Bonzini 			r = -EIO;
33794a38162eSPaolo Bonzini 			goto out_unlock;
33804a38162eSPaolo Bonzini 		}
338173ad1606SSean Christopherson 
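		/*
		 * No 64-bit root; build a PAE paging structure instead: four
		 * page directories, each mapping 1 GiB of the guest physical
		 * address space (hence the i << 30 offsets below).
		 */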
3382c50d8ae3SPaolo Bonzini 		for (i = 0; i < 4; ++i) {
3383c834e5e4SSean Christopherson 			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3384c50d8ae3SPaolo Bonzini 
33858123f265SSean Christopherson 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
33868123f265SSean Christopherson 					      i << 30, PT32_ROOT_LEVEL, true);
338717e368d9SSean Christopherson 			mmu->pae_root[i] = root | PT_PRESENT_MASK |
338817e368d9SSean Christopherson 					   shadow_me_mask;
3389c50d8ae3SPaolo Bonzini 		}
3390b37233c9SSean Christopherson 		mmu->root_hpa = __pa(mmu->pae_root);
339173ad1606SSean Christopherson 	} else {
339273ad1606SSean Christopherson 		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
33934a38162eSPaolo Bonzini 		r = -EIO;
33944a38162eSPaolo Bonzini 		goto out_unlock;
339573ad1606SSean Christopherson 	}
33963651c7fcSSean Christopherson 
3397be01e8e2SSean Christopherson 	/* root_pgd is ignored for direct MMUs. */
3398b37233c9SSean Christopherson 	mmu->root_pgd = 0;
33994a38162eSPaolo Bonzini out_unlock:
34004a38162eSPaolo Bonzini 	write_unlock(&vcpu->kvm->mmu_lock);
34014a38162eSPaolo Bonzini 	return r;
3402c50d8ae3SPaolo Bonzini }
3403c50d8ae3SPaolo Bonzini 
34041e76a3ceSDavid Stevens static int mmu_first_shadow_root_alloc(struct kvm *kvm)
34051e76a3ceSDavid Stevens {
34061e76a3ceSDavid Stevens 	struct kvm_memslots *slots;
34071e76a3ceSDavid Stevens 	struct kvm_memory_slot *slot;
34081e76a3ceSDavid Stevens 	int r = 0, i;
34091e76a3ceSDavid Stevens 
34101e76a3ceSDavid Stevens 	/*
34111e76a3ceSDavid Stevens 	 * Check if this is the first shadow root being allocated before
34121e76a3ceSDavid Stevens 	 * taking the lock.
34131e76a3ceSDavid Stevens 	 */
34141e76a3ceSDavid Stevens 	if (kvm_shadow_root_allocated(kvm))
34151e76a3ceSDavid Stevens 		return 0;
34161e76a3ceSDavid Stevens 
34171e76a3ceSDavid Stevens 	mutex_lock(&kvm->slots_arch_lock);
34181e76a3ceSDavid Stevens 
34191e76a3ceSDavid Stevens 	/* Recheck, under the lock, whether this is the first shadow root. */
34201e76a3ceSDavid Stevens 	if (kvm_shadow_root_allocated(kvm))
34211e76a3ceSDavid Stevens 		goto out_unlock;
34221e76a3ceSDavid Stevens 
34231e76a3ceSDavid Stevens 	/*
34241e76a3ceSDavid Stevens 	 * Check if anything actually needs to be allocated, e.g. all metadata
34251e76a3ceSDavid Stevens 	 * will be allocated upfront if TDP is disabled.
34261e76a3ceSDavid Stevens 	 */
34271e76a3ceSDavid Stevens 	if (kvm_memslots_have_rmaps(kvm) &&
34281e76a3ceSDavid Stevens 	    kvm_page_track_write_tracking_enabled(kvm))
34291e76a3ceSDavid Stevens 		goto out_success;
34301e76a3ceSDavid Stevens 
34311e76a3ceSDavid Stevens 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
34321e76a3ceSDavid Stevens 		slots = __kvm_memslots(kvm, i);
34331e76a3ceSDavid Stevens 		kvm_for_each_memslot(slot, slots) {
34341e76a3ceSDavid Stevens 			/*
34351e76a3ceSDavid Stevens 			 * Both of these functions are no-ops if the target is
34361e76a3ceSDavid Stevens 			 * already allocated, so unconditionally calling both
34371e76a3ceSDavid Stevens 			 * is safe.  Intentionally do NOT free allocations on
34381e76a3ceSDavid Stevens 			 * failure to avoid having to track which allocations
34391e76a3ceSDavid Stevens 			 * were made now versus when the memslot was created.
34401e76a3ceSDavid Stevens 			 * The metadata is guaranteed to be freed when the slot
34411e76a3ceSDavid Stevens 			 * is freed, and will be kept/used if userspace retries
34421e76a3ceSDavid Stevens 			 * KVM_RUN instead of killing the VM.
34431e76a3ceSDavid Stevens 			 */
34441e76a3ceSDavid Stevens 			r = memslot_rmap_alloc(slot, slot->npages);
34451e76a3ceSDavid Stevens 			if (r)
34461e76a3ceSDavid Stevens 				goto out_unlock;
34471e76a3ceSDavid Stevens 			r = kvm_page_track_write_tracking_alloc(slot);
34481e76a3ceSDavid Stevens 			if (r)
34491e76a3ceSDavid Stevens 				goto out_unlock;
34501e76a3ceSDavid Stevens 		}
34511e76a3ceSDavid Stevens 	}
34521e76a3ceSDavid Stevens 
34531e76a3ceSDavid Stevens 	/*
34541e76a3ceSDavid Stevens 	 * Ensure that shadow_root_allocated becomes true strictly after
34551e76a3ceSDavid Stevens 	 * all the related pointers are set.
34561e76a3ceSDavid Stevens 	 */
34571e76a3ceSDavid Stevens out_success:
34581e76a3ceSDavid Stevens 	smp_store_release(&kvm->arch.shadow_root_allocated, true);
34591e76a3ceSDavid Stevens 
34601e76a3ceSDavid Stevens out_unlock:
34611e76a3ceSDavid Stevens 	mutex_unlock(&kvm->slots_arch_lock);
34621e76a3ceSDavid Stevens 	return r;
34631e76a3ceSDavid Stevens }
34641e76a3ceSDavid Stevens 
3465c50d8ae3SPaolo Bonzini static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3466c50d8ae3SPaolo Bonzini {
3467b37233c9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
34686e0918aeSSean Christopherson 	u64 pdptrs[4], pm_mask;
3469be01e8e2SSean Christopherson 	gfn_t root_gfn, root_pgd;
34708123f265SSean Christopherson 	hpa_t root;
34714a38162eSPaolo Bonzini 	unsigned i;
34724a38162eSPaolo Bonzini 	int r;
3473c50d8ae3SPaolo Bonzini 
3474b37233c9SSean Christopherson 	root_pgd = mmu->get_guest_pgd(vcpu);
3475be01e8e2SSean Christopherson 	root_gfn = root_pgd >> PAGE_SHIFT;
3476c50d8ae3SPaolo Bonzini 
3477c50d8ae3SPaolo Bonzini 	if (mmu_check_root(vcpu, root_gfn))
3478c50d8ae3SPaolo Bonzini 		return 1;
3479c50d8ae3SPaolo Bonzini 
3480c50d8ae3SPaolo Bonzini 	/*
34814a38162eSPaolo Bonzini 	 * On SVM, reading PDPTRs might access guest memory, which might fault
34824a38162eSPaolo Bonzini 	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
34834a38162eSPaolo Bonzini 	 */
34846e0918aeSSean Christopherson 	if (mmu->root_level == PT32E_ROOT_LEVEL) {
34856e0918aeSSean Christopherson 		for (i = 0; i < 4; ++i) {
34866e0918aeSSean Christopherson 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
34876e0918aeSSean Christopherson 			if (!(pdptrs[i] & PT_PRESENT_MASK))
34886e0918aeSSean Christopherson 				continue;
34896e0918aeSSean Christopherson 
34906e0918aeSSean Christopherson 			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
34916e0918aeSSean Christopherson 				return 1;
34926e0918aeSSean Christopherson 		}
34936e0918aeSSean Christopherson 	}
34946e0918aeSSean Christopherson 
34951e76a3ceSDavid Stevens 	r = mmu_first_shadow_root_alloc(vcpu->kvm);
3496deae4a10SDavid Stevens 	if (r)
3497deae4a10SDavid Stevens 		return r;
3498deae4a10SDavid Stevens 
34994a38162eSPaolo Bonzini 	write_lock(&vcpu->kvm->mmu_lock);
35004a38162eSPaolo Bonzini 	r = make_mmu_pages_available(vcpu);
35014a38162eSPaolo Bonzini 	if (r < 0)
35024a38162eSPaolo Bonzini 		goto out_unlock;
35034a38162eSPaolo Bonzini 
3504c50d8ae3SPaolo Bonzini 	/*
3505c50d8ae3SPaolo Bonzini 	 * Do we shadow a long mode page table? If so, we need to
3506c50d8ae3SPaolo Bonzini 	 * write-protect the guest's page table root.
3507c50d8ae3SPaolo Bonzini 	 */
3508b37233c9SSean Christopherson 	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
35098123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, 0,
3510b37233c9SSean Christopherson 				      mmu->shadow_root_level, false);
3511b37233c9SSean Christopherson 		mmu->root_hpa = root;
3512be01e8e2SSean Christopherson 		goto set_root_pgd;
3513c50d8ae3SPaolo Bonzini 	}
3514c50d8ae3SPaolo Bonzini 
35154a38162eSPaolo Bonzini 	if (WARN_ON_ONCE(!mmu->pae_root)) {
35164a38162eSPaolo Bonzini 		r = -EIO;
35174a38162eSPaolo Bonzini 		goto out_unlock;
35184a38162eSPaolo Bonzini 	}
351973ad1606SSean Christopherson 
3520c50d8ae3SPaolo Bonzini 	/*
3521c50d8ae3SPaolo Bonzini 	 * We shadow a 32 bit page table. This may be a legacy 2-level
3522c50d8ae3SPaolo Bonzini 	 * or a PAE 3-level page table. In either case we need to be aware that
3523c50d8ae3SPaolo Bonzini 	 * the shadow page table may be a PAE or a long mode page table.
3524c50d8ae3SPaolo Bonzini 	 */
352517e368d9SSean Christopherson 	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3526cb0f722aSWei Huang 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
3527c50d8ae3SPaolo Bonzini 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3528c50d8ae3SPaolo Bonzini 
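		/*
		 * The guest uses 2- or 3-level paging but the shadow page
		 * tables are 4- or 5-level (e.g. when shadowing 32-bit NPT
		 * with 64-bit NPT), so link the PAE root into single-entry
		 * PML4/PML5 tables that the hardware can walk.
		 */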
352903ca4589SSean Christopherson 		if (WARN_ON_ONCE(!mmu->pml4_root)) {
35304a38162eSPaolo Bonzini 			r = -EIO;
35314a38162eSPaolo Bonzini 			goto out_unlock;
35324a38162eSPaolo Bonzini 		}
353303ca4589SSean Christopherson 		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3534cb0f722aSWei Huang 
3535cb0f722aSWei Huang 		if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
3536cb0f722aSWei Huang 			if (WARN_ON_ONCE(!mmu->pml5_root)) {
3537cb0f722aSWei Huang 				r = -EIO;
3538cb0f722aSWei Huang 				goto out_unlock;
3539cb0f722aSWei Huang 			}
3540cb0f722aSWei Huang 			mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
3541cb0f722aSWei Huang 		}
354204d45551SSean Christopherson 	}
354304d45551SSean Christopherson 
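	/*
	 * Populate the four PAE page-directory roots, each covering 1 GiB of
	 * the guest virtual address space.  If the guest uses PAE paging, each
	 * entry shadows the corresponding guest PDPTE and is left invalid when
	 * that PDPTE is not present.
	 */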
3544c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3545c834e5e4SSean Christopherson 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
35466e6ec584SSean Christopherson 
3547b37233c9SSean Christopherson 		if (mmu->root_level == PT32E_ROOT_LEVEL) {
35486e0918aeSSean Christopherson 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3549c834e5e4SSean Christopherson 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3550c50d8ae3SPaolo Bonzini 				continue;
3551c50d8ae3SPaolo Bonzini 			}
35526e0918aeSSean Christopherson 			root_gfn = pdptrs[i] >> PAGE_SHIFT;
3553c50d8ae3SPaolo Bonzini 		}
3554c50d8ae3SPaolo Bonzini 
35558123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
35568123f265SSean Christopherson 				      PT32_ROOT_LEVEL, false);
3557b37233c9SSean Christopherson 		mmu->pae_root[i] = root | pm_mask;
3558c50d8ae3SPaolo Bonzini 	}
3559c50d8ae3SPaolo Bonzini 
3560cb0f722aSWei Huang 	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
3561cb0f722aSWei Huang 		mmu->root_hpa = __pa(mmu->pml5_root);
3562cb0f722aSWei Huang 	else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
356303ca4589SSean Christopherson 		mmu->root_hpa = __pa(mmu->pml4_root);
3564ba0a194fSSean Christopherson 	else
3565ba0a194fSSean Christopherson 		mmu->root_hpa = __pa(mmu->pae_root);
3566c50d8ae3SPaolo Bonzini 
3567be01e8e2SSean Christopherson set_root_pgd:
3568b37233c9SSean Christopherson 	mmu->root_pgd = root_pgd;
35694a38162eSPaolo Bonzini out_unlock:
35704a38162eSPaolo Bonzini 	write_unlock(&vcpu->kvm->mmu_lock);
3571c50d8ae3SPaolo Bonzini 
3572c50d8ae3SPaolo Bonzini 	return 0;
3573c50d8ae3SPaolo Bonzini }
3574c50d8ae3SPaolo Bonzini 
3575748e52b9SSean Christopherson static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3576c50d8ae3SPaolo Bonzini {
3577748e52b9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3578a717a780SSean Christopherson 	bool need_pml5 = mmu->shadow_root_level > PT64_ROOT_4LEVEL;
3579cb0f722aSWei Huang 	u64 *pml5_root = NULL;
3580cb0f722aSWei Huang 	u64 *pml4_root = NULL;
3581cb0f722aSWei Huang 	u64 *pae_root;
3582748e52b9SSean Christopherson 
3583748e52b9SSean Christopherson 	/*
3584748e52b9SSean Christopherson 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3585748e52b9SSean Christopherson 	 * tables are allocated and initialized at root creation as there is no
3586748e52b9SSean Christopherson 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
3587748e52b9SSean Christopherson 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3588748e52b9SSean Christopherson 	 */
3589748e52b9SSean Christopherson 	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
3590748e52b9SSean Christopherson 	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
3591748e52b9SSean Christopherson 		return 0;
3592748e52b9SSean Christopherson 
3593a717a780SSean Christopherson 	/*
3594a717a780SSean Christopherson 	 * NPT, the only paging mode that uses this horror, uses a fixed number
3595a717a780SSean Christopherson 	 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
3596a717a780SSean Christopherson 	 * all MMUs are 5-level.  Thus, this can safely require that pml5_root
3597a717a780SSean Christopherson 	 * is allocated if the other roots are valid and pml5 is needed, as any
3598a717a780SSean Christopherson 	 * prior MMU would also have required pml5.
3599a717a780SSean Christopherson 	 */
3600a717a780SSean Christopherson 	if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
3601748e52b9SSean Christopherson 		return 0;
3602748e52b9SSean Christopherson 
3603748e52b9SSean Christopherson 	/*
3604748e52b9SSean Christopherson 	 * The special roots should always be allocated in concert.  Yell and
3605748e52b9SSean Christopherson 	 * bail if KVM ends up in a state where only one of the roots is valid.
3606748e52b9SSean Christopherson 	 */
3607cb0f722aSWei Huang 	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
3608a717a780SSean Christopherson 			 (need_pml5 && mmu->pml5_root)))
3609748e52b9SSean Christopherson 		return -EIO;
3610748e52b9SSean Christopherson 
36114a98623dSSean Christopherson 	/*
36124a98623dSSean Christopherson 	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
36134a98623dSSean Christopherson 	 * doesn't need to be decrypted.
36144a98623dSSean Christopherson 	 */
3615748e52b9SSean Christopherson 	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3616748e52b9SSean Christopherson 	if (!pae_root)
3617748e52b9SSean Christopherson 		return -ENOMEM;
3618748e52b9SSean Christopherson 
3619cb0f722aSWei Huang #ifdef CONFIG_X86_64
362003ca4589SSean Christopherson 	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3621cb0f722aSWei Huang 	if (!pml4_root)
3622cb0f722aSWei Huang 		goto err_pml4;
3623cb0f722aSWei Huang 
3624a717a780SSean Christopherson 	if (need_pml5) {
3625cb0f722aSWei Huang 		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3626cb0f722aSWei Huang 		if (!pml5_root)
3627cb0f722aSWei Huang 			goto err_pml5;
3628748e52b9SSean Christopherson 	}
3629cb0f722aSWei Huang #endif
3630748e52b9SSean Christopherson 
3631748e52b9SSean Christopherson 	mmu->pae_root = pae_root;
363203ca4589SSean Christopherson 	mmu->pml4_root = pml4_root;
3633cb0f722aSWei Huang 	mmu->pml5_root = pml5_root;
3634748e52b9SSean Christopherson 
3635748e52b9SSean Christopherson 	return 0;
3636cb0f722aSWei Huang 
3637cb0f722aSWei Huang #ifdef CONFIG_X86_64
3638cb0f722aSWei Huang err_pml5:
3639cb0f722aSWei Huang 	free_page((unsigned long)pml4_root);
3640cb0f722aSWei Huang err_pml4:
3641cb0f722aSWei Huang 	free_page((unsigned long)pae_root);
3642cb0f722aSWei Huang 	return -ENOMEM;
3643cb0f722aSWei Huang #endif
3644c50d8ae3SPaolo Bonzini }
3645c50d8ae3SPaolo Bonzini 
3646*264d3dc1SLai Jiangshan static bool is_unsync_root(hpa_t root)
3647*264d3dc1SLai Jiangshan {
3648*264d3dc1SLai Jiangshan 	struct kvm_mmu_page *sp;
3649*264d3dc1SLai Jiangshan 
3650*264d3dc1SLai Jiangshan 	/*
3651*264d3dc1SLai Jiangshan 	 * The read barrier orders the CPU's read of SPTE.W during the page table
3652*264d3dc1SLai Jiangshan 	 * walk before the reads of sp->unsync/sp->unsync_children here.
3653*264d3dc1SLai Jiangshan 	 *
3654*264d3dc1SLai Jiangshan 	 * Even if another CPU was marking the SP as unsync-ed simultaneously,
3655*264d3dc1SLai Jiangshan 	 * any guest page table changes are not guaranteed to be visible anyway
3656*264d3dc1SLai Jiangshan 	 * until this VCPU issues a TLB flush strictly after those changes are
3657*264d3dc1SLai Jiangshan 	 * made.  We only need to ensure that the other CPU sets these flags
3658*264d3dc1SLai Jiangshan 	 * before any actual changes to the page tables are made.  The comments
3659*264d3dc1SLai Jiangshan 	 * in mmu_try_to_unsync_pages() describe what could go wrong if this
3660*264d3dc1SLai Jiangshan 	 * requirement isn't satisfied.
3661*264d3dc1SLai Jiangshan 	 */
3662*264d3dc1SLai Jiangshan 	smp_rmb();
3663*264d3dc1SLai Jiangshan 	sp = to_shadow_page(root);
3664*264d3dc1SLai Jiangshan 	if (sp->unsync || sp->unsync_children)
3665*264d3dc1SLai Jiangshan 		return true;
3666*264d3dc1SLai Jiangshan 
3667*264d3dc1SLai Jiangshan 	return false;
3668*264d3dc1SLai Jiangshan }
3669*264d3dc1SLai Jiangshan 
3670c50d8ae3SPaolo Bonzini void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3671c50d8ae3SPaolo Bonzini {
3672c50d8ae3SPaolo Bonzini 	int i;
3673c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3674c50d8ae3SPaolo Bonzini 
3675c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
3676c50d8ae3SPaolo Bonzini 		return;
3677c50d8ae3SPaolo Bonzini 
3678c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3679c50d8ae3SPaolo Bonzini 		return;
3680c50d8ae3SPaolo Bonzini 
3681c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3682c50d8ae3SPaolo Bonzini 
3683c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3684c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->root_hpa;
3685e47c4aeeSSean Christopherson 		sp = to_shadow_page(root);
3686c50d8ae3SPaolo Bonzini 
3687*264d3dc1SLai Jiangshan 		if (!is_unsync_root(root))
3688c50d8ae3SPaolo Bonzini 			return;
3689c50d8ae3SPaolo Bonzini 
3690531810caSBen Gardon 		write_lock(&vcpu->kvm->mmu_lock);
3691c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3692c50d8ae3SPaolo Bonzini 
369365855ed8SLai Jiangshan 		mmu_sync_children(vcpu, sp, true);
3694c50d8ae3SPaolo Bonzini 
3695c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3696531810caSBen Gardon 		write_unlock(&vcpu->kvm->mmu_lock);
3697c50d8ae3SPaolo Bonzini 		return;
3698c50d8ae3SPaolo Bonzini 	}
3699c50d8ae3SPaolo Bonzini 
3700531810caSBen Gardon 	write_lock(&vcpu->kvm->mmu_lock);
3701c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3702c50d8ae3SPaolo Bonzini 
3703c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3704c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->pae_root[i];
3705c50d8ae3SPaolo Bonzini 
3706c834e5e4SSean Christopherson 		if (IS_VALID_PAE_ROOT(root)) {
3707c50d8ae3SPaolo Bonzini 			root &= PT64_BASE_ADDR_MASK;
3708e47c4aeeSSean Christopherson 			sp = to_shadow_page(root);
370965855ed8SLai Jiangshan 			mmu_sync_children(vcpu, sp, true);
3710c50d8ae3SPaolo Bonzini 		}
3711c50d8ae3SPaolo Bonzini 	}
3712c50d8ae3SPaolo Bonzini 
3713c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3714531810caSBen Gardon 	write_unlock(&vcpu->kvm->mmu_lock);
3715c50d8ae3SPaolo Bonzini }
3716c50d8ae3SPaolo Bonzini 
3717736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3718c50d8ae3SPaolo Bonzini 				  u32 access, struct x86_exception *exception)
3719c50d8ae3SPaolo Bonzini {
3720c50d8ae3SPaolo Bonzini 	if (exception)
3721c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3722c50d8ae3SPaolo Bonzini 	return vaddr;
3723c50d8ae3SPaolo Bonzini }
3724c50d8ae3SPaolo Bonzini 
3725736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3726c50d8ae3SPaolo Bonzini 					 u32 access,
3727c50d8ae3SPaolo Bonzini 					 struct x86_exception *exception)
3728c50d8ae3SPaolo Bonzini {
3729c50d8ae3SPaolo Bonzini 	if (exception)
3730c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3731c50d8ae3SPaolo Bonzini 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3732c50d8ae3SPaolo Bonzini }
3733c50d8ae3SPaolo Bonzini 
3734c50d8ae3SPaolo Bonzini static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3735c50d8ae3SPaolo Bonzini {
3736c50d8ae3SPaolo Bonzini 	/*
3737c50d8ae3SPaolo Bonzini 	 * A nested guest cannot use the MMIO cache if it is using nested
3738c50d8ae3SPaolo Bonzini 	 * page tables, because cr2 is an nGPA while the cache stores GPAs.
3739c50d8ae3SPaolo Bonzini 	 */
3740c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
3741c50d8ae3SPaolo Bonzini 		return false;
3742c50d8ae3SPaolo Bonzini 
3743c50d8ae3SPaolo Bonzini 	if (direct)
3744c50d8ae3SPaolo Bonzini 		return vcpu_match_mmio_gpa(vcpu, addr);
3745c50d8ae3SPaolo Bonzini 
3746c50d8ae3SPaolo Bonzini 	return vcpu_match_mmio_gva(vcpu, addr);
3747c50d8ae3SPaolo Bonzini }
3748c50d8ae3SPaolo Bonzini 
374995fb5b02SBen Gardon /*
375095fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
375195fb5b02SBen Gardon  * That SPTE may be non-present.
3752c5c8c7c5SDavid Matlack  *
3753c5c8c7c5SDavid Matlack  * Must be called between walk_shadow_page_lockless_{begin,end}.
375495fb5b02SBen Gardon  */
375539b4d43eSSean Christopherson static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3756c50d8ae3SPaolo Bonzini {
3757c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
37582aa07893SSean Christopherson 	int leaf = -1;
375995fb5b02SBen Gardon 	u64 spte;
3760c50d8ae3SPaolo Bonzini 
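	/* Record the SPTE at each level visited, indexed by paging level. */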
376139b4d43eSSean Christopherson 	for (shadow_walk_init(&iterator, vcpu, addr),
376239b4d43eSSean Christopherson 	     *root_level = iterator.level;
3763c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&iterator);
3764c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&iterator, spte)) {
376595fb5b02SBen Gardon 		leaf = iterator.level;
3766c50d8ae3SPaolo Bonzini 		spte = mmu_spte_get_lockless(iterator.sptep);
3767c50d8ae3SPaolo Bonzini 
3768dde81f94SSean Christopherson 		sptes[leaf] = spte;
376995fb5b02SBen Gardon 	}
377095fb5b02SBen Gardon 
377195fb5b02SBen Gardon 	return leaf;
377295fb5b02SBen Gardon }
377395fb5b02SBen Gardon 
37749aa41879SSean Christopherson /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
377595fb5b02SBen Gardon static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
377695fb5b02SBen Gardon {
3777dde81f94SSean Christopherson 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
377895fb5b02SBen Gardon 	struct rsvd_bits_validate *rsvd_check;
377939b4d43eSSean Christopherson 	int root, leaf, level;
378095fb5b02SBen Gardon 	bool reserved = false;
378195fb5b02SBen Gardon 
3782c5c8c7c5SDavid Matlack 	walk_shadow_page_lockless_begin(vcpu);
3783c5c8c7c5SDavid Matlack 
378463c0cac9SDavid Matlack 	if (is_tdp_mmu(vcpu->arch.mmu))
378539b4d43eSSean Christopherson 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
378695fb5b02SBen Gardon 	else
378739b4d43eSSean Christopherson 		leaf = get_walk(vcpu, addr, sptes, &root);
378895fb5b02SBen Gardon 
3789c5c8c7c5SDavid Matlack 	walk_shadow_page_lockless_end(vcpu);
3790c5c8c7c5SDavid Matlack 
37912aa07893SSean Christopherson 	if (unlikely(leaf < 0)) {
37922aa07893SSean Christopherson 		*sptep = 0ull;
37932aa07893SSean Christopherson 		return reserved;
37942aa07893SSean Christopherson 	}
37952aa07893SSean Christopherson 
37969aa41879SSean Christopherson 	*sptep = sptes[leaf];
37979aa41879SSean Christopherson 
37989aa41879SSean Christopherson 	/*
37999aa41879SSean Christopherson 	 * Skip reserved bits checks on the terminal leaf if it's not a valid
38009aa41879SSean Christopherson 	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
38019aa41879SSean Christopherson 	 * design, always have reserved bits set.  The purpose of the checks is
38029aa41879SSean Christopherson 	 * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
38039aa41879SSean Christopherson 	 */
38049aa41879SSean Christopherson 	if (!is_shadow_present_pte(sptes[leaf]))
38059aa41879SSean Christopherson 		leaf++;
380695fb5b02SBen Gardon 
380795fb5b02SBen Gardon 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
380895fb5b02SBen Gardon 
38099aa41879SSean Christopherson 	for (level = root; level >= leaf; level--)
3810961f8445SSean Christopherson 		reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
3811c50d8ae3SPaolo Bonzini 
3812c50d8ae3SPaolo Bonzini 	if (reserved) {
3813bb4cdf3aSSean Christopherson 		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3814c50d8ae3SPaolo Bonzini 		       __func__, addr);
381595fb5b02SBen Gardon 		for (level = root; level >= leaf; level--)
3816bb4cdf3aSSean Christopherson 			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
3817bb4cdf3aSSean Christopherson 			       sptes[level], level,
3818961f8445SSean Christopherson 			       get_rsvd_bits(rsvd_check, sptes[level], level));
3819c50d8ae3SPaolo Bonzini 	}
3820ddce6208SSean Christopherson 
3821c50d8ae3SPaolo Bonzini 	return reserved;
3822c50d8ae3SPaolo Bonzini }
3823c50d8ae3SPaolo Bonzini 
3824c50d8ae3SPaolo Bonzini static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3825c50d8ae3SPaolo Bonzini {
3826c50d8ae3SPaolo Bonzini 	u64 spte;
3827c50d8ae3SPaolo Bonzini 	bool reserved;
3828c50d8ae3SPaolo Bonzini 
3829c50d8ae3SPaolo Bonzini 	if (mmio_info_in_cache(vcpu, addr, direct))
3830c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3831c50d8ae3SPaolo Bonzini 
383295fb5b02SBen Gardon 	reserved = get_mmio_spte(vcpu, addr, &spte);
3833c50d8ae3SPaolo Bonzini 	if (WARN_ON(reserved))
3834c50d8ae3SPaolo Bonzini 		return -EINVAL;
3835c50d8ae3SPaolo Bonzini 
3836c50d8ae3SPaolo Bonzini 	if (is_mmio_spte(spte)) {
3837c50d8ae3SPaolo Bonzini 		gfn_t gfn = get_mmio_spte_gfn(spte);
38380a2b64c5SBen Gardon 		unsigned int access = get_mmio_spte_access(spte);
3839c50d8ae3SPaolo Bonzini 
3840c50d8ae3SPaolo Bonzini 		if (!check_mmio_spte(vcpu, spte))
3841c50d8ae3SPaolo Bonzini 			return RET_PF_INVALID;
3842c50d8ae3SPaolo Bonzini 
3843c50d8ae3SPaolo Bonzini 		if (direct)
3844c50d8ae3SPaolo Bonzini 			addr = 0;
3845c50d8ae3SPaolo Bonzini 
3846c50d8ae3SPaolo Bonzini 		trace_handle_mmio_page_fault(addr, gfn, access);
3847c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3848c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3849c50d8ae3SPaolo Bonzini 	}
3850c50d8ae3SPaolo Bonzini 
3851c50d8ae3SPaolo Bonzini 	/*
3852c50d8ae3SPaolo Bonzini 	 * If the page table has been zapped by another CPU, let the CPU fault again on
3853c50d8ae3SPaolo Bonzini 	 * the address.
3854c50d8ae3SPaolo Bonzini 	 */
3855c50d8ae3SPaolo Bonzini 	return RET_PF_RETRY;
3856c50d8ae3SPaolo Bonzini }
3857c50d8ae3SPaolo Bonzini 
3858c50d8ae3SPaolo Bonzini static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3859b8a5d551SPaolo Bonzini 					 struct kvm_page_fault *fault)
3860c50d8ae3SPaolo Bonzini {
3861b8a5d551SPaolo Bonzini 	if (unlikely(fault->rsvd))
3862c50d8ae3SPaolo Bonzini 		return false;
3863c50d8ae3SPaolo Bonzini 
3864b8a5d551SPaolo Bonzini 	if (!fault->present || !fault->write)
3865c50d8ae3SPaolo Bonzini 		return false;
3866c50d8ae3SPaolo Bonzini 
3867c50d8ae3SPaolo Bonzini 	/*
3868c50d8ae3SPaolo Bonzini 	 * The guest is writing a page that is write-tracked, which cannot
3869c50d8ae3SPaolo Bonzini 	 * be fixed by the page fault handler.
3870c50d8ae3SPaolo Bonzini 	 */
3871deae4a10SDavid Stevens 	if (kvm_slot_page_track_is_active(vcpu, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE))
3872c50d8ae3SPaolo Bonzini 		return true;
3873c50d8ae3SPaolo Bonzini 
3874c50d8ae3SPaolo Bonzini 	return false;
3875c50d8ae3SPaolo Bonzini }
3876c50d8ae3SPaolo Bonzini 
3877c50d8ae3SPaolo Bonzini static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3878c50d8ae3SPaolo Bonzini {
3879c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
3880c50d8ae3SPaolo Bonzini 	u64 spte;
3881c50d8ae3SPaolo Bonzini 
3882c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
38833e44dce4SLai Jiangshan 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
3884c50d8ae3SPaolo Bonzini 		clear_sp_write_flooding_count(iterator.sptep);
3885c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3886c50d8ae3SPaolo Bonzini }
3887c50d8ae3SPaolo Bonzini 
3888e8c22266SVitaly Kuznetsov static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
38899f1a8526SSean Christopherson 				    gfn_t gfn)
3890c50d8ae3SPaolo Bonzini {
3891c50d8ae3SPaolo Bonzini 	struct kvm_arch_async_pf arch;
3892c50d8ae3SPaolo Bonzini 
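	/*
	 * Build a token identifying this async #PF: the per-vCPU APF id in the
	 * upper bits, the vCPU id in the low 12 bits.
	 */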
3893c50d8ae3SPaolo Bonzini 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3894c50d8ae3SPaolo Bonzini 	arch.gfn = gfn;
3895c50d8ae3SPaolo Bonzini 	arch.direct_map = vcpu->arch.mmu->direct_map;
3896d8dd54e0SSean Christopherson 	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3897c50d8ae3SPaolo Bonzini 
38989f1a8526SSean Christopherson 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
38999f1a8526SSean Christopherson 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3900c50d8ae3SPaolo Bonzini }
3901c50d8ae3SPaolo Bonzini 
39023647cd04SPaolo Bonzini static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, int *r)
3903c50d8ae3SPaolo Bonzini {
3904e710c5f6SDavid Matlack 	struct kvm_memory_slot *slot = fault->slot;
3905c50d8ae3SPaolo Bonzini 	bool async;
3906c50d8ae3SPaolo Bonzini 
3907e0c37868SSean Christopherson 	/*
3908e0c37868SSean Christopherson 	 * Retry the page fault if the gfn hit a memslot that is being deleted
3909e0c37868SSean Christopherson 	 * or moved.  This ensures any existing SPTEs for the old memslot will
3910e0c37868SSean Christopherson 	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3911e0c37868SSean Christopherson 	 */
3912e0c37868SSean Christopherson 	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
39138f32d5e5SMaxim Levitsky 		goto out_retry;
3914e0c37868SSean Christopherson 
39159cc13d60SMaxim Levitsky 	if (!kvm_is_visible_memslot(slot)) {
3916c36b7150SPaolo Bonzini 		/* Don't expose private memslots to L2. */
39179cc13d60SMaxim Levitsky 		if (is_guest_mode(vcpu)) {
3918e710c5f6SDavid Matlack 			fault->slot = NULL;
39193647cd04SPaolo Bonzini 			fault->pfn = KVM_PFN_NOSLOT;
39203647cd04SPaolo Bonzini 			fault->map_writable = false;
3921c50d8ae3SPaolo Bonzini 			return false;
3922c50d8ae3SPaolo Bonzini 		}
39239cc13d60SMaxim Levitsky 		/*
39249cc13d60SMaxim Levitsky 		 * If the APIC access page exists but is disabled, go directly
39259cc13d60SMaxim Levitsky 		 * to emulation without caching the MMIO access or creating an
39269cc13d60SMaxim Levitsky 		 * MMIO SPTE.  That way the cache doesn't need to be purged
39279cc13d60SMaxim Levitsky 		 * when the AVIC is re-enabled.
39289cc13d60SMaxim Levitsky 		 */
39299cc13d60SMaxim Levitsky 		if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
39309cc13d60SMaxim Levitsky 		    !kvm_apicv_activated(vcpu->kvm)) {
39319cc13d60SMaxim Levitsky 			*r = RET_PF_EMULATE;
39329cc13d60SMaxim Levitsky 			return true;
39339cc13d60SMaxim Levitsky 		}
39349cc13d60SMaxim Levitsky 	}
3935c50d8ae3SPaolo Bonzini 
3936c50d8ae3SPaolo Bonzini 	async = false;
39373647cd04SPaolo Bonzini 	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, &async,
39383647cd04SPaolo Bonzini 					  fault->write, &fault->map_writable,
39393647cd04SPaolo Bonzini 					  &fault->hva);
3940c50d8ae3SPaolo Bonzini 	if (!async)
3941c50d8ae3SPaolo Bonzini 		return false; /* *pfn has correct page already */
3942c50d8ae3SPaolo Bonzini 
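	/*
	 * The page cannot be mapped without first faulting it in on the host.
	 * If async page faults are usable, notify the guest and let it keep
	 * running while the page is brought in; otherwise fall back to the
	 * synchronous (waiting) __gfn_to_pfn_memslot() call below.
	 */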
39432839180cSPaolo Bonzini 	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
39443647cd04SPaolo Bonzini 		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
39453647cd04SPaolo Bonzini 		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
39463647cd04SPaolo Bonzini 			trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
3947c50d8ae3SPaolo Bonzini 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
39488f32d5e5SMaxim Levitsky 			goto out_retry;
39493647cd04SPaolo Bonzini 		} else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn))
39508f32d5e5SMaxim Levitsky 			goto out_retry;
3951c50d8ae3SPaolo Bonzini 	}
3952c50d8ae3SPaolo Bonzini 
39533647cd04SPaolo Bonzini 	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
39543647cd04SPaolo Bonzini 					  fault->write, &fault->map_writable,
39553647cd04SPaolo Bonzini 					  &fault->hva);
3956a7cc099fSAndrei Vagin 	return false;
39578f32d5e5SMaxim Levitsky 
39588f32d5e5SMaxim Levitsky out_retry:
39598f32d5e5SMaxim Levitsky 	*r = RET_PF_RETRY;
39608f32d5e5SMaxim Levitsky 	return true;
3961c50d8ae3SPaolo Bonzini }
3962c50d8ae3SPaolo Bonzini 
39634326e57eSPaolo Bonzini static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3964c50d8ae3SPaolo Bonzini {
396563c0cac9SDavid Matlack 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
3966c50d8ae3SPaolo Bonzini 
39670f90e1c1SSean Christopherson 	unsigned long mmu_seq;
396883f06fa7SSean Christopherson 	int r;
3969c50d8ae3SPaolo Bonzini 
39703c8ad5a6SPaolo Bonzini 	fault->gfn = fault->addr >> PAGE_SHIFT;
3971e710c5f6SDavid Matlack 	fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);
3972e710c5f6SDavid Matlack 
3973b8a5d551SPaolo Bonzini 	if (page_fault_handle_page_track(vcpu, fault))
3974c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3975c50d8ae3SPaolo Bonzini 
39763c8ad5a6SPaolo Bonzini 	r = fast_page_fault(vcpu, fault);
3977c4371c2aSSean Christopherson 	if (r != RET_PF_INVALID)
3978c4371c2aSSean Christopherson 		return r;
397983291445SSean Christopherson 
3980378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, false);
3981c50d8ae3SPaolo Bonzini 	if (r)
3982c50d8ae3SPaolo Bonzini 		return r;
3983c50d8ae3SPaolo Bonzini 
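	/*
	 * Snapshot the MMU notifier sequence count before resolving the pfn.
	 * mmu_notifier_retry_hva() rechecks it under mmu_lock so that a
	 * concurrent host-side invalidation forces the fault to be retried.
	 */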
3984367fd790SSean Christopherson 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
3985367fd790SSean Christopherson 	smp_rmb();
3986367fd790SSean Christopherson 
39873647cd04SPaolo Bonzini 	if (kvm_faultin_pfn(vcpu, fault, &r))
39888f32d5e5SMaxim Levitsky 		return r;
3989367fd790SSean Christopherson 
39903a13f4feSPaolo Bonzini 	if (handle_abnormal_pfn(vcpu, fault, ACC_ALL, &r))
3991367fd790SSean Christopherson 		return r;
3992367fd790SSean Christopherson 
3993367fd790SSean Christopherson 	r = RET_PF_RETRY;
3994a2855afcSBen Gardon 
39950b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
3996a2855afcSBen Gardon 		read_lock(&vcpu->kvm->mmu_lock);
3997a2855afcSBen Gardon 	else
3998531810caSBen Gardon 		write_lock(&vcpu->kvm->mmu_lock);
3999a2855afcSBen Gardon 
4000e710c5f6SDavid Matlack 	if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
4001367fd790SSean Christopherson 		goto out_unlock;
40027bd7ded6SSean Christopherson 	r = make_mmu_pages_available(vcpu);
40037bd7ded6SSean Christopherson 	if (r)
4004367fd790SSean Christopherson 		goto out_unlock;
4005bb18842eSBen Gardon 
40060b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
40072f6305ddSPaolo Bonzini 		r = kvm_tdp_mmu_map(vcpu, fault);
4008bb18842eSBen Gardon 	else
400943b74355SPaolo Bonzini 		r = __direct_map(vcpu, fault);
40100f90e1c1SSean Christopherson 
4011367fd790SSean Christopherson out_unlock:
40120b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
4013a2855afcSBen Gardon 		read_unlock(&vcpu->kvm->mmu_lock);
4014a2855afcSBen Gardon 	else
4015531810caSBen Gardon 		write_unlock(&vcpu->kvm->mmu_lock);
40163647cd04SPaolo Bonzini 	kvm_release_pfn_clean(fault->pfn);
4017367fd790SSean Christopherson 	return r;
4018c50d8ae3SPaolo Bonzini }
4019c50d8ae3SPaolo Bonzini 
4020c501040aSPaolo Bonzini static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
4021c501040aSPaolo Bonzini 				struct kvm_page_fault *fault)
40220f90e1c1SSean Christopherson {
40234326e57eSPaolo Bonzini 	pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code);
40240f90e1c1SSean Christopherson 
40250f90e1c1SSean Christopherson 	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
40264326e57eSPaolo Bonzini 	fault->max_level = PG_LEVEL_2M;
40274326e57eSPaolo Bonzini 	return direct_page_fault(vcpu, fault);
40280f90e1c1SSean Christopherson }
40290f90e1c1SSean Christopherson 
4030c50d8ae3SPaolo Bonzini int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4031c50d8ae3SPaolo Bonzini 				u64 fault_address, char *insn, int insn_len)
4032c50d8ae3SPaolo Bonzini {
4033c50d8ae3SPaolo Bonzini 	int r = 1;
40349ce372b3SVitaly Kuznetsov 	u32 flags = vcpu->arch.apf.host_apf_flags;
4035c50d8ae3SPaolo Bonzini 
4036736c291cSSean Christopherson #ifndef CONFIG_X86_64
4037736c291cSSean Christopherson 	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
4038736c291cSSean Christopherson 	if (WARN_ON_ONCE(fault_address >> 32))
4039736c291cSSean Christopherson 		return -EFAULT;
4040736c291cSSean Christopherson #endif
4041736c291cSSean Christopherson 
4042c50d8ae3SPaolo Bonzini 	vcpu->arch.l1tf_flush_l1d = true;
40439ce372b3SVitaly Kuznetsov 	if (!flags) {
4044c50d8ae3SPaolo Bonzini 		trace_kvm_page_fault(fault_address, error_code);
4045c50d8ae3SPaolo Bonzini 
4046c50d8ae3SPaolo Bonzini 		if (kvm_event_needs_reinjection(vcpu))
4047c50d8ae3SPaolo Bonzini 			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
4048c50d8ae3SPaolo Bonzini 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4049c50d8ae3SPaolo Bonzini 				insn_len);
40509ce372b3SVitaly Kuznetsov 	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
405168fd66f1SVitaly Kuznetsov 		vcpu->arch.apf.host_apf_flags = 0;
4052c50d8ae3SPaolo Bonzini 		local_irq_disable();
40536bca69adSThomas Gleixner 		kvm_async_pf_task_wait_schedule(fault_address);
4054c50d8ae3SPaolo Bonzini 		local_irq_enable();
40559ce372b3SVitaly Kuznetsov 	} else {
40569ce372b3SVitaly Kuznetsov 		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4057c50d8ae3SPaolo Bonzini 	}
40589ce372b3SVitaly Kuznetsov 
4059c50d8ae3SPaolo Bonzini 	return r;
4060c50d8ae3SPaolo Bonzini }
4061c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
4062c50d8ae3SPaolo Bonzini 
4063c501040aSPaolo Bonzini int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4064c50d8ae3SPaolo Bonzini {
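	/*
	 * Cap the maximum mapping level so that a single huge page never
	 * spans guest-physical ranges with inconsistent MTRR memory types.
	 */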
40654326e57eSPaolo Bonzini 	while (fault->max_level > PG_LEVEL_4K) {
40664326e57eSPaolo Bonzini 		int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
40674326e57eSPaolo Bonzini 		gfn_t base = (fault->addr >> PAGE_SHIFT) & ~(page_num - 1);
4068c50d8ae3SPaolo Bonzini 
4069cb9b88c6SSean Christopherson 		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
4070cb9b88c6SSean Christopherson 			break;
40714326e57eSPaolo Bonzini 
40724326e57eSPaolo Bonzini 		--fault->max_level;
4073c50d8ae3SPaolo Bonzini 	}
4074c50d8ae3SPaolo Bonzini 
40754326e57eSPaolo Bonzini 	return direct_page_fault(vcpu, fault);
4076c50d8ae3SPaolo Bonzini }
4077c50d8ae3SPaolo Bonzini 
407884a16226SSean Christopherson static void nonpaging_init_context(struct kvm_mmu *context)
4079c50d8ae3SPaolo Bonzini {
4080c50d8ae3SPaolo Bonzini 	context->page_fault = nonpaging_page_fault;
4081c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = nonpaging_gva_to_gpa;
4082c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
40835efac074SPaolo Bonzini 	context->invlpg = NULL;
4084c50d8ae3SPaolo Bonzini 	context->direct_map = true;
4085c50d8ae3SPaolo Bonzini }
4086c50d8ae3SPaolo Bonzini 
4087be01e8e2SSean Christopherson static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
40880be44352SSean Christopherson 				  union kvm_mmu_page_role role)
40890be44352SSean Christopherson {
4090be01e8e2SSean Christopherson 	return (role.direct || pgd == root->pgd) &&
4091e47c4aeeSSean Christopherson 	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
4092e47c4aeeSSean Christopherson 	       role.word == to_shadow_page(root->hpa)->role.word;
40930be44352SSean Christopherson }
40940be44352SSean Christopherson 
4095c50d8ae3SPaolo Bonzini /*
4096be01e8e2SSean Christopherson  * Find out if a previously cached root matching the new pgd/role is available.
4097c50d8ae3SPaolo Bonzini  * The current root is also inserted into the cache.
4098c50d8ae3SPaolo Bonzini  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
4099c50d8ae3SPaolo Bonzini  * returned.
4100c50d8ae3SPaolo Bonzini  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
4101c50d8ae3SPaolo Bonzini  * false is returned. This root should now be freed by the caller.
4102c50d8ae3SPaolo Bonzini  */
4103be01e8e2SSean Christopherson static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4104c50d8ae3SPaolo Bonzini 				  union kvm_mmu_page_role new_role)
4105c50d8ae3SPaolo Bonzini {
4106c50d8ae3SPaolo Bonzini 	uint i;
4107c50d8ae3SPaolo Bonzini 	struct kvm_mmu_root_info root;
4108c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4109c50d8ae3SPaolo Bonzini 
4110be01e8e2SSean Christopherson 	root.pgd = mmu->root_pgd;
4111c50d8ae3SPaolo Bonzini 	root.hpa = mmu->root_hpa;
4112c50d8ae3SPaolo Bonzini 
4113be01e8e2SSean Christopherson 	if (is_root_usable(&root, new_pgd, new_role))
41140be44352SSean Christopherson 		return true;
41150be44352SSean Christopherson 
4116c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4117c50d8ae3SPaolo Bonzini 		swap(root, mmu->prev_roots[i]);
4118c50d8ae3SPaolo Bonzini 
4119be01e8e2SSean Christopherson 		if (is_root_usable(&root, new_pgd, new_role))
4120c50d8ae3SPaolo Bonzini 			break;
4121c50d8ae3SPaolo Bonzini 	}
4122c50d8ae3SPaolo Bonzini 
4123c50d8ae3SPaolo Bonzini 	mmu->root_hpa = root.hpa;
4124be01e8e2SSean Christopherson 	mmu->root_pgd = root.pgd;
4125c50d8ae3SPaolo Bonzini 
4126c50d8ae3SPaolo Bonzini 	return i < KVM_MMU_NUM_PREV_ROOTS;
4127c50d8ae3SPaolo Bonzini }
4128c50d8ae3SPaolo Bonzini 
4129be01e8e2SSean Christopherson static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4130b869855bSSean Christopherson 			    union kvm_mmu_page_role new_role)
4131c50d8ae3SPaolo Bonzini {
4132c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4133c50d8ae3SPaolo Bonzini 
4134c50d8ae3SPaolo Bonzini 	/*
4135c50d8ae3SPaolo Bonzini 	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
4136c50d8ae3SPaolo Bonzini 	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
4137c50d8ae3SPaolo Bonzini 	 * later if necessary.
4138c50d8ae3SPaolo Bonzini 	 */
4139c50d8ae3SPaolo Bonzini 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
4140b869855bSSean Christopherson 	    mmu->root_level >= PT64_ROOT_4LEVEL)
4141fe9304d3SVitaly Kuznetsov 		return cached_root_available(vcpu, new_pgd, new_role);
4142c50d8ae3SPaolo Bonzini 
4143c50d8ae3SPaolo Bonzini 	return false;
4144c50d8ae3SPaolo Bonzini }
4145c50d8ae3SPaolo Bonzini 
4146be01e8e2SSean Christopherson static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4147b5129100SSean Christopherson 			      union kvm_mmu_page_role new_role)
4148c50d8ae3SPaolo Bonzini {
4149be01e8e2SSean Christopherson 	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
4150b869855bSSean Christopherson 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
4151b869855bSSean Christopherson 		return;
4152c50d8ae3SPaolo Bonzini 	}
4153c50d8ae3SPaolo Bonzini 
4154c50d8ae3SPaolo Bonzini 	/*
4155b869855bSSean Christopherson 	 * It's possible that the cached previous root page is obsolete because
4156b869855bSSean Christopherson 	 * of a change in the MMU generation number. However, changing the
4157b869855bSSean Christopherson 	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
4158b869855bSSean Christopherson 	 * free the root set here and allocate a new one.
4159b869855bSSean Christopherson 	 */
4160b869855bSSean Christopherson 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4161b869855bSSean Christopherson 
4162b5129100SSean Christopherson 	if (force_flush_and_sync_on_reuse) {
4163b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4164b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4165b5129100SSean Christopherson 	}
4166b869855bSSean Christopherson 
4167b869855bSSean Christopherson 	/*
4168b869855bSSean Christopherson 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4169b869855bSSean Christopherson 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
4170b869855bSSean Christopherson 	 * valid. So clear any cached MMIO info even when we don't need to sync
4171b869855bSSean Christopherson 	 * the shadow page tables.
4172c50d8ae3SPaolo Bonzini 	 */
4173c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4174c50d8ae3SPaolo Bonzini 
4175daa5b6c1SBen Gardon 	/*
4176daa5b6c1SBen Gardon 	 * If this is a direct root page, it doesn't have a write flooding
4177daa5b6c1SBen Gardon 	 * count. Otherwise, clear the write flooding count.
4178daa5b6c1SBen Gardon 	 */
4179daa5b6c1SBen Gardon 	if (!new_role.direct)
4180daa5b6c1SBen Gardon 		__clear_sp_write_flooding_count(
4181daa5b6c1SBen Gardon 				to_shadow_page(vcpu->arch.mmu->root_hpa));
4182c50d8ae3SPaolo Bonzini }
4183c50d8ae3SPaolo Bonzini 
4184b5129100SSean Christopherson void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4185c50d8ae3SPaolo Bonzini {
4186b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
4187c50d8ae3SPaolo Bonzini }
4188be01e8e2SSean Christopherson EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4189c50d8ae3SPaolo Bonzini 
4190c50d8ae3SPaolo Bonzini static unsigned long get_cr3(struct kvm_vcpu *vcpu)
4191c50d8ae3SPaolo Bonzini {
4192c50d8ae3SPaolo Bonzini 	return kvm_read_cr3(vcpu);
4193c50d8ae3SPaolo Bonzini }
4194c50d8ae3SPaolo Bonzini 
4195c50d8ae3SPaolo Bonzini static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4196c3e5e415SLai Jiangshan 			   unsigned int access)
4197c50d8ae3SPaolo Bonzini {
4198c50d8ae3SPaolo Bonzini 	if (unlikely(is_mmio_spte(*sptep))) {
4199c50d8ae3SPaolo Bonzini 		if (gfn != get_mmio_spte_gfn(*sptep)) {
4200c50d8ae3SPaolo Bonzini 			mmu_spte_clear_no_track(sptep);
4201c50d8ae3SPaolo Bonzini 			return true;
4202c50d8ae3SPaolo Bonzini 		}
4203c50d8ae3SPaolo Bonzini 
4204c50d8ae3SPaolo Bonzini 		mark_mmio_spte(vcpu, sptep, gfn, access);
4205c50d8ae3SPaolo Bonzini 		return true;
4206c50d8ae3SPaolo Bonzini 	}
4207c50d8ae3SPaolo Bonzini 
4208c50d8ae3SPaolo Bonzini 	return false;
4209c50d8ae3SPaolo Bonzini }
4210c50d8ae3SPaolo Bonzini 
4211c50d8ae3SPaolo Bonzini #define PTTYPE_EPT 18 /* arbitrary */
4212c50d8ae3SPaolo Bonzini #define PTTYPE PTTYPE_EPT
4213c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4214c50d8ae3SPaolo Bonzini #undef PTTYPE
4215c50d8ae3SPaolo Bonzini 
4216c50d8ae3SPaolo Bonzini #define PTTYPE 64
4217c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4218c50d8ae3SPaolo Bonzini #undef PTTYPE
4219c50d8ae3SPaolo Bonzini 
4220c50d8ae3SPaolo Bonzini #define PTTYPE 32
4221c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4222c50d8ae3SPaolo Bonzini #undef PTTYPE
4223c50d8ae3SPaolo Bonzini 
4224c50d8ae3SPaolo Bonzini static void
4225b705a277SSean Christopherson __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
42265b7f575cSSean Christopherson 			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4227c50d8ae3SPaolo Bonzini 			bool pse, bool amd)
4228c50d8ae3SPaolo Bonzini {
4229c50d8ae3SPaolo Bonzini 	u64 gbpages_bit_rsvd = 0;
4230c50d8ae3SPaolo Bonzini 	u64 nonleaf_bit8_rsvd = 0;
42315b7f575cSSean Christopherson 	u64 high_bits_rsvd;
4232c50d8ae3SPaolo Bonzini 
4233c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = 0;
4234c50d8ae3SPaolo Bonzini 
4235c50d8ae3SPaolo Bonzini 	if (!gbpages)
4236c50d8ae3SPaolo Bonzini 		gbpages_bit_rsvd = rsvd_bits(7, 7);
4237c50d8ae3SPaolo Bonzini 
42385b7f575cSSean Christopherson 	if (level == PT32E_ROOT_LEVEL)
42395b7f575cSSean Christopherson 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
42405b7f575cSSean Christopherson 	else
42415b7f575cSSean Christopherson 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
42425b7f575cSSean Christopherson 
42435b7f575cSSean Christopherson 	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
42445b7f575cSSean Christopherson 	if (!nx)
42455b7f575cSSean Christopherson 		high_bits_rsvd |= rsvd_bits(63, 63);
42465b7f575cSSean Christopherson 
4247c50d8ae3SPaolo Bonzini 	/*
4248c50d8ae3SPaolo Bonzini 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4249c50d8ae3SPaolo Bonzini 	 * leaf entries) on AMD CPUs only.
4250c50d8ae3SPaolo Bonzini 	 */
4251c50d8ae3SPaolo Bonzini 	if (amd)
4252c50d8ae3SPaolo Bonzini 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4253c50d8ae3SPaolo Bonzini 
4254c50d8ae3SPaolo Bonzini 	switch (level) {
4255c50d8ae3SPaolo Bonzini 	case PT32_ROOT_LEVEL:
4256c50d8ae3SPaolo Bonzini 		/* no rsvd bits for 2 level 4K page table entries */
4257c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][1] = 0;
4258c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][0] = 0;
4259c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4260c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4261c50d8ae3SPaolo Bonzini 
4262c50d8ae3SPaolo Bonzini 		if (!pse) {
4263c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = 0;
4264c50d8ae3SPaolo Bonzini 			break;
4265c50d8ae3SPaolo Bonzini 		}
4266c50d8ae3SPaolo Bonzini 
4267c50d8ae3SPaolo Bonzini 		if (is_cpuid_PSE36())
4268c50d8ae3SPaolo Bonzini 			/* 36bits PSE 4MB page */
4269c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4270c50d8ae3SPaolo Bonzini 		else
4271c50d8ae3SPaolo Bonzini 			/* 32 bits PSE 4MB page */
4272c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4273c50d8ae3SPaolo Bonzini 		break;
4274c50d8ae3SPaolo Bonzini 	case PT32E_ROOT_LEVEL:
42755b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
42765b7f575cSSean Christopherson 						   high_bits_rsvd |
42775b7f575cSSean Christopherson 						   rsvd_bits(5, 8) |
42785b7f575cSSean Christopherson 						   rsvd_bits(1, 2);	/* PDPTE */
42795b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
42805b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
42815b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4282c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 20);	/* large page */
4283c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4284c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4285c50d8ae3SPaolo Bonzini 		break;
4286c50d8ae3SPaolo Bonzini 	case PT64_ROOT_5LEVEL:
42875b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
42885b7f575cSSean Christopherson 						   nonleaf_bit8_rsvd |
42895b7f575cSSean Christopherson 						   rsvd_bits(7, 7);
4290c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][4] =
4291c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][4];
4292df561f66SGustavo A. R. Silva 		fallthrough;
4293c50d8ae3SPaolo Bonzini 	case PT64_ROOT_4LEVEL:
42945b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
42955b7f575cSSean Christopherson 						   nonleaf_bit8_rsvd |
42965b7f575cSSean Christopherson 						   rsvd_bits(7, 7);
42975b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
42985b7f575cSSean Christopherson 						   gbpages_bit_rsvd;
42995b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
43005b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4301c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][3] =
4302c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][3];
43035b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
43045b7f575cSSean Christopherson 						   gbpages_bit_rsvd |
4305c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 29);
43065b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4307c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 20); /* large page */
4308c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4309c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4310c50d8ae3SPaolo Bonzini 		break;
4311c50d8ae3SPaolo Bonzini 	}
4312c50d8ae3SPaolo Bonzini }
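
/*
 * For illustration (values worked out from the cases above, assuming a
 * guest with MAXPHYADDR == 48, EFER.NX == 1, usable 1GB pages and an Intel
 * CPU): pa_bits_rsvd is effectively rsvd_bits(48, 63), so for
 * PT64_ROOT_4LEVEL the masks reduce to
 *
 *	rsvd_bits_mask[0][0] (4K PTE)   = rsvd_bits(48, 51)
 *	rsvd_bits_mask[1][1] (2M PDE)   = rsvd_bits(48, 51) | rsvd_bits(13, 20)
 *	rsvd_bits_mask[1][2] (1G PDPTE) = rsvd_bits(48, 51) | rsvd_bits(13, 29)
 *	rsvd_bits_mask[0][3] (PML4E)    = rsvd_bits(48, 51) | rsvd_bits(7, 7)
 *
 * i.e. the reserved-bit check flags a 2MB PDE as malformed if any of bits
 * 51:48 or 20:13 are set.
 */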
4313c50d8ae3SPaolo Bonzini 
431427de9250SSean Christopherson static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
431527de9250SSean Christopherson {
431627de9250SSean Christopherson 	/*
431727de9250SSean Christopherson 	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
431827de9250SSean Christopherson 	 * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
431927de9250SSean Christopherson 	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
432027de9250SSean Christopherson 	 * walk for performance and complexity reasons.  Not to mention KVM
432127de9250SSean Christopherson 	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
432227de9250SSean Christopherson 	 * KVM once a TDP translation is installed.  Mimic hardware behavior so
432327de9250SSean Christopherson 	 * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
432427de9250SSean Christopherson 	 */
432527de9250SSean Christopherson 	return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
432627de9250SSean Christopherson 			     guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
432727de9250SSean Christopherson }
432827de9250SSean Christopherson 
4329c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4330c50d8ae3SPaolo Bonzini 				  struct kvm_mmu *context)
4331c50d8ae3SPaolo Bonzini {
4332b705a277SSean Christopherson 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
43335b7f575cSSean Christopherson 				vcpu->arch.reserved_gpa_bits,
433490599c28SSean Christopherson 				context->root_level, is_efer_nx(context),
433527de9250SSean Christopherson 				guest_can_use_gbpages(vcpu),
43364e9c0d80SSean Christopherson 				is_cr4_pse(context),
433723493d0aSSean Christopherson 				guest_cpuid_is_amd_or_hygon(vcpu));
4338c50d8ae3SPaolo Bonzini }
4339c50d8ae3SPaolo Bonzini 
4340c50d8ae3SPaolo Bonzini static void
4341c50d8ae3SPaolo Bonzini __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
43425b7f575cSSean Christopherson 			    u64 pa_bits_rsvd, bool execonly)
4343c50d8ae3SPaolo Bonzini {
43445b7f575cSSean Christopherson 	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4345c50d8ae3SPaolo Bonzini 	u64 bad_mt_xwr;
4346c50d8ae3SPaolo Bonzini 
43475b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
43485b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
43495b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
43505b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
43515b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4352c50d8ae3SPaolo Bonzini 
4353c50d8ae3SPaolo Bonzini 	/* large page */
4354c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4355c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
43565b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
43575b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
4358c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4359c50d8ae3SPaolo Bonzini 
4360c50d8ae3SPaolo Bonzini 	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
4361c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
4362c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
4363c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
4364c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
4365c50d8ae3SPaolo Bonzini 	if (!execonly) {
4366c50d8ae3SPaolo Bonzini 		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
4367c50d8ae3SPaolo Bonzini 		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4368c50d8ae3SPaolo Bonzini 	}
4369c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4370c50d8ae3SPaolo Bonzini }
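
/*
 * Worked example for the bad_mt_xwr encoding above, assuming the consumer
 * indexes it with the low 6 bits of the EPT entry (bits 2:0 = XWR, bits
 * 5:3 = memory type): an entry with memtype WB (6) and XWR == 010b
 * (write-only) maps to bit 6*8 + 2 = 50, which is set by
 * REPEAT_BYTE(1ull << 2), so the entry is rejected; the same entry with
 * XWR == 011b (read+write) maps to bit 51, which no term above sets, so it
 * passes.
 */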
4371c50d8ae3SPaolo Bonzini 
4372c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4373c50d8ae3SPaolo Bonzini 		struct kvm_mmu *context, bool execonly)
4374c50d8ae3SPaolo Bonzini {
4375c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
43765b7f575cSSean Christopherson 				    vcpu->arch.reserved_gpa_bits, execonly);
4377c50d8ae3SPaolo Bonzini }
4378c50d8ae3SPaolo Bonzini 
43796f8e65a6SSean Christopherson static inline u64 reserved_hpa_bits(void)
43806f8e65a6SSean Christopherson {
43816f8e65a6SSean Christopherson 	return rsvd_bits(shadow_phys_bits, 63);
43826f8e65a6SSean Christopherson }
43836f8e65a6SSean Christopherson 
4384c50d8ae3SPaolo Bonzini /*
4385c50d8ae3SPaolo Bonzini  * The page table on the host is the shadow page table for the page
4386c50d8ae3SPaolo Bonzini  * table in the guest (or in an AMD nested guest); its MMU features
4387c50d8ae3SPaolo Bonzini  * completely follow the guest's features.
4388c50d8ae3SPaolo Bonzini  */
438916be1d12SSean Christopherson static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
439016be1d12SSean Christopherson 					struct kvm_mmu *context)
4391c50d8ae3SPaolo Bonzini {
4392112022bdSSean Christopherson 	/*
4393112022bdSSean Christopherson 	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
4394112022bdSSean Christopherson 	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
4395112022bdSSean Christopherson 	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
4396112022bdSSean Christopherson 	 * The iTLB multi-hit workaround can be toggled at any time, so assume
4397112022bdSSean Christopherson 	 * NX can be used by any non-nested shadow MMU to avoid having to reset
4398112022bdSSean Christopherson 	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
4399112022bdSSean Christopherson 	 */
440090599c28SSean Christopherson 	bool uses_nx = is_efer_nx(context) || !tdp_enabled;
44018c985b2dSSean Christopherson 
44028c985b2dSSean Christopherson 	/* @amd adds a check on bit 8 of SPTEs, which KVM shouldn't use anyway. */
44038c985b2dSSean Christopherson 	bool is_amd = true;
44048c985b2dSSean Christopherson 	/* KVM doesn't use 2-level page tables for the shadow MMU. */
44058c985b2dSSean Christopherson 	bool is_pse = false;
4406c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4407c50d8ae3SPaolo Bonzini 	int i;
4408c50d8ae3SPaolo Bonzini 
44098c985b2dSSean Christopherson 	WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
44108c985b2dSSean Christopherson 
4411c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4412b705a277SSean Christopherson 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4413c50d8ae3SPaolo Bonzini 				context->shadow_root_level, uses_nx,
441427de9250SSean Christopherson 				guest_can_use_gbpages(vcpu), is_pse, is_amd);
4415c50d8ae3SPaolo Bonzini 
4416c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4417c50d8ae3SPaolo Bonzini 		return;
4418c50d8ae3SPaolo Bonzini 
4419c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4420c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4421c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4422c50d8ae3SPaolo Bonzini 	}
4423c50d8ae3SPaolo Bonzini 
4424c50d8ae3SPaolo Bonzini }
4425c50d8ae3SPaolo Bonzini 
4426c50d8ae3SPaolo Bonzini static inline bool boot_cpu_is_amd(void)
4427c50d8ae3SPaolo Bonzini {
4428c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!tdp_enabled);
4429c50d8ae3SPaolo Bonzini 	return shadow_x_mask == 0;
4430c50d8ae3SPaolo Bonzini }
4431c50d8ae3SPaolo Bonzini 
4432c50d8ae3SPaolo Bonzini /*
4433c50d8ae3SPaolo Bonzini  * The direct page table on the host uses as many MMU features as
4434c50d8ae3SPaolo Bonzini  * possible; however, KVM currently does not do execution-protection.
4435c50d8ae3SPaolo Bonzini  */
4436c50d8ae3SPaolo Bonzini static void
4437c50d8ae3SPaolo Bonzini reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4438c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context)
4439c50d8ae3SPaolo Bonzini {
4440c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4441c50d8ae3SPaolo Bonzini 	int i;
4442c50d8ae3SPaolo Bonzini 
4443c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4444c50d8ae3SPaolo Bonzini 
4445c50d8ae3SPaolo Bonzini 	if (boot_cpu_is_amd())
4446b705a277SSean Christopherson 		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4447c50d8ae3SPaolo Bonzini 					context->shadow_root_level, false,
4448c50d8ae3SPaolo Bonzini 					boot_cpu_has(X86_FEATURE_GBPAGES),
44498c985b2dSSean Christopherson 					false, true);
4450c50d8ae3SPaolo Bonzini 	else
4451c50d8ae3SPaolo Bonzini 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
44526f8e65a6SSean Christopherson 					    reserved_hpa_bits(), false);
4453c50d8ae3SPaolo Bonzini 
4454c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4455c50d8ae3SPaolo Bonzini 		return;
4456c50d8ae3SPaolo Bonzini 
4457c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4458c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4459c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4460c50d8ae3SPaolo Bonzini 	}
4461c50d8ae3SPaolo Bonzini }
4462c50d8ae3SPaolo Bonzini 
4463c50d8ae3SPaolo Bonzini /*
4464c50d8ae3SPaolo Bonzini  * Same as the comment for reset_shadow_zero_bits_mask(), except this
4465c50d8ae3SPaolo Bonzini  * is the shadow page table for an Intel nested (EPT) guest.
4466c50d8ae3SPaolo Bonzini  */
4467c50d8ae3SPaolo Bonzini static void
4468c50d8ae3SPaolo Bonzini reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4469c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context, bool execonly)
4470c50d8ae3SPaolo Bonzini {
4471c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
44726f8e65a6SSean Christopherson 				    reserved_hpa_bits(), execonly);
4473c50d8ae3SPaolo Bonzini }
4474c50d8ae3SPaolo Bonzini 
4475c50d8ae3SPaolo Bonzini #define BYTE_MASK(access) \
4476c50d8ae3SPaolo Bonzini 	((1 & (access) ? 2 : 0) | \
4477c50d8ae3SPaolo Bonzini 	 (2 & (access) ? 4 : 0) | \
4478c50d8ae3SPaolo Bonzini 	 (3 & (access) ? 8 : 0) | \
4479c50d8ae3SPaolo Bonzini 	 (4 & (access) ? 16 : 0) | \
4480c50d8ae3SPaolo Bonzini 	 (5 & (access) ? 32 : 0) | \
4481c50d8ae3SPaolo Bonzini 	 (6 & (access) ? 64 : 0) | \
4482c50d8ae3SPaolo Bonzini 	 (7 & (access) ? 128 : 0))
4483c50d8ae3SPaolo Bonzini 
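/*
 * BYTE_MASK(access) sets bit i of the result iff the 3-bit index i, read as
 * a U/W/X combination, contains the (single-bit) @access value.  Worked out
 * for the standard ACC_* masks, purely as an illustration:
 *
 *	BYTE_MASK(ACC_EXEC_MASK)  == 0xaa	(indexes with X set)
 *	BYTE_MASK(ACC_WRITE_MASK) == 0xcc	(indexes with W set)
 *	BYTE_MASK(ACC_USER_MASK)  == 0xf0	(indexes with U set)
 */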
4484c50d8ae3SPaolo Bonzini 
4485c596f147SSean Christopherson static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
4486c50d8ae3SPaolo Bonzini {
4487c50d8ae3SPaolo Bonzini 	unsigned byte;
4488c50d8ae3SPaolo Bonzini 
4489c50d8ae3SPaolo Bonzini 	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4490c50d8ae3SPaolo Bonzini 	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4491c50d8ae3SPaolo Bonzini 	const u8 u = BYTE_MASK(ACC_USER_MASK);
4492c50d8ae3SPaolo Bonzini 
4493c596f147SSean Christopherson 	bool cr4_smep = is_cr4_smep(mmu);
4494c596f147SSean Christopherson 	bool cr4_smap = is_cr4_smap(mmu);
4495c596f147SSean Christopherson 	bool cr0_wp = is_cr0_wp(mmu);
449690599c28SSean Christopherson 	bool efer_nx = is_efer_nx(mmu);
4497c50d8ae3SPaolo Bonzini 
4498c50d8ae3SPaolo Bonzini 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4499c50d8ae3SPaolo Bonzini 		unsigned pfec = byte << 1;
4500c50d8ae3SPaolo Bonzini 
4501c50d8ae3SPaolo Bonzini 		/*
4502c50d8ae3SPaolo Bonzini 		 * Each "*f" variable has a 1 bit for each UWX value
4503c50d8ae3SPaolo Bonzini 		 * that causes a fault with the given PFEC.
4504c50d8ae3SPaolo Bonzini 		 */
4505c50d8ae3SPaolo Bonzini 
4506c50d8ae3SPaolo Bonzini 		/* Faults from writes to non-writable pages */
4507c50d8ae3SPaolo Bonzini 		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4508c50d8ae3SPaolo Bonzini 		/* Faults from user mode accesses to supervisor pages */
4509c50d8ae3SPaolo Bonzini 		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4510c50d8ae3SPaolo Bonzini 		/* Faults from fetches of non-executable pages*/
4511c50d8ae3SPaolo Bonzini 		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4512c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode fetches of user pages */
4513c50d8ae3SPaolo Bonzini 		u8 smepf = 0;
4514c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode accesses of user pages */
4515c50d8ae3SPaolo Bonzini 		u8 smapf = 0;
4516c50d8ae3SPaolo Bonzini 
4517c50d8ae3SPaolo Bonzini 		if (!ept) {
4518c50d8ae3SPaolo Bonzini 			/* Faults from kernel mode accesses to user pages */
4519c50d8ae3SPaolo Bonzini 			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4520c50d8ae3SPaolo Bonzini 
4521c50d8ae3SPaolo Bonzini 			/* Not really needed: !nx will cause pte.nx to fault */
452290599c28SSean Christopherson 			if (!efer_nx)
4523c50d8ae3SPaolo Bonzini 				ff = 0;
4524c50d8ae3SPaolo Bonzini 
4525c50d8ae3SPaolo Bonzini 			/* Allow supervisor writes if !cr0.wp */
4526c50d8ae3SPaolo Bonzini 			if (!cr0_wp)
4527c50d8ae3SPaolo Bonzini 				wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4528c50d8ae3SPaolo Bonzini 
4529c50d8ae3SPaolo Bonzini 			/* Disallow supervisor fetches of user code if cr4.smep */
4530c50d8ae3SPaolo Bonzini 			if (cr4_smep)
4531c50d8ae3SPaolo Bonzini 				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4532c50d8ae3SPaolo Bonzini 
4533c50d8ae3SPaolo Bonzini 			/*
4534c50d8ae3SPaolo Bonzini 			 * SMAP: kernel-mode data accesses from user-mode
4535c50d8ae3SPaolo Bonzini 			 * mappings should fault. A fault is considered
4536c50d8ae3SPaolo Bonzini 			 * as a SMAP violation if all of the following
4537c50d8ae3SPaolo Bonzini 			 * conditions are true:
4538c50d8ae3SPaolo Bonzini 			 *   - X86_CR4_SMAP is set in CR4
4539c50d8ae3SPaolo Bonzini 			 *   - A user page is accessed
4540c50d8ae3SPaolo Bonzini 			 *   - The access is not a fetch
4541c50d8ae3SPaolo Bonzini 			 *   - Page fault in kernel mode
4542c50d8ae3SPaolo Bonzini 			 *   - if CPL = 3 or X86_EFLAGS_AC is clear
4543c50d8ae3SPaolo Bonzini 			 *
4544c50d8ae3SPaolo Bonzini 			 * Here, we cover the first three conditions.
4545c50d8ae3SPaolo Bonzini 			 * The fourth is computed dynamically in permission_fault();
4546c50d8ae3SPaolo Bonzini 			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4547c50d8ae3SPaolo Bonzini 			 * *not* subject to SMAP restrictions.
4548c50d8ae3SPaolo Bonzini 			 */
4549c50d8ae3SPaolo Bonzini 			if (cr4_smap)
4550c50d8ae3SPaolo Bonzini 				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4551c50d8ae3SPaolo Bonzini 		}
4552c50d8ae3SPaolo Bonzini 
4553c50d8ae3SPaolo Bonzini 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4554c50d8ae3SPaolo Bonzini 	}
4555c50d8ae3SPaolo Bonzini }
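
/*
 * Rough sketch of how the table above is consumed (a simplified rendering
 * of the permission_fault() logic; the SMAP and PKRU adjustments are
 * omitted here for illustration):
 *
 *	index = pfec >> 1;	(page-fault error code with the P bit stripped)
 *	fault = (mmu->permissions[index] >> pte_access) & 1;
 *
 * E.g. a user-mode write (PFEC.U = 1, PFEC.W = 1, index 3) to a
 * supervisor-only, read-only translation (pte_access == 0) faults because
 * both the uf and wf terms set bit 0 of permissions[3], whereas
 * pte_access == (ACC_USER_MASK | ACC_WRITE_MASK) reads bit 6, which stays
 * clear, and does not fault.
 */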
4556c50d8ae3SPaolo Bonzini 
4557c50d8ae3SPaolo Bonzini /*
4558c50d8ae3SPaolo Bonzini * PKU is an additional mechanism by which paging controls access to
4559c50d8ae3SPaolo Bonzini * user-mode addresses based on the value in the PKRU register.  Protection
4560c50d8ae3SPaolo Bonzini * key violations are reported through a bit in the page fault error code.
4561c50d8ae3SPaolo Bonzini * Unlike other bits of the error code, the PK bit is not known at the
4562c50d8ae3SPaolo Bonzini * call site of e.g. gva_to_gpa; it must be computed directly in
4563c50d8ae3SPaolo Bonzini * permission_fault based on two bits of PKRU, on some machine state (CR4,
4564c50d8ae3SPaolo Bonzini * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4565c50d8ae3SPaolo Bonzini *
4566c50d8ae3SPaolo Bonzini * In particular the following conditions come from the error code, the
4567c50d8ae3SPaolo Bonzini * page tables and the machine state:
4568c50d8ae3SPaolo Bonzini * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4569c50d8ae3SPaolo Bonzini * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4570c50d8ae3SPaolo Bonzini * - PK is always zero if U=0 in the page tables
4571c50d8ae3SPaolo Bonzini * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4572c50d8ae3SPaolo Bonzini *
4573c50d8ae3SPaolo Bonzini * The PKRU bitmask caches the result of these four conditions.  The error
4574c50d8ae3SPaolo Bonzini * code (minus the P bit) and the page table's U bit form an index into the
4575c50d8ae3SPaolo Bonzini * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4576c50d8ae3SPaolo Bonzini * with the two bits of the PKRU register corresponding to the protection key.
4577c50d8ae3SPaolo Bonzini * For the first three conditions above the bits will be 00, thus masking
4578c50d8ae3SPaolo Bonzini * away both AD and WD.  For all reads or if the last condition holds, WD
4579c50d8ae3SPaolo Bonzini * only will be masked away.
4580c50d8ae3SPaolo Bonzini */
45812e4c0661SSean Christopherson static void update_pkru_bitmask(struct kvm_mmu *mmu)
4582c50d8ae3SPaolo Bonzini {
4583c50d8ae3SPaolo Bonzini 	unsigned bit;
4584c50d8ae3SPaolo Bonzini 	bool wp;
4585c50d8ae3SPaolo Bonzini 
45862e4c0661SSean Christopherson 	if (!is_cr4_pke(mmu)) {
4587c50d8ae3SPaolo Bonzini 		mmu->pkru_mask = 0;
4588c50d8ae3SPaolo Bonzini 		return;
4589c50d8ae3SPaolo Bonzini 	}
4590c50d8ae3SPaolo Bonzini 
45912e4c0661SSean Christopherson 	wp = is_cr0_wp(mmu);
4592c50d8ae3SPaolo Bonzini 
4593c50d8ae3SPaolo Bonzini 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4594c50d8ae3SPaolo Bonzini 		unsigned pfec, pkey_bits;
4595c50d8ae3SPaolo Bonzini 		bool check_pkey, check_write, ff, uf, wf, pte_user;
4596c50d8ae3SPaolo Bonzini 
4597c50d8ae3SPaolo Bonzini 		pfec = bit << 1;
4598c50d8ae3SPaolo Bonzini 		ff = pfec & PFERR_FETCH_MASK;
4599c50d8ae3SPaolo Bonzini 		uf = pfec & PFERR_USER_MASK;
4600c50d8ae3SPaolo Bonzini 		wf = pfec & PFERR_WRITE_MASK;
4601c50d8ae3SPaolo Bonzini 
4602c50d8ae3SPaolo Bonzini 		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
4603c50d8ae3SPaolo Bonzini 		pte_user = pfec & PFERR_RSVD_MASK;
4604c50d8ae3SPaolo Bonzini 
4605c50d8ae3SPaolo Bonzini 		/*
4606c50d8ae3SPaolo Bonzini 		 * Only accesses that are not instruction fetches and that
4607c50d8ae3SPaolo Bonzini 		 * target a user page need to be checked.
4608c50d8ae3SPaolo Bonzini 		 */
4609c50d8ae3SPaolo Bonzini 		check_pkey = (!ff && pte_user);
4610c50d8ae3SPaolo Bonzini 		/*
4611c50d8ae3SPaolo Bonzini 		 * Write access is controlled by PKRU if it is a
4612c50d8ae3SPaolo Bonzini 		 * user access or CR0.WP = 1.
4613c50d8ae3SPaolo Bonzini 		 */
4614c50d8ae3SPaolo Bonzini 		check_write = check_pkey && wf && (uf || wp);
4615c50d8ae3SPaolo Bonzini 
4616c50d8ae3SPaolo Bonzini 		/* PKRU.AD stops both read and write access. */
4617c50d8ae3SPaolo Bonzini 		pkey_bits = !!check_pkey;
4618c50d8ae3SPaolo Bonzini 		/* PKRU.WD stops write access. */
4619c50d8ae3SPaolo Bonzini 		pkey_bits |= (!!check_write) << 1;
4620c50d8ae3SPaolo Bonzini 
4621c50d8ae3SPaolo Bonzini 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4622c50d8ae3SPaolo Bonzini 	}
4623c50d8ae3SPaolo Bonzini }
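
/*
 * For illustration, for an index whose access targets a user page and is
 * not an instruction fetch (check_pkey == true):
 *
 *   - reads, and supervisor writes with CR0.WP=0, cache pkey_bits == 01b,
 *     i.e. only PKRU.AD can make the access fault;
 *   - user writes, and supervisor writes with CR0.WP=1, cache
 *     pkey_bits == 11b, i.e. either PKRU.AD or PKRU.WD can make it fault.
 */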
4624c50d8ae3SPaolo Bonzini 
4625533f9a4bSSean Christopherson static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
4626533f9a4bSSean Christopherson 					struct kvm_mmu *mmu)
4627c50d8ae3SPaolo Bonzini {
4628533f9a4bSSean Christopherson 	if (!is_cr0_pg(mmu))
4629533f9a4bSSean Christopherson 		return;
4630c50d8ae3SPaolo Bonzini 
4631533f9a4bSSean Christopherson 	reset_rsvds_bits_mask(vcpu, mmu);
4632533f9a4bSSean Christopherson 	update_permission_bitmask(mmu, false);
4633533f9a4bSSean Christopherson 	update_pkru_bitmask(mmu);
4634c50d8ae3SPaolo Bonzini }
4635c50d8ae3SPaolo Bonzini 
4636fe660f72SSean Christopherson static void paging64_init_context(struct kvm_mmu *context)
4637c50d8ae3SPaolo Bonzini {
4638c50d8ae3SPaolo Bonzini 	context->page_fault = paging64_page_fault;
4639c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging64_gva_to_gpa;
4640c50d8ae3SPaolo Bonzini 	context->sync_page = paging64_sync_page;
4641c50d8ae3SPaolo Bonzini 	context->invlpg = paging64_invlpg;
4642c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4643c50d8ae3SPaolo Bonzini }
4644c50d8ae3SPaolo Bonzini 
464584a16226SSean Christopherson static void paging32_init_context(struct kvm_mmu *context)
4646c50d8ae3SPaolo Bonzini {
4647c50d8ae3SPaolo Bonzini 	context->page_fault = paging32_page_fault;
4648c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging32_gva_to_gpa;
4649c50d8ae3SPaolo Bonzini 	context->sync_page = paging32_sync_page;
4650c50d8ae3SPaolo Bonzini 	context->invlpg = paging32_invlpg;
4651c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4652c50d8ae3SPaolo Bonzini }
4653c50d8ae3SPaolo Bonzini 
46548626c120SSean Christopherson static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
46558626c120SSean Christopherson 							 struct kvm_mmu_role_regs *regs)
4656c50d8ae3SPaolo Bonzini {
4657c50d8ae3SPaolo Bonzini 	union kvm_mmu_extended_role ext = {0};
4658c50d8ae3SPaolo Bonzini 
4659ca8d664fSSean Christopherson 	if (____is_cr0_pg(regs)) {
4660ca8d664fSSean Christopherson 		ext.cr0_pg = 1;
46618626c120SSean Christopherson 		ext.cr4_pae = ____is_cr4_pae(regs);
46628626c120SSean Christopherson 		ext.cr4_smep = ____is_cr4_smep(regs);
46638626c120SSean Christopherson 		ext.cr4_smap = ____is_cr4_smap(regs);
46648626c120SSean Christopherson 		ext.cr4_pse = ____is_cr4_pse(regs);
466584c679f5SSean Christopherson 
466684c679f5SSean Christopherson 		/* PKEY and LA57 are active iff long mode is active. */
466784c679f5SSean Christopherson 		ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
466884c679f5SSean Christopherson 		ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
4669ca8d664fSSean Christopherson 	}
4670c50d8ae3SPaolo Bonzini 
4671c50d8ae3SPaolo Bonzini 	ext.valid = 1;
4672c50d8ae3SPaolo Bonzini 
4673c50d8ae3SPaolo Bonzini 	return ext;
4674c50d8ae3SPaolo Bonzini }
4675c50d8ae3SPaolo Bonzini 
4676c50d8ae3SPaolo Bonzini static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
46778626c120SSean Christopherson 						   struct kvm_mmu_role_regs *regs,
4678c50d8ae3SPaolo Bonzini 						   bool base_only)
4679c50d8ae3SPaolo Bonzini {
4680c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4681c50d8ae3SPaolo Bonzini 
4682c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4683ca8d664fSSean Christopherson 	if (____is_cr0_pg(regs)) {
4684167f8a5cSSean Christopherson 		role.base.efer_nx = ____is_efer_nx(regs);
46858626c120SSean Christopherson 		role.base.cr0_wp = ____is_cr0_wp(regs);
4686ca8d664fSSean Christopherson 	}
4687c50d8ae3SPaolo Bonzini 	role.base.smm = is_smm(vcpu);
4688c50d8ae3SPaolo Bonzini 	role.base.guest_mode = is_guest_mode(vcpu);
4689c50d8ae3SPaolo Bonzini 
4690c50d8ae3SPaolo Bonzini 	if (base_only)
4691c50d8ae3SPaolo Bonzini 		return role;
4692c50d8ae3SPaolo Bonzini 
46938626c120SSean Christopherson 	role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
4694c50d8ae3SPaolo Bonzini 
4695c50d8ae3SPaolo Bonzini 	return role;
4696c50d8ae3SPaolo Bonzini }
4697c50d8ae3SPaolo Bonzini 
4698d468d94bSSean Christopherson static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4699d468d94bSSean Christopherson {
4700746700d2SWei Huang 	/* tdp_root_level is the architecture-forced level; use it if nonzero */
4701746700d2SWei Huang 	if (tdp_root_level)
4702746700d2SWei Huang 		return tdp_root_level;
4703746700d2SWei Huang 
4704d468d94bSSean Christopherson 	/* Use 5-level TDP if and only if it's useful/necessary. */
470583013059SSean Christopherson 	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4706d468d94bSSean Christopherson 		return 4;
4707d468d94bSSean Christopherson 
470883013059SSean Christopherson 	return max_tdp_level;
4709d468d94bSSean Christopherson }
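
/*
 * E.g. with max_tdp_level == 5, a guest whose CPUID-reported MAXPHYADDR is
 * 48 or less gets a 4-level TDP root, while a guest advertising
 * MAXPHYADDR == 52 gets the full 5-level walk; a nonzero tdp_root_level
 * overrides both.
 */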
4710d468d94bSSean Christopherson 
4711c50d8ae3SPaolo Bonzini static union kvm_mmu_role
47128626c120SSean Christopherson kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
47138626c120SSean Christopherson 				struct kvm_mmu_role_regs *regs, bool base_only)
4714c50d8ae3SPaolo Bonzini {
47158626c120SSean Christopherson 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4716c50d8ae3SPaolo Bonzini 
4717c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = (shadow_accessed_mask == 0);
4718d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4719c50d8ae3SPaolo Bonzini 	role.base.direct = true;
4720c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4721c50d8ae3SPaolo Bonzini 
4722c50d8ae3SPaolo Bonzini 	return role;
4723c50d8ae3SPaolo Bonzini }
4724c50d8ae3SPaolo Bonzini 
4725c50d8ae3SPaolo Bonzini static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4726c50d8ae3SPaolo Bonzini {
47278c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
47288626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4729c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
47308626c120SSean Christopherson 		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
4731c50d8ae3SPaolo Bonzini 
4732c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4733c50d8ae3SPaolo Bonzini 		return;
4734c50d8ae3SPaolo Bonzini 
4735c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
47367a02674dSSean Christopherson 	context->page_fault = kvm_tdp_page_fault;
4737c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
47385efac074SPaolo Bonzini 	context->invlpg = NULL;
4739d468d94bSSean Christopherson 	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4740c50d8ae3SPaolo Bonzini 	context->direct_map = true;
4741d8dd54e0SSean Christopherson 	context->get_guest_pgd = get_cr3;
4742c50d8ae3SPaolo Bonzini 	context->get_pdptr = kvm_pdptr_read;
4743c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4744f4bd6f73SSean Christopherson 	context->root_level = role_regs_to_root_level(&regs);
4745c50d8ae3SPaolo Bonzini 
474636f26787SSean Christopherson 	if (!is_cr0_pg(context))
4747c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = nonpaging_gva_to_gpa;
474836f26787SSean Christopherson 	else if (is_cr4_pae(context))
4749c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging64_gva_to_gpa;
4750f4bd6f73SSean Christopherson 	else
4751c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging32_gva_to_gpa;
4752c50d8ae3SPaolo Bonzini 
4753533f9a4bSSean Christopherson 	reset_guest_paging_metadata(vcpu, context);
4754c50d8ae3SPaolo Bonzini 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
4755c50d8ae3SPaolo Bonzini }
4756c50d8ae3SPaolo Bonzini 
4757c50d8ae3SPaolo Bonzini static union kvm_mmu_role
47588626c120SSean Christopherson kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
47598626c120SSean Christopherson 				      struct kvm_mmu_role_regs *regs, bool base_only)
4760c50d8ae3SPaolo Bonzini {
47618626c120SSean Christopherson 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4762c50d8ae3SPaolo Bonzini 
47638626c120SSean Christopherson 	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
47648626c120SSean Christopherson 	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
4765ca8d664fSSean Christopherson 	role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
4766c50d8ae3SPaolo Bonzini 
476759505b55SSean Christopherson 	return role;
476859505b55SSean Christopherson }
476959505b55SSean Christopherson 
477059505b55SSean Christopherson static union kvm_mmu_role
47718626c120SSean Christopherson kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
47728626c120SSean Christopherson 				   struct kvm_mmu_role_regs *regs, bool base_only)
477359505b55SSean Christopherson {
477459505b55SSean Christopherson 	union kvm_mmu_role role =
47758626c120SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
477659505b55SSean Christopherson 
47778626c120SSean Christopherson 	role.base.direct = !____is_cr0_pg(regs);
477859505b55SSean Christopherson 
47798626c120SSean Christopherson 	if (!____is_efer_lma(regs))
4780c50d8ae3SPaolo Bonzini 		role.base.level = PT32E_ROOT_LEVEL;
47818626c120SSean Christopherson 	else if (____is_cr4_la57(regs))
4782c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_5LEVEL;
4783c50d8ae3SPaolo Bonzini 	else
4784c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_4LEVEL;
4785c50d8ae3SPaolo Bonzini 
4786c50d8ae3SPaolo Bonzini 	return role;
4787c50d8ae3SPaolo Bonzini }
4788c50d8ae3SPaolo Bonzini 
47898c008659SPaolo Bonzini static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4790594e91a1SSean Christopherson 				    struct kvm_mmu_role_regs *regs,
47918c008659SPaolo Bonzini 				    union kvm_mmu_role new_role)
4792c50d8ae3SPaolo Bonzini {
479318db1b17SSean Christopherson 	if (new_role.as_u64 == context->mmu_role.as_u64)
479418db1b17SSean Christopherson 		return;
4795c50d8ae3SPaolo Bonzini 
4796c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
479718db1b17SSean Christopherson 
479836f26787SSean Christopherson 	if (!is_cr0_pg(context))
479984a16226SSean Christopherson 		nonpaging_init_context(context);
480036f26787SSean Christopherson 	else if (is_cr4_pae(context))
4801fe660f72SSean Christopherson 		paging64_init_context(context);
4802c50d8ae3SPaolo Bonzini 	else
480384a16226SSean Christopherson 		paging32_init_context(context);
4804f4bd6f73SSean Christopherson 	context->root_level = role_regs_to_root_level(regs);
4805c50d8ae3SPaolo Bonzini 
4806533f9a4bSSean Christopherson 	reset_guest_paging_metadata(vcpu, context);
4807d555f705SSean Christopherson 	context->shadow_root_level = new_role.base.level;
4808d555f705SSean Christopherson 
4809c50d8ae3SPaolo Bonzini 	reset_shadow_zero_bits_mask(vcpu, context);
4810c50d8ae3SPaolo Bonzini }
48110f04a2acSVitaly Kuznetsov 
4812594e91a1SSean Christopherson static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
4813594e91a1SSean Christopherson 				struct kvm_mmu_role_regs *regs)
48140f04a2acSVitaly Kuznetsov {
48158c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
48160f04a2acSVitaly Kuznetsov 	union kvm_mmu_role new_role =
48178626c120SSean Christopherson 		kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
48180f04a2acSVitaly Kuznetsov 
4819594e91a1SSean Christopherson 	shadow_mmu_init_context(vcpu, context, regs, new_role);
48200f04a2acSVitaly Kuznetsov }
48210f04a2acSVitaly Kuznetsov 
482259505b55SSean Christopherson static union kvm_mmu_role
48238626c120SSean Christopherson kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
48248626c120SSean Christopherson 				   struct kvm_mmu_role_regs *regs)
482559505b55SSean Christopherson {
482659505b55SSean Christopherson 	union kvm_mmu_role role =
48278626c120SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
482859505b55SSean Christopherson 
482959505b55SSean Christopherson 	role.base.direct = false;
4830d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
483159505b55SSean Christopherson 
483259505b55SSean Christopherson 	return role;
483359505b55SSean Christopherson }
483459505b55SSean Christopherson 
4835dbc4739bSSean Christopherson void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
4836dbc4739bSSean Christopherson 			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
48370f04a2acSVitaly Kuznetsov {
48388c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4839594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = {
4840594e91a1SSean Christopherson 		.cr0 = cr0,
4841594e91a1SSean Christopherson 		.cr4 = cr4,
4842594e91a1SSean Christopherson 		.efer = efer,
4843594e91a1SSean Christopherson 	};
48448626c120SSean Christopherson 	union kvm_mmu_role new_role;
48450f04a2acSVitaly Kuznetsov 
48468626c120SSean Christopherson 	new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
4847a506fdd2SVitaly Kuznetsov 
4848b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
4849a3322d5cSSean Christopherson 
4850594e91a1SSean Christopherson 	shadow_mmu_init_context(vcpu, context, &regs, new_role);
48510f04a2acSVitaly Kuznetsov }
48520f04a2acSVitaly Kuznetsov EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4853c50d8ae3SPaolo Bonzini 
4854c50d8ae3SPaolo Bonzini static union kvm_mmu_role
4855c50d8ae3SPaolo Bonzini kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4856bb1fcc70SSean Christopherson 				   bool execonly, u8 level)
4857c50d8ae3SPaolo Bonzini {
4858c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4859c50d8ae3SPaolo Bonzini 
4860c50d8ae3SPaolo Bonzini 	/* SMM flag is inherited from root_mmu */
4861c50d8ae3SPaolo Bonzini 	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4862c50d8ae3SPaolo Bonzini 
4863bb1fcc70SSean Christopherson 	role.base.level = level;
4864c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4865c50d8ae3SPaolo Bonzini 	role.base.direct = false;
4866c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = !accessed_dirty;
4867c50d8ae3SPaolo Bonzini 	role.base.guest_mode = true;
4868c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4869c50d8ae3SPaolo Bonzini 
4870cd6767c3SSean Christopherson 	/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
4871cd6767c3SSean Christopherson 	role.ext.word = 0;
4872c50d8ae3SPaolo Bonzini 	role.ext.execonly = execonly;
4873cd6767c3SSean Christopherson 	role.ext.valid = 1;
4874c50d8ae3SPaolo Bonzini 
4875c50d8ae3SPaolo Bonzini 	return role;
4876c50d8ae3SPaolo Bonzini }
4877c50d8ae3SPaolo Bonzini 
4878c50d8ae3SPaolo Bonzini void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4879c50d8ae3SPaolo Bonzini 			     bool accessed_dirty, gpa_t new_eptp)
4880c50d8ae3SPaolo Bonzini {
48818c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4882bb1fcc70SSean Christopherson 	u8 level = vmx_eptp_page_walk_level(new_eptp);
4883c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
4884c50d8ae3SPaolo Bonzini 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4885bb1fcc70SSean Christopherson 						   execonly, level);
4886c50d8ae3SPaolo Bonzini 
4887b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
4888c50d8ae3SPaolo Bonzini 
4889c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4890c50d8ae3SPaolo Bonzini 		return;
4891c50d8ae3SPaolo Bonzini 
489218db1b17SSean Christopherson 	context->mmu_role.as_u64 = new_role.as_u64;
489318db1b17SSean Christopherson 
4894bb1fcc70SSean Christopherson 	context->shadow_root_level = level;
4895c50d8ae3SPaolo Bonzini 
4896c50d8ae3SPaolo Bonzini 	context->ept_ad = accessed_dirty;
4897c50d8ae3SPaolo Bonzini 	context->page_fault = ept_page_fault;
4898c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = ept_gva_to_gpa;
4899c50d8ae3SPaolo Bonzini 	context->sync_page = ept_sync_page;
4900c50d8ae3SPaolo Bonzini 	context->invlpg = ept_invlpg;
4901bb1fcc70SSean Christopherson 	context->root_level = level;
4902c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4903c50d8ae3SPaolo Bonzini 
4904c596f147SSean Christopherson 	update_permission_bitmask(context, true);
49052e4c0661SSean Christopherson 	update_pkru_bitmask(context);
4906c50d8ae3SPaolo Bonzini 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4907c50d8ae3SPaolo Bonzini 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4908c50d8ae3SPaolo Bonzini }
4909c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4910c50d8ae3SPaolo Bonzini 
4911c50d8ae3SPaolo Bonzini static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4912c50d8ae3SPaolo Bonzini {
49138c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4914594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4915c50d8ae3SPaolo Bonzini 
4916594e91a1SSean Christopherson 	kvm_init_shadow_mmu(vcpu, &regs);
4917929d1cfaSPaolo Bonzini 
4918d8dd54e0SSean Christopherson 	context->get_guest_pgd     = get_cr3;
4919c50d8ae3SPaolo Bonzini 	context->get_pdptr         = kvm_pdptr_read;
4920c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4921c50d8ae3SPaolo Bonzini }
4922c50d8ae3SPaolo Bonzini 
49238626c120SSean Christopherson static union kvm_mmu_role
49248626c120SSean Christopherson kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
4925654430efSSean Christopherson {
49268626c120SSean Christopherson 	union kvm_mmu_role role;
49278626c120SSean Christopherson 
49288626c120SSean Christopherson 	role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4929654430efSSean Christopherson 
4930654430efSSean Christopherson 	/*
4931654430efSSean Christopherson 	 * Nested MMUs are used only for walking L2's gva->gpa; they never have
4932654430efSSean Christopherson 	 * shadow pages of their own, so "direct" has no meaning.  Set it
4933654430efSSean Christopherson 	 * to "true" to try to detect bogus usage of the nested MMU.
4934654430efSSean Christopherson 	 */
4935654430efSSean Christopherson 	role.base.direct = true;
4936f4bd6f73SSean Christopherson 	role.base.level = role_regs_to_root_level(regs);
4937654430efSSean Christopherson 	return role;
4938654430efSSean Christopherson }
4939654430efSSean Christopherson 
4940c50d8ae3SPaolo Bonzini static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4941c50d8ae3SPaolo Bonzini {
49428626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
49438626c120SSean Christopherson 	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
4944c50d8ae3SPaolo Bonzini 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4945c50d8ae3SPaolo Bonzini 
4946c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
4947c50d8ae3SPaolo Bonzini 		return;
4948c50d8ae3SPaolo Bonzini 
4949c50d8ae3SPaolo Bonzini 	g_context->mmu_role.as_u64 = new_role.as_u64;
4950d8dd54e0SSean Christopherson 	g_context->get_guest_pgd     = get_cr3;
4951c50d8ae3SPaolo Bonzini 	g_context->get_pdptr         = kvm_pdptr_read;
4952c50d8ae3SPaolo Bonzini 	g_context->inject_page_fault = kvm_inject_page_fault;
49535472fcd4SSean Christopherson 	g_context->root_level        = new_role.base.level;
4954c50d8ae3SPaolo Bonzini 
4955c50d8ae3SPaolo Bonzini 	/*
49565efac074SPaolo Bonzini 	 * L2 page tables are never shadowed, so there is no need to sync
49575efac074SPaolo Bonzini 	 * SPTEs.
49585efac074SPaolo Bonzini 	 */
49595efac074SPaolo Bonzini 	g_context->invlpg            = NULL;
49605efac074SPaolo Bonzini 
49615efac074SPaolo Bonzini 	/*
4962c50d8ae3SPaolo Bonzini 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4963c50d8ae3SPaolo Bonzini 	 * L1's nested page tables (e.g. EPT12). The nested translation
4964c50d8ae3SPaolo Bonzini 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4965c50d8ae3SPaolo Bonzini 	 * L2's page tables as the first level of translation and L1's
4966c50d8ae3SPaolo Bonzini 	 * nested page tables as the second level of translation. Basically
4967c50d8ae3SPaolo Bonzini 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4968c50d8ae3SPaolo Bonzini 	 */
4969fa4b5588SSean Christopherson 	if (!is_paging(vcpu))
4970c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4971fa4b5588SSean Christopherson 	else if (is_long_mode(vcpu))
4972c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4973fa4b5588SSean Christopherson 	else if (is_pae(vcpu))
4974c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4975fa4b5588SSean Christopherson 	else
4976c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4977fa4b5588SSean Christopherson 
4978533f9a4bSSean Christopherson 	reset_guest_paging_metadata(vcpu, g_context);
4979c50d8ae3SPaolo Bonzini }
4980c50d8ae3SPaolo Bonzini 
4981c9060662SSean Christopherson void kvm_init_mmu(struct kvm_vcpu *vcpu)
4982c50d8ae3SPaolo Bonzini {
4983c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
4984c50d8ae3SPaolo Bonzini 		init_kvm_nested_mmu(vcpu);
4985c50d8ae3SPaolo Bonzini 	else if (tdp_enabled)
4986c50d8ae3SPaolo Bonzini 		init_kvm_tdp_mmu(vcpu);
4987c50d8ae3SPaolo Bonzini 	else
4988c50d8ae3SPaolo Bonzini 		init_kvm_softmmu(vcpu);
4989c50d8ae3SPaolo Bonzini }
4990c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_mmu);
4991c50d8ae3SPaolo Bonzini 
4992c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
4993c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
4994c50d8ae3SPaolo Bonzini {
49958626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4996c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role;
4997c50d8ae3SPaolo Bonzini 
4998c50d8ae3SPaolo Bonzini 	if (tdp_enabled)
49998626c120SSean Christopherson 		role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
5000c50d8ae3SPaolo Bonzini 	else
50018626c120SSean Christopherson 		role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
5002c50d8ae3SPaolo Bonzini 
5003c50d8ae3SPaolo Bonzini 	return role.base;
5004c50d8ae3SPaolo Bonzini }
5005c50d8ae3SPaolo Bonzini 
500649c6f875SSean Christopherson void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
500749c6f875SSean Christopherson {
500849c6f875SSean Christopherson 	/*
500949c6f875SSean Christopherson 	 * Invalidate all MMU roles to force them to reinitialize as CPUID
501049c6f875SSean Christopherson 	 * information is factored into reserved bit calculations.
501149c6f875SSean Christopherson 	 */
501249c6f875SSean Christopherson 	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
501349c6f875SSean Christopherson 	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
501449c6f875SSean Christopherson 	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
501549c6f875SSean Christopherson 	kvm_mmu_reset_context(vcpu);
501663f5a190SSean Christopherson 
501763f5a190SSean Christopherson 	/*
501863f5a190SSean Christopherson 	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
501963f5a190SSean Christopherson 	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
502063f5a190SSean Christopherson 	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
502163f5a190SSean Christopherson 	 * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
502263f5a190SSean Christopherson 	 * sweep the problem under the rug.
502363f5a190SSean Christopherson 	 *
502463f5a190SSean Christopherson 	 * KVM's horrific CPUID ABI makes the problem all but impossible to
502563f5a190SSean Christopherson 	 * solve, as correctly handling multiple vCPU models (with respect to
502663f5a190SSean Christopherson 	 * paging and physical address properties) in a single VM would require
502763f5a190SSean Christopherson 	 * tracking all relevant CPUID information in kvm_mmu_page_role.  That
502863f5a190SSean Christopherson 	 * is very undesirable as it would double the memory requirements for
502963f5a190SSean Christopherson 	 * gfn_track (see struct kvm_mmu_page_role comments), and in practice
503063f5a190SSean Christopherson 	 * no sane VMM mucks with the core vCPU model on the fly.
503163f5a190SSean Christopherson 	 */
503263f5a190SSean Christopherson 	if (vcpu->arch.last_vmentry_cpu != -1) {
503363f5a190SSean Christopherson 		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
503463f5a190SSean Christopherson 		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
503563f5a190SSean Christopherson 	}
503649c6f875SSean Christopherson }
503749c6f875SSean Christopherson 
5038c50d8ae3SPaolo Bonzini void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
5039c50d8ae3SPaolo Bonzini {
5040c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
5041c9060662SSean Christopherson 	kvm_init_mmu(vcpu);
5042c50d8ae3SPaolo Bonzini }
5043c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
5044c50d8ae3SPaolo Bonzini 
5045c50d8ae3SPaolo Bonzini int kvm_mmu_load(struct kvm_vcpu *vcpu)
5046c50d8ae3SPaolo Bonzini {
5047c50d8ae3SPaolo Bonzini 	int r;
5048c50d8ae3SPaolo Bonzini 
5049378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
5050c50d8ae3SPaolo Bonzini 	if (r)
5051c50d8ae3SPaolo Bonzini 		goto out;
5052748e52b9SSean Christopherson 	r = mmu_alloc_special_roots(vcpu);
5053c50d8ae3SPaolo Bonzini 	if (r)
5054c50d8ae3SPaolo Bonzini 		goto out;
50554a38162eSPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
50566e6ec584SSean Christopherson 		r = mmu_alloc_direct_roots(vcpu);
50576e6ec584SSean Christopherson 	else
50586e6ec584SSean Christopherson 		r = mmu_alloc_shadow_roots(vcpu);
5059c50d8ae3SPaolo Bonzini 	if (r)
5060c50d8ae3SPaolo Bonzini 		goto out;
5061a91f387bSSean Christopherson 
5062a91f387bSSean Christopherson 	kvm_mmu_sync_roots(vcpu);
5063a91f387bSSean Christopherson 
5064727a7e27SPaolo Bonzini 	kvm_mmu_load_pgd(vcpu);
5065b3646477SJason Baron 	static_call(kvm_x86_tlb_flush_current)(vcpu);
5066c50d8ae3SPaolo Bonzini out:
5067c50d8ae3SPaolo Bonzini 	return r;
5068c50d8ae3SPaolo Bonzini }
5069c50d8ae3SPaolo Bonzini 
5070c50d8ae3SPaolo Bonzini void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5071c50d8ae3SPaolo Bonzini {
5072c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5073c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
5074c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5075c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
5076c50d8ae3SPaolo Bonzini }
5077c50d8ae3SPaolo Bonzini 
5078c50d8ae3SPaolo Bonzini static bool need_remote_flush(u64 old, u64 new)
5079c50d8ae3SPaolo Bonzini {
5080c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old))
5081c50d8ae3SPaolo Bonzini 		return false;
5082c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(new))
5083c50d8ae3SPaolo Bonzini 		return true;
5084c50d8ae3SPaolo Bonzini 	if ((old ^ new) & PT64_BASE_ADDR_MASK)
5085c50d8ae3SPaolo Bonzini 		return true;
5086c50d8ae3SPaolo Bonzini 	old ^= shadow_nx_mask;
5087c50d8ae3SPaolo Bonzini 	new ^= shadow_nx_mask;
5088c50d8ae3SPaolo Bonzini 	return (old & ~new & PT64_PERM_MASK) != 0;
5089c50d8ae3SPaolo Bonzini }
5090c50d8ae3SPaolo Bonzini 
5091c50d8ae3SPaolo Bonzini static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5092c50d8ae3SPaolo Bonzini 				    int *bytes)
5093c50d8ae3SPaolo Bonzini {
5094c50d8ae3SPaolo Bonzini 	u64 gentry = 0;
5095c50d8ae3SPaolo Bonzini 	int r;
5096c50d8ae3SPaolo Bonzini 
5097c50d8ae3SPaolo Bonzini 	/*
5098c50d8ae3SPaolo Bonzini 	 * Assume that the pte write is on a page table of the same type
5099c50d8ae3SPaolo Bonzini 	 * as the current vcpu's paging mode, since the sptes are updated
5100c50d8ae3SPaolo Bonzini 	 * only when they have the same mode.
5101c50d8ae3SPaolo Bonzini 	 */
5102c50d8ae3SPaolo Bonzini 	if (is_pae(vcpu) && *bytes == 4) {
5103c50d8ae3SPaolo Bonzini 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5104c50d8ae3SPaolo Bonzini 		*gpa &= ~(gpa_t)7;
5105c50d8ae3SPaolo Bonzini 		*bytes = 8;
5106c50d8ae3SPaolo Bonzini 	}
5107c50d8ae3SPaolo Bonzini 
5108c50d8ae3SPaolo Bonzini 	if (*bytes == 4 || *bytes == 8) {
5109c50d8ae3SPaolo Bonzini 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5110c50d8ae3SPaolo Bonzini 		if (r)
5111c50d8ae3SPaolo Bonzini 			gentry = 0;
5112c50d8ae3SPaolo Bonzini 	}
5113c50d8ae3SPaolo Bonzini 
5114c50d8ae3SPaolo Bonzini 	return gentry;
5115c50d8ae3SPaolo Bonzini }
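
/*
 * Example of the PAE special case above: when a 32-bit PAE guest updates a
 * gpte with two 4-byte writes, each write is widened here: the gpa is
 * rounded down to an 8-byte boundary (*gpa &= ~7), *bytes becomes 8, and
 * the full gpte currently in guest memory is re-read atomically rather
 * than acting on only half of the gpte.
 */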
5116c50d8ae3SPaolo Bonzini 
5117c50d8ae3SPaolo Bonzini /*
5118c50d8ae3SPaolo Bonzini  * If we're seeing too many writes to a page, it may no longer be a page table,
5119c50d8ae3SPaolo Bonzini  * or we may be forking, in which case it is better to unmap the page.
5120c50d8ae3SPaolo Bonzini  */
5121c50d8ae3SPaolo Bonzini static bool detect_write_flooding(struct kvm_mmu_page *sp)
5122c50d8ae3SPaolo Bonzini {
5123c50d8ae3SPaolo Bonzini 	/*
5124c50d8ae3SPaolo Bonzini 	 * Skip write-flooding detection for SPs whose level is 1, because they
5125c50d8ae3SPaolo Bonzini 	 * can become unsync, and then the guest page is not write-protected.
5126c50d8ae3SPaolo Bonzini 	 */
51273bae0459SSean Christopherson 	if (sp->role.level == PG_LEVEL_4K)
5128c50d8ae3SPaolo Bonzini 		return false;
5129c50d8ae3SPaolo Bonzini 
5130c50d8ae3SPaolo Bonzini 	atomic_inc(&sp->write_flooding_count);
5131c50d8ae3SPaolo Bonzini 	return atomic_read(&sp->write_flooding_count) >= 3;
5132c50d8ae3SPaolo Bonzini }
5133c50d8ae3SPaolo Bonzini 
5134c50d8ae3SPaolo Bonzini /*
5135c50d8ae3SPaolo Bonzini  * Misaligned accesses are too much trouble to fix up; also, they usually
5136c50d8ae3SPaolo Bonzini  * indicate a page is not used as a page table.
5137c50d8ae3SPaolo Bonzini  */
5138c50d8ae3SPaolo Bonzini static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5139c50d8ae3SPaolo Bonzini 				    int bytes)
5140c50d8ae3SPaolo Bonzini {
5141c50d8ae3SPaolo Bonzini 	unsigned offset, pte_size, misaligned;
5142c50d8ae3SPaolo Bonzini 
5143c50d8ae3SPaolo Bonzini 	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
5144c50d8ae3SPaolo Bonzini 		 gpa, bytes, sp->role.word);
5145c50d8ae3SPaolo Bonzini 
5146c50d8ae3SPaolo Bonzini 	offset = offset_in_page(gpa);
5147c50d8ae3SPaolo Bonzini 	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5148c50d8ae3SPaolo Bonzini 
5149c50d8ae3SPaolo Bonzini 	/*
5150c50d8ae3SPaolo Bonzini 	 * Sometimes the OS writes only the last byte to update status bits,
5151c50d8ae3SPaolo Bonzini 	 * e.g. Linux's clear_bit() uses an andb instruction.
5152c50d8ae3SPaolo Bonzini 	 */
5153c50d8ae3SPaolo Bonzini 	if (!(offset & (pte_size - 1)) && bytes == 1)
5154c50d8ae3SPaolo Bonzini 		return false;
5155c50d8ae3SPaolo Bonzini 
5156c50d8ae3SPaolo Bonzini 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5157c50d8ae3SPaolo Bonzini 	misaligned |= bytes < 4;
5158c50d8ae3SPaolo Bonzini 
5159c50d8ae3SPaolo Bonzini 	return misaligned;
5160c50d8ae3SPaolo Bonzini }
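
/*
 * Worked examples for the check above with 8-byte gptes (pte_size == 8): a
 * 4-byte write at page offset 4 gives (4 ^ 7) & ~7 == 0 and bytes >= 4, so
 * it is not misaligned (it stays within one gpte); a 4-byte write at offset
 * 6 gives (6 ^ 9) & ~7 == 8, i.e. it straddles two gptes and is treated as
 * misaligned; and any write shorter than 4 bytes is always treated as
 * misaligned.
 */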
5161c50d8ae3SPaolo Bonzini 
5162c50d8ae3SPaolo Bonzini static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5163c50d8ae3SPaolo Bonzini {
5164c50d8ae3SPaolo Bonzini 	unsigned page_offset, quadrant;
5165c50d8ae3SPaolo Bonzini 	u64 *spte;
5166c50d8ae3SPaolo Bonzini 	int level;
5167c50d8ae3SPaolo Bonzini 
5168c50d8ae3SPaolo Bonzini 	page_offset = offset_in_page(gpa);
5169c50d8ae3SPaolo Bonzini 	level = sp->role.level;
5170c50d8ae3SPaolo Bonzini 	*nspte = 1;
5171c50d8ae3SPaolo Bonzini 	if (!sp->role.gpte_is_8_bytes) {
5172c50d8ae3SPaolo Bonzini 		page_offset <<= 1;	/* 32->64 */
5173c50d8ae3SPaolo Bonzini 		/*
5174c50d8ae3SPaolo Bonzini 		 * A 32-bit pde maps 4MB while the shadow pdes map
5175c50d8ae3SPaolo Bonzini 		 * only 2MB.  So we need to double the offset again
5176c50d8ae3SPaolo Bonzini 		 * and zap two pdes instead of one.
5177c50d8ae3SPaolo Bonzini 		 */
5178c50d8ae3SPaolo Bonzini 		if (level == PT32_ROOT_LEVEL) {
5179c50d8ae3SPaolo Bonzini 			page_offset &= ~7; /* kill rounding error */
5180c50d8ae3SPaolo Bonzini 			page_offset <<= 1;
5181c50d8ae3SPaolo Bonzini 			*nspte = 2;
5182c50d8ae3SPaolo Bonzini 		}
5183c50d8ae3SPaolo Bonzini 		quadrant = page_offset >> PAGE_SHIFT;
5184c50d8ae3SPaolo Bonzini 		page_offset &= ~PAGE_MASK;
5185c50d8ae3SPaolo Bonzini 		if (quadrant != sp->role.quadrant)
5186c50d8ae3SPaolo Bonzini 			return NULL;
5187c50d8ae3SPaolo Bonzini 	}
5188c50d8ae3SPaolo Bonzini 
5189c50d8ae3SPaolo Bonzini 	spte = &sp->spt[page_offset / sizeof(*spte)];
5190c50d8ae3SPaolo Bonzini 	return spte;
5191c50d8ae3SPaolo Bonzini }
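
/*
 * Worked example (illustrative, not in the original source): for a 32-bit
 * guest (4-byte gptes) writing a PDE at page offset 0x800, page_offset is
 * doubled to 0x1000 and, because level == PT32_ROOT_LEVEL, doubled again to
 * 0x2000 with *nspte = 2.  That yields quadrant 2 and a final page_offset of
 * 0, so only the shadow page with role.quadrant == 2 sees the write, starting
 * at its first spte.
 */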
5192c50d8ae3SPaolo Bonzini 
5193c50d8ae3SPaolo Bonzini static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5194c50d8ae3SPaolo Bonzini 			      const u8 *new, int bytes,
5195c50d8ae3SPaolo Bonzini 			      struct kvm_page_track_notifier_node *node)
5196c50d8ae3SPaolo Bonzini {
5197c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
5198c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5199c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5200c50d8ae3SPaolo Bonzini 	u64 entry, gentry, *spte;
5201c50d8ae3SPaolo Bonzini 	int npte;
520206152b2dSLai Jiangshan 	bool flush = false;
5203c50d8ae3SPaolo Bonzini 
5204c50d8ae3SPaolo Bonzini 	/*
5205c50d8ae3SPaolo Bonzini 	 * If we don't have indirect shadow pages, it means no page is
5206c50d8ae3SPaolo Bonzini 	 * write-protected, so we can simply return.
5207c50d8ae3SPaolo Bonzini 	 */
5208c50d8ae3SPaolo Bonzini 	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5209c50d8ae3SPaolo Bonzini 		return;
5210c50d8ae3SPaolo Bonzini 
5211c50d8ae3SPaolo Bonzini 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5212c50d8ae3SPaolo Bonzini 
5213c50d8ae3SPaolo Bonzini 	/*
5214c50d8ae3SPaolo Bonzini 	 * No need to care whether the memory allocation succeeds,
5215d9f6e12fSIngo Molnar 	 * since PTE prefetch is skipped if the cache does not have
5216c50d8ae3SPaolo Bonzini 	 * enough objects.
5217c50d8ae3SPaolo Bonzini 	 */
5218378f5cd6SSean Christopherson 	mmu_topup_memory_caches(vcpu, true);
5219c50d8ae3SPaolo Bonzini 
5220531810caSBen Gardon 	write_lock(&vcpu->kvm->mmu_lock);
5221c50d8ae3SPaolo Bonzini 
5222c50d8ae3SPaolo Bonzini 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5223c50d8ae3SPaolo Bonzini 
5224c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_pte_write;
5225c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5226c50d8ae3SPaolo Bonzini 
5227c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5228c50d8ae3SPaolo Bonzini 		if (detect_write_misaligned(sp, gpa, bytes) ||
5229c50d8ae3SPaolo Bonzini 		      detect_write_flooding(sp)) {
5230c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5231c50d8ae3SPaolo Bonzini 			++vcpu->kvm->stat.mmu_flooded;
5232c50d8ae3SPaolo Bonzini 			continue;
5233c50d8ae3SPaolo Bonzini 		}
5234c50d8ae3SPaolo Bonzini 
5235c50d8ae3SPaolo Bonzini 		spte = get_written_sptes(sp, gpa, &npte);
5236c50d8ae3SPaolo Bonzini 		if (!spte)
5237c50d8ae3SPaolo Bonzini 			continue;
5238c50d8ae3SPaolo Bonzini 
5239c50d8ae3SPaolo Bonzini 		while (npte--) {
5240c50d8ae3SPaolo Bonzini 			entry = *spte;
52412de4085cSBen Gardon 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5242c5e2184dSSean Christopherson 			if (gentry && sp->role.level != PG_LEVEL_4K)
5243c5e2184dSSean Christopherson 				++vcpu->kvm->stat.mmu_pde_zapped;
5244c50d8ae3SPaolo Bonzini 			if (need_remote_flush(entry, *spte))
524506152b2dSLai Jiangshan 				flush = true;
5246c50d8ae3SPaolo Bonzini 			++spte;
5247c50d8ae3SPaolo Bonzini 		}
5248c50d8ae3SPaolo Bonzini 	}
524906152b2dSLai Jiangshan 	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
5250c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5251531810caSBen Gardon 	write_unlock(&vcpu->kvm->mmu_lock);
5252c50d8ae3SPaolo Bonzini }
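
/*
 * Illustrative note (not in the original source): kvm_mmu_pte_write() is
 * registered as the page-track write notifier in kvm_mmu_init_vm() below,
 * so it runs whenever an emulated guest write lands in a gfn that is
 * shadowed (and therefore write-protected) by the MMU.
 */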
5253c50d8ae3SPaolo Bonzini 
5254736c291cSSean Christopherson int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5255c50d8ae3SPaolo Bonzini 		       void *insn, int insn_len)
5256c50d8ae3SPaolo Bonzini {
525792daa48bSSean Christopherson 	int r, emulation_type = EMULTYPE_PF;
5258c50d8ae3SPaolo Bonzini 	bool direct = vcpu->arch.mmu->direct_map;
5259c50d8ae3SPaolo Bonzini 
52606948199aSSean Christopherson 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5261ddce6208SSean Christopherson 		return RET_PF_RETRY;
5262ddce6208SSean Christopherson 
5263c50d8ae3SPaolo Bonzini 	r = RET_PF_INVALID;
5264c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
5265736c291cSSean Christopherson 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5266c50d8ae3SPaolo Bonzini 		if (r == RET_PF_EMULATE)
5267c50d8ae3SPaolo Bonzini 			goto emulate;
5268c50d8ae3SPaolo Bonzini 	}
5269c50d8ae3SPaolo Bonzini 
5270c50d8ae3SPaolo Bonzini 	if (r == RET_PF_INVALID) {
52717a02674dSSean Christopherson 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
52727a02674dSSean Christopherson 					  lower_32_bits(error_code), false);
527319025e7bSSean Christopherson 		if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
52747b367bc9SSean Christopherson 			return -EIO;
5275c50d8ae3SPaolo Bonzini 	}
5276c50d8ae3SPaolo Bonzini 
5277c50d8ae3SPaolo Bonzini 	if (r < 0)
5278c50d8ae3SPaolo Bonzini 		return r;
527983a2ba4cSSean Christopherson 	if (r != RET_PF_EMULATE)
528083a2ba4cSSean Christopherson 		return 1;
5281c50d8ae3SPaolo Bonzini 
5282c50d8ae3SPaolo Bonzini 	/*
5283c50d8ae3SPaolo Bonzini 	 * Before emulating the instruction, check if the error code
5284c50d8ae3SPaolo Bonzini 	 * was due to a RO violation while translating the guest page.
5285c50d8ae3SPaolo Bonzini 	 * This can occur when using nested virtualization with nested
5286c50d8ae3SPaolo Bonzini 	 * paging in both guests. If true, we simply unprotect the page
5287c50d8ae3SPaolo Bonzini 	 * and resume the guest.
5288c50d8ae3SPaolo Bonzini 	 */
5289c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map &&
5290c50d8ae3SPaolo Bonzini 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5291736c291cSSean Christopherson 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5292c50d8ae3SPaolo Bonzini 		return 1;
5293c50d8ae3SPaolo Bonzini 	}
5294c50d8ae3SPaolo Bonzini 
5295c50d8ae3SPaolo Bonzini 	/*
5296c50d8ae3SPaolo Bonzini 	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5297c50d8ae3SPaolo Bonzini 	 * optimistically try to just unprotect the page and let the processor
5298c50d8ae3SPaolo Bonzini 	 * re-execute the instruction that caused the page fault.  Do not allow
5299c50d8ae3SPaolo Bonzini 	 * retrying MMIO emulation, as it's not only pointless but could also
5300c50d8ae3SPaolo Bonzini 	 * cause us to enter an infinite loop because the processor will keep
5301c50d8ae3SPaolo Bonzini 	 * faulting on the non-existent MMIO address.  Retrying an instruction
5302c50d8ae3SPaolo Bonzini 	 * from a nested guest is also pointless and dangerous as we are only
5303c50d8ae3SPaolo Bonzini 	 * explicitly shadowing L1's page tables, i.e. unprotecting something
5304c50d8ae3SPaolo Bonzini 	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5305c50d8ae3SPaolo Bonzini 	 */
5306736c291cSSean Christopherson 	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
530792daa48bSSean Christopherson 		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5308c50d8ae3SPaolo Bonzini emulate:
5309736c291cSSean Christopherson 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5310c50d8ae3SPaolo Bonzini 				       insn_len);
5311c50d8ae3SPaolo Bonzini }
5312c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
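
/*
 * Illustrative usage (a sketch, not taken from this file): vendor exit
 * handlers funnel guest faults into kvm_mmu_page_fault(), e.g. an
 * EPT-violation or NPF handler would do something along the lines of
 *
 *	return kvm_mmu_page_fault(vcpu, fault_gpa, error_code, NULL, 0);
 *
 * and rely on the RET_PF_* handling and emulation logic above to decide
 * whether to resume the guest, emulate, or report an error.
 */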
5313c50d8ae3SPaolo Bonzini 
53145efac074SPaolo Bonzini void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
53155efac074SPaolo Bonzini 			    gva_t gva, hpa_t root_hpa)
5316c50d8ae3SPaolo Bonzini {
5317c50d8ae3SPaolo Bonzini 	int i;
5318c50d8ae3SPaolo Bonzini 
53195efac074SPaolo Bonzini 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
53205efac074SPaolo Bonzini 	if (mmu != &vcpu->arch.guest_mmu) {
53215efac074SPaolo Bonzini 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5322c50d8ae3SPaolo Bonzini 		if (is_noncanonical_address(gva, vcpu))
5323c50d8ae3SPaolo Bonzini 			return;
5324c50d8ae3SPaolo Bonzini 
5325b3646477SJason Baron 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
53265efac074SPaolo Bonzini 	}
53275efac074SPaolo Bonzini 
53285efac074SPaolo Bonzini 	if (!mmu->invlpg)
53295efac074SPaolo Bonzini 		return;
53305efac074SPaolo Bonzini 
53315efac074SPaolo Bonzini 	if (root_hpa == INVALID_PAGE) {
5332c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5333c50d8ae3SPaolo Bonzini 
5334c50d8ae3SPaolo Bonzini 		/*
5335c50d8ae3SPaolo Bonzini 		 * INVLPG is required to invalidate any global mappings for the VA,
5336c50d8ae3SPaolo Bonzini 		 * irrespective of PCID.  Since it would take roughly the same amount
5337c50d8ae3SPaolo Bonzini 		 * of work to determine whether any of the prev_root mappings of the
5338c50d8ae3SPaolo Bonzini 		 * VA is marked global as it would to just sync it blindly, we might
5339c50d8ae3SPaolo Bonzini 		 * as well always sync it.
5340c50d8ae3SPaolo Bonzini 		 *
5341c50d8ae3SPaolo Bonzini 		 * Mappings not reachable via the current cr3 or the prev_roots will be
5342c50d8ae3SPaolo Bonzini 		 * synced when switching to that cr3, so nothing needs to be done here
5343c50d8ae3SPaolo Bonzini 		 * for them.
5344c50d8ae3SPaolo Bonzini 		 */
5345c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5346c50d8ae3SPaolo Bonzini 			if (VALID_PAGE(mmu->prev_roots[i].hpa))
5347c50d8ae3SPaolo Bonzini 				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
53485efac074SPaolo Bonzini 	} else {
53495efac074SPaolo Bonzini 		mmu->invlpg(vcpu, gva, root_hpa);
53505efac074SPaolo Bonzini 	}
53515efac074SPaolo Bonzini }
5352c50d8ae3SPaolo Bonzini 
53535efac074SPaolo Bonzini void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
53545efac074SPaolo Bonzini {
53555efac074SPaolo Bonzini 	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
5356c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5357c50d8ae3SPaolo Bonzini }
5358c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5359c50d8ae3SPaolo Bonzini 
53605efac074SPaolo Bonzini 
5361c50d8ae3SPaolo Bonzini void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5362c50d8ae3SPaolo Bonzini {
5363c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5364c50d8ae3SPaolo Bonzini 	bool tlb_flush = false;
5365c50d8ae3SPaolo Bonzini 	uint i;
5366c50d8ae3SPaolo Bonzini 
5367c50d8ae3SPaolo Bonzini 	if (pcid == kvm_get_active_pcid(vcpu)) {
5368c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5369c50d8ae3SPaolo Bonzini 		tlb_flush = true;
5370c50d8ae3SPaolo Bonzini 	}
5371c50d8ae3SPaolo Bonzini 
5372c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5373c50d8ae3SPaolo Bonzini 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5374be01e8e2SSean Christopherson 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5375c50d8ae3SPaolo Bonzini 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5376c50d8ae3SPaolo Bonzini 			tlb_flush = true;
5377c50d8ae3SPaolo Bonzini 		}
5378c50d8ae3SPaolo Bonzini 	}
5379c50d8ae3SPaolo Bonzini 
5380c50d8ae3SPaolo Bonzini 	if (tlb_flush)
5381b3646477SJason Baron 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5382c50d8ae3SPaolo Bonzini 
5383c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5384c50d8ae3SPaolo Bonzini 
5385c50d8ae3SPaolo Bonzini 	/*
5386c50d8ae3SPaolo Bonzini 	 * Mappings not reachable via the current cr3 or the prev_roots will be
5387c50d8ae3SPaolo Bonzini 	 * synced when switching to that cr3, so nothing needs to be done here
5388c50d8ae3SPaolo Bonzini 	 * for them.
5389c50d8ae3SPaolo Bonzini 	 */
5390c50d8ae3SPaolo Bonzini }
5391c50d8ae3SPaolo Bonzini 
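
/*
 * Illustrative usage (a sketch, not taken from this file): this helper backs
 * emulation of single-address INVPCID, where the emulator is expected to do
 * roughly
 *
 *	kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
 *
 * flushing the GVA in the current root and in any cached previous root that
 * is tagged with the given PCID.
 */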
5392746700d2SWei Huang void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
5393746700d2SWei Huang 		       int tdp_max_root_level, int tdp_huge_page_level)
5394c50d8ae3SPaolo Bonzini {
5395bde77235SSean Christopherson 	tdp_enabled = enable_tdp;
5396746700d2SWei Huang 	tdp_root_level = tdp_forced_root_level;
539783013059SSean Christopherson 	max_tdp_level = tdp_max_root_level;
5398703c335dSSean Christopherson 
5399703c335dSSean Christopherson 	/*
54001d92d2e8SSean Christopherson 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
5401703c335dSSean Christopherson 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
5402703c335dSSean Christopherson 	 * the kernel is not.  But KVM never creates a mapping larger than
5403703c335dSSean Christopherson 	 * what the kernel itself uses for any given HVA, i.e. the kernel's
5404703c335dSSean Christopherson 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5405703c335dSSean Christopherson 	 */
5406703c335dSSean Christopherson 	if (tdp_enabled)
54071d92d2e8SSean Christopherson 		max_huge_page_level = tdp_huge_page_level;
5408703c335dSSean Christopherson 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
54091d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_1G;
5410703c335dSSean Christopherson 	else
54111d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_2M;
5412c50d8ae3SPaolo Bonzini }
5413bde77235SSean Christopherson EXPORT_SYMBOL_GPL(kvm_configure_mmu);
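
/*
 * Illustrative call (hypothetical values, not from this file): a vendor
 * module that enables TDP, does not force a particular root level, allows
 * up to 5-level TDP and can map 1GB pages might configure the MMU with
 *
 *	kvm_configure_mmu(true, 0, PT64_ROOT_5LEVEL, PG_LEVEL_1G);
 */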
5414c50d8ae3SPaolo Bonzini 
5415c50d8ae3SPaolo Bonzini /* The return value indicates if tlb flush on all vcpus is needed. */
5416269e9552SHamza Mahfooz typedef bool (*slot_level_handler) (struct kvm *kvm,
5417269e9552SHamza Mahfooz 				    struct kvm_rmap_head *rmap_head,
5418269e9552SHamza Mahfooz 				    const struct kvm_memory_slot *slot);
5419c50d8ae3SPaolo Bonzini 
5420c50d8ae3SPaolo Bonzini /* The caller should hold mmu-lock before calling this function. */
5421c50d8ae3SPaolo Bonzini static __always_inline bool
5422269e9552SHamza Mahfooz slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5423c50d8ae3SPaolo Bonzini 			slot_level_handler fn, int start_level, int end_level,
54241a61b7dbSSean Christopherson 			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
54251a61b7dbSSean Christopherson 			bool flush)
5426c50d8ae3SPaolo Bonzini {
5427c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
5428c50d8ae3SPaolo Bonzini 
5429c50d8ae3SPaolo Bonzini 	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5430c50d8ae3SPaolo Bonzini 			end_gfn, &iterator) {
5431c50d8ae3SPaolo Bonzini 		if (iterator.rmap)
54320a234f5dSSean Christopherson 			flush |= fn(kvm, iterator.rmap, memslot);
5433c50d8ae3SPaolo Bonzini 
5434531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5435302695a5SSean Christopherson 			if (flush && flush_on_yield) {
5436c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm,
5437c50d8ae3SPaolo Bonzini 						start_gfn,
5438c50d8ae3SPaolo Bonzini 						iterator.gfn - start_gfn + 1);
5439c50d8ae3SPaolo Bonzini 				flush = false;
5440c50d8ae3SPaolo Bonzini 			}
5441531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
5442c50d8ae3SPaolo Bonzini 		}
5443c50d8ae3SPaolo Bonzini 	}
5444c50d8ae3SPaolo Bonzini 
5445c50d8ae3SPaolo Bonzini 	return flush;
5446c50d8ae3SPaolo Bonzini }
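
/*
 * Illustrative note (not in the original source): callers that pass
 * flush_on_yield=true, e.g. kvm_zap_gfn_range() below, get any pending TLB
 * flush for the already-processed portion of the range issued before the
 * walk drops mmu_lock in cond_resched_rwlock_write(), so no stale
 * translations survive the lock break.
 */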
5447c50d8ae3SPaolo Bonzini 
5448c50d8ae3SPaolo Bonzini static __always_inline bool
5449269e9552SHamza Mahfooz slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5450c50d8ae3SPaolo Bonzini 		  slot_level_handler fn, int start_level, int end_level,
5451302695a5SSean Christopherson 		  bool flush_on_yield)
5452c50d8ae3SPaolo Bonzini {
5453c50d8ae3SPaolo Bonzini 	return slot_handle_level_range(kvm, memslot, fn, start_level,
5454c50d8ae3SPaolo Bonzini 			end_level, memslot->base_gfn,
5455c50d8ae3SPaolo Bonzini 			memslot->base_gfn + memslot->npages - 1,
54561a61b7dbSSean Christopherson 			flush_on_yield, false);
5457c50d8ae3SPaolo Bonzini }
5458c50d8ae3SPaolo Bonzini 
5459c50d8ae3SPaolo Bonzini static __always_inline bool
5460610265eaSDavid Matlack slot_handle_level_4k(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5461302695a5SSean Christopherson 		     slot_level_handler fn, bool flush_on_yield)
5462c50d8ae3SPaolo Bonzini {
54633bae0459SSean Christopherson 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5464302695a5SSean Christopherson 				 PG_LEVEL_4K, flush_on_yield);
5465c50d8ae3SPaolo Bonzini }
5466c50d8ae3SPaolo Bonzini 
5467c50d8ae3SPaolo Bonzini static void free_mmu_pages(struct kvm_mmu *mmu)
5468c50d8ae3SPaolo Bonzini {
54694a98623dSSean Christopherson 	if (!tdp_enabled && mmu->pae_root)
54704a98623dSSean Christopherson 		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
5471c50d8ae3SPaolo Bonzini 	free_page((unsigned long)mmu->pae_root);
547203ca4589SSean Christopherson 	free_page((unsigned long)mmu->pml4_root);
5473cb0f722aSWei Huang 	free_page((unsigned long)mmu->pml5_root);
5474c50d8ae3SPaolo Bonzini }
5475c50d8ae3SPaolo Bonzini 
547604d28e37SSean Christopherson static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5477c50d8ae3SPaolo Bonzini {
5478c50d8ae3SPaolo Bonzini 	struct page *page;
5479c50d8ae3SPaolo Bonzini 	int i;
5480c50d8ae3SPaolo Bonzini 
548104d28e37SSean Christopherson 	mmu->root_hpa = INVALID_PAGE;
548204d28e37SSean Christopherson 	mmu->root_pgd = 0;
548304d28e37SSean Christopherson 	mmu->translate_gpa = translate_gpa;
548404d28e37SSean Christopherson 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
548504d28e37SSean Christopherson 		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
548604d28e37SSean Christopherson 
5487c50d8ae3SPaolo Bonzini 	/*
5488c50d8ae3SPaolo Bonzini 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5489c50d8ae3SPaolo Bonzini 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
5490c50d8ae3SPaolo Bonzini 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5491c50d8ae3SPaolo Bonzini 	 * x86_64.  Therefore we need to allocate the PDP table in the first
549204d45551SSean Christopherson 	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
549304d45551SSean Christopherson 	 * generally doesn't use PAE paging and can skip allocating the PDP
549404d45551SSean Christopherson 	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
549504d45551SSean Christopherson 	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
549604d45551SSean Christopherson 	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
5497c50d8ae3SPaolo Bonzini 	 */
5498d468d94bSSean Christopherson 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5499c50d8ae3SPaolo Bonzini 		return 0;
5500c50d8ae3SPaolo Bonzini 
5501c50d8ae3SPaolo Bonzini 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5502c50d8ae3SPaolo Bonzini 	if (!page)
5503c50d8ae3SPaolo Bonzini 		return -ENOMEM;
5504c50d8ae3SPaolo Bonzini 
5505c50d8ae3SPaolo Bonzini 	mmu->pae_root = page_address(page);
55064a98623dSSean Christopherson 
55074a98623dSSean Christopherson 	/*
55084a98623dSSean Christopherson 	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
55094a98623dSSean Christopherson 	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
55104a98623dSSean Christopherson 	 * that KVM's writes and the CPU's reads get along.  Note, this is
55114a98623dSSean Christopherson 	 * only necessary when using shadow paging, as 64-bit NPT can get at
55124a98623dSSean Christopherson 	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
55134a98623dSSean Christopherson 	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
55144a98623dSSean Christopherson 	 */
55154a98623dSSean Christopherson 	if (!tdp_enabled)
55164a98623dSSean Christopherson 		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
55174a98623dSSean Christopherson 	else
55184a98623dSSean Christopherson 		WARN_ON_ONCE(shadow_me_mask);
55194a98623dSSean Christopherson 
5520c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i)
5521c834e5e4SSean Christopherson 		mmu->pae_root[i] = INVALID_PAE_ROOT;
5522c50d8ae3SPaolo Bonzini 
5523c50d8ae3SPaolo Bonzini 	return 0;
5524c50d8ae3SPaolo Bonzini }
5525c50d8ae3SPaolo Bonzini 
5526c50d8ae3SPaolo Bonzini int kvm_mmu_create(struct kvm_vcpu *vcpu)
5527c50d8ae3SPaolo Bonzini {
5528c50d8ae3SPaolo Bonzini 	int ret;
5529c50d8ae3SPaolo Bonzini 
55305962bfb7SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
55315f6078f9SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
55325f6078f9SSean Christopherson 
55335962bfb7SSean Christopherson 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
55345f6078f9SSean Christopherson 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
55355962bfb7SSean Christopherson 
553696880883SSean Christopherson 	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
553796880883SSean Christopherson 
5538c50d8ae3SPaolo Bonzini 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
5539c50d8ae3SPaolo Bonzini 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5540c50d8ae3SPaolo Bonzini 
5541c50d8ae3SPaolo Bonzini 	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5542c50d8ae3SPaolo Bonzini 
554304d28e37SSean Christopherson 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5544c50d8ae3SPaolo Bonzini 	if (ret)
5545c50d8ae3SPaolo Bonzini 		return ret;
5546c50d8ae3SPaolo Bonzini 
554704d28e37SSean Christopherson 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5548c50d8ae3SPaolo Bonzini 	if (ret)
5549c50d8ae3SPaolo Bonzini 		goto fail_allocate_root;
5550c50d8ae3SPaolo Bonzini 
5551c50d8ae3SPaolo Bonzini 	return ret;
5552c50d8ae3SPaolo Bonzini  fail_allocate_root:
5553c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
5554c50d8ae3SPaolo Bonzini 	return ret;
5555c50d8ae3SPaolo Bonzini }
5556c50d8ae3SPaolo Bonzini 
5557c50d8ae3SPaolo Bonzini #define BATCH_ZAP_PAGES	10
5558c50d8ae3SPaolo Bonzini static void kvm_zap_obsolete_pages(struct kvm *kvm)
5559c50d8ae3SPaolo Bonzini {
5560c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5561c50d8ae3SPaolo Bonzini 	int nr_zapped, batch = 0;
5562c50d8ae3SPaolo Bonzini 
5563c50d8ae3SPaolo Bonzini restart:
5564c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe_reverse(sp, node,
5565c50d8ae3SPaolo Bonzini 	      &kvm->arch.active_mmu_pages, link) {
5566c50d8ae3SPaolo Bonzini 		/*
5567c50d8ae3SPaolo Bonzini 		 * No obsolete valid page exists before a newly created page
5568c50d8ae3SPaolo Bonzini 		 * since active_mmu_pages is a FIFO list.
5569c50d8ae3SPaolo Bonzini 		 */
5570c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
5571c50d8ae3SPaolo Bonzini 			break;
5572c50d8ae3SPaolo Bonzini 
5573c50d8ae3SPaolo Bonzini 		/*
5574f95eec9bSSean Christopherson 		 * Invalid pages should never land back on the list of active
5575f95eec9bSSean Christopherson 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
5576f95eec9bSSean Christopherson 		 * infinite loop if the page gets put back on the list (again).
5577c50d8ae3SPaolo Bonzini 		 */
5578f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5579c50d8ae3SPaolo Bonzini 			continue;
5580c50d8ae3SPaolo Bonzini 
5581c50d8ae3SPaolo Bonzini 		/*
5582c50d8ae3SPaolo Bonzini 		 * No need to flush the TLB since we're only zapping shadow
5583c50d8ae3SPaolo Bonzini 		 * pages with an obsolete generation number and all vCPUS have
5584c50d8ae3SPaolo Bonzini 		 * loaded a new root, i.e. the shadow pages being zapped cannot
5585c50d8ae3SPaolo Bonzini 		 * be in active use by the guest.
5586c50d8ae3SPaolo Bonzini 		 */
5587c50d8ae3SPaolo Bonzini 		if (batch >= BATCH_ZAP_PAGES &&
5588531810caSBen Gardon 		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
5589c50d8ae3SPaolo Bonzini 			batch = 0;
5590c50d8ae3SPaolo Bonzini 			goto restart;
5591c50d8ae3SPaolo Bonzini 		}
5592c50d8ae3SPaolo Bonzini 
5593c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp,
5594c50d8ae3SPaolo Bonzini 				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5595c50d8ae3SPaolo Bonzini 			batch += nr_zapped;
5596c50d8ae3SPaolo Bonzini 			goto restart;
5597c50d8ae3SPaolo Bonzini 		}
5598c50d8ae3SPaolo Bonzini 	}
5599c50d8ae3SPaolo Bonzini 
5600c50d8ae3SPaolo Bonzini 	/*
5601c50d8ae3SPaolo Bonzini 	 * Trigger a remote TLB flush before freeing the page tables to ensure
5602c50d8ae3SPaolo Bonzini 	 * KVM is not in the middle of a lockless shadow page table walk, which
5603c50d8ae3SPaolo Bonzini 	 * may reference the pages.
5604c50d8ae3SPaolo Bonzini 	 */
5605c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5606c50d8ae3SPaolo Bonzini }
5607c50d8ae3SPaolo Bonzini 
5608c50d8ae3SPaolo Bonzini /*
5609c50d8ae3SPaolo Bonzini  * Fast invalidate all shadow pages and use lock-break technique
5610c50d8ae3SPaolo Bonzini  * to zap obsolete pages.
5611c50d8ae3SPaolo Bonzini  *
5612c50d8ae3SPaolo Bonzini  * It's required when a memslot is being deleted or the VM is being
5613c50d8ae3SPaolo Bonzini  * destroyed; in these cases, we must ensure that the KVM MMU does
5614c50d8ae3SPaolo Bonzini  * not use any resource of the slot being deleted, or of any slot,
5615c50d8ae3SPaolo Bonzini  * after the function returns.
5616c50d8ae3SPaolo Bonzini  */
5617c50d8ae3SPaolo Bonzini static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5618c50d8ae3SPaolo Bonzini {
5619c50d8ae3SPaolo Bonzini 	lockdep_assert_held(&kvm->slots_lock);
5620c50d8ae3SPaolo Bonzini 
5621531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5622c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_zap_all_fast(kvm);
5623c50d8ae3SPaolo Bonzini 
5624c50d8ae3SPaolo Bonzini 	/*
5625c50d8ae3SPaolo Bonzini 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5626c50d8ae3SPaolo Bonzini 	 * held for the entire duration of zapping obsolete pages, it's
5627c50d8ae3SPaolo Bonzini 	 * impossible for there to be multiple invalid generations associated
5628c50d8ae3SPaolo Bonzini 	 * with *valid* shadow pages at any given time, i.e. there is exactly
5629c50d8ae3SPaolo Bonzini 	 * one valid generation and (at most) one invalid generation.
5630c50d8ae3SPaolo Bonzini 	 */
5631c50d8ae3SPaolo Bonzini 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5632c50d8ae3SPaolo Bonzini 
5633b7cccd39SBen Gardon 	/*
5634b7cccd39SBen Gardon 	 * In order to ensure all threads see this change when handling the
5635b7cccd39SBen Gardon 	 * MMU reload signal, this must happen in the same critical section
5636b7cccd39SBen Gardon 	 * as kvm_reload_remote_mmus(), and before kvm_zap_obsolete_pages(),
5637b7cccd39SBen Gardon 	 * as the latter could drop the MMU lock and yield.
5638b7cccd39SBen Gardon 	 */
5639b7cccd39SBen Gardon 	if (is_tdp_mmu_enabled(kvm))
5640b7cccd39SBen Gardon 		kvm_tdp_mmu_invalidate_all_roots(kvm);
5641b7cccd39SBen Gardon 
5642c50d8ae3SPaolo Bonzini 	/*
5643c50d8ae3SPaolo Bonzini 	 * Notify all vcpus to reload their shadow page tables and flush their
5644c50d8ae3SPaolo Bonzini 	 * TLBs.  All vcpus will then switch to new shadow page tables with the
5645c50d8ae3SPaolo Bonzini 	 * new mmu_valid_gen.
5646c50d8ae3SPaolo Bonzini 	 *
5647c50d8ae3SPaolo Bonzini 	 * Note: we need to do this under the protection of mmu_lock;
5648c50d8ae3SPaolo Bonzini 	 * otherwise, a vcpu could purge a shadow page but miss the TLB flush.
5649c50d8ae3SPaolo Bonzini 	 */
5650c50d8ae3SPaolo Bonzini 	kvm_reload_remote_mmus(kvm);
5651c50d8ae3SPaolo Bonzini 
5652c50d8ae3SPaolo Bonzini 	kvm_zap_obsolete_pages(kvm);
5653faaf05b0SBen Gardon 
5654531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
56554c6654bdSBen Gardon 
56564c6654bdSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
56574c6654bdSBen Gardon 		read_lock(&kvm->mmu_lock);
56584c6654bdSBen Gardon 		kvm_tdp_mmu_zap_invalidated_roots(kvm);
56594c6654bdSBen Gardon 		read_unlock(&kvm->mmu_lock);
56604c6654bdSBen Gardon 	}
5661c50d8ae3SPaolo Bonzini }
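
/*
 * Illustrative note (a sketch, not in the original source): once
 * mmu_valid_gen has been toggled above, shadow pages created under the old
 * generation are recognized as obsolete by a check along the lines of
 *
 *	sp->mmu_valid_gen != kvm->arch.mmu_valid_gen
 *
 * which is what lets kvm_zap_obsolete_pages() stop at the first non-obsolete
 * entry of the FIFO-ordered active_mmu_pages list.
 */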
5662c50d8ae3SPaolo Bonzini 
5663c50d8ae3SPaolo Bonzini static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5664c50d8ae3SPaolo Bonzini {
5665c50d8ae3SPaolo Bonzini 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5666c50d8ae3SPaolo Bonzini }
5667c50d8ae3SPaolo Bonzini 
5668c50d8ae3SPaolo Bonzini static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5669c50d8ae3SPaolo Bonzini 			struct kvm_memory_slot *slot,
5670c50d8ae3SPaolo Bonzini 			struct kvm_page_track_notifier_node *node)
5671c50d8ae3SPaolo Bonzini {
5672c50d8ae3SPaolo Bonzini 	kvm_mmu_zap_all_fast(kvm);
5673c50d8ae3SPaolo Bonzini }
5674c50d8ae3SPaolo Bonzini 
5675c50d8ae3SPaolo Bonzini void kvm_mmu_init_vm(struct kvm *kvm)
5676c50d8ae3SPaolo Bonzini {
5677c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5678c50d8ae3SPaolo Bonzini 
5679ce25681dSSean Christopherson 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
5680ce25681dSSean Christopherson 
56811e76a3ceSDavid Stevens 	kvm_mmu_init_tdp_mmu(kvm);
5682deae4a10SDavid Stevens 
5683c50d8ae3SPaolo Bonzini 	node->track_write = kvm_mmu_pte_write;
5684c50d8ae3SPaolo Bonzini 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5685c50d8ae3SPaolo Bonzini 	kvm_page_track_register_notifier(kvm, node);
5686c50d8ae3SPaolo Bonzini }
5687c50d8ae3SPaolo Bonzini 
5688c50d8ae3SPaolo Bonzini void kvm_mmu_uninit_vm(struct kvm *kvm)
5689c50d8ae3SPaolo Bonzini {
5690c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5691c50d8ae3SPaolo Bonzini 
5692c50d8ae3SPaolo Bonzini 	kvm_page_track_unregister_notifier(kvm, node);
5693fe5db27dSBen Gardon 
5694fe5db27dSBen Gardon 	kvm_mmu_uninit_tdp_mmu(kvm);
5695c50d8ae3SPaolo Bonzini }
5696c50d8ae3SPaolo Bonzini 
569788f58535SMaxim Levitsky /*
569888f58535SMaxim Levitsky  * Invalidate (zap) SPTEs that cover GFNs in the range [gfn_start, gfn_end),
569988f58535SMaxim Levitsky  * i.e. gfn_end is exclusive.
570088f58535SMaxim Levitsky  */
5701c50d8ae3SPaolo Bonzini void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5702c50d8ae3SPaolo Bonzini {
5703c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
5704c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
5705c50d8ae3SPaolo Bonzini 	int i;
57061a61b7dbSSean Christopherson 	bool flush = false;
5707c50d8ae3SPaolo Bonzini 
5708531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
57095a324c24SSean Christopherson 
5710edb298c6SMaxim Levitsky 	kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
5711edb298c6SMaxim Levitsky 
57125a324c24SSean Christopherson 	if (kvm_memslots_have_rmaps(kvm)) {
5713c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5714c50d8ae3SPaolo Bonzini 			slots = __kvm_memslots(kvm, i);
5715c50d8ae3SPaolo Bonzini 			kvm_for_each_memslot(memslot, slots) {
5716c50d8ae3SPaolo Bonzini 				gfn_t start, end;
5717c50d8ae3SPaolo Bonzini 
5718c50d8ae3SPaolo Bonzini 				start = max(gfn_start, memslot->base_gfn);
5719c50d8ae3SPaolo Bonzini 				end = min(gfn_end, memslot->base_gfn + memslot->npages);
5720c50d8ae3SPaolo Bonzini 				if (start >= end)
5721c50d8ae3SPaolo Bonzini 					continue;
5722c50d8ae3SPaolo Bonzini 
5723269e9552SHamza Mahfooz 				flush = slot_handle_level_range(kvm,
5724269e9552SHamza Mahfooz 						(const struct kvm_memory_slot *) memslot,
5725e2209710SBen Gardon 						kvm_zap_rmapp, PG_LEVEL_4K,
5726e2209710SBen Gardon 						KVM_MAX_HUGEPAGE_LEVEL, start,
5727e2209710SBen Gardon 						end - 1, true, flush);
5728c50d8ae3SPaolo Bonzini 			}
5729c50d8ae3SPaolo Bonzini 		}
5730faaf05b0SBen Gardon 		if (flush)
57312822da44SMaxim Levitsky 			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
57322822da44SMaxim Levitsky 							   gfn_end - gfn_start);
5733e2209710SBen Gardon 	}
57346103bc07SBen Gardon 
57356103bc07SBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
57366103bc07SBen Gardon 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
57376103bc07SBen Gardon 			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
57385a324c24SSean Christopherson 							  gfn_end, flush);
57392822da44SMaxim Levitsky 		if (flush)
57402822da44SMaxim Levitsky 			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
57412822da44SMaxim Levitsky 							   gfn_end - gfn_start);
57426103bc07SBen Gardon 	}
57435a324c24SSean Christopherson 
57445a324c24SSean Christopherson 	if (flush)
57455a324c24SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
57465a324c24SSean Christopherson 
5747edb298c6SMaxim Levitsky 	kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
5748edb298c6SMaxim Levitsky 
57495a324c24SSean Christopherson 	write_unlock(&kvm->mmu_lock);
5750c50d8ae3SPaolo Bonzini }
5751c50d8ae3SPaolo Bonzini 
5752c50d8ae3SPaolo Bonzini static bool slot_rmap_write_protect(struct kvm *kvm,
57530a234f5dSSean Christopherson 				    struct kvm_rmap_head *rmap_head,
5754269e9552SHamza Mahfooz 				    const struct kvm_memory_slot *slot)
5755c50d8ae3SPaolo Bonzini {
5756c50d8ae3SPaolo Bonzini 	return __rmap_write_protect(kvm, rmap_head, false);
5757c50d8ae3SPaolo Bonzini }
5758c50d8ae3SPaolo Bonzini 
5759c50d8ae3SPaolo Bonzini void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
5760269e9552SHamza Mahfooz 				      const struct kvm_memory_slot *memslot,
57613c9bd400SJay Zhou 				      int start_level)
5762c50d8ae3SPaolo Bonzini {
5763e2209710SBen Gardon 	bool flush = false;
5764c50d8ae3SPaolo Bonzini 
5765e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5766531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
57673c9bd400SJay Zhou 		flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5768e2209710SBen Gardon 					  start_level, KVM_MAX_HUGEPAGE_LEVEL,
5769e2209710SBen Gardon 					  false);
5770531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5771e2209710SBen Gardon 	}
5772c50d8ae3SPaolo Bonzini 
577324ae4cfaSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
577424ae4cfaSBen Gardon 		read_lock(&kvm->mmu_lock);
577524ae4cfaSBen Gardon 		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
577624ae4cfaSBen Gardon 		read_unlock(&kvm->mmu_lock);
577724ae4cfaSBen Gardon 	}
577824ae4cfaSBen Gardon 
5779c50d8ae3SPaolo Bonzini 	/*
5780c50d8ae3SPaolo Bonzini 	 * We can flush all the TLBs outside of the mmu lock without TLB
5781c50d8ae3SPaolo Bonzini 	 * corruption, since we only change SPTEs from writable to
5782c50d8ae3SPaolo Bonzini 	 * read-only, so the only case we need to care about is an SPTE
5783c50d8ae3SPaolo Bonzini 	 * changing from present to present (changing an SPTE from present
5784c50d8ae3SPaolo Bonzini 	 * to non-present flushes all the TLBs immediately).  In other
5785c50d8ae3SPaolo Bonzini 	 * words, the only case we care about is mmu_spte_update(), where
57865fc3424fSSean Christopherson 	 * we check Host-writable | MMU-writable instead of
57875fc3424fSSean Christopherson 	 * PT_WRITABLE_MASK, which means it no longer depends on
57885fc3424fSSean Christopherson 	 * PT_WRITABLE_MASK.
5789c50d8ae3SPaolo Bonzini 	 */
5790c50d8ae3SPaolo Bonzini 	if (flush)
57917f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5792c50d8ae3SPaolo Bonzini }
5793c50d8ae3SPaolo Bonzini 
5794c50d8ae3SPaolo Bonzini static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
57950a234f5dSSean Christopherson 					 struct kvm_rmap_head *rmap_head,
5796269e9552SHamza Mahfooz 					 const struct kvm_memory_slot *slot)
5797c50d8ae3SPaolo Bonzini {
5798c50d8ae3SPaolo Bonzini 	u64 *sptep;
5799c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
5800c50d8ae3SPaolo Bonzini 	int need_tlb_flush = 0;
5801c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
5802c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5803c50d8ae3SPaolo Bonzini 
5804c50d8ae3SPaolo Bonzini restart:
5805c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
580657354682SSean Christopherson 		sp = sptep_to_sp(sptep);
5807c50d8ae3SPaolo Bonzini 		pfn = spte_to_pfn(*sptep);
5808c50d8ae3SPaolo Bonzini 
5809c50d8ae3SPaolo Bonzini 		/*
5810c50d8ae3SPaolo Bonzini 		 * We cannot create huge page mappings for indirect shadow
5811c50d8ae3SPaolo Bonzini 		 * pages, which are found on the last rmap (level = 1) when
5812c50d8ae3SPaolo Bonzini 		 * not using TDP; such shadow pages are synced with the guest
5813c50d8ae3SPaolo Bonzini 		 * page table, and the guest page table uses 4K mappings if
5814c50d8ae3SPaolo Bonzini 		 * the indirect sp has level = 1.
5815c50d8ae3SPaolo Bonzini 		 */
5816c50d8ae3SPaolo Bonzini 		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
58179eba50f8SSean Christopherson 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
58189eba50f8SSean Christopherson 							       pfn, PG_LEVEL_NUM)) {
581971f51d2cSMingwei Zhang 			pte_list_remove(kvm, rmap_head, sptep);
5820c50d8ae3SPaolo Bonzini 
5821c50d8ae3SPaolo Bonzini 			if (kvm_available_flush_tlb_with_range())
5822c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
5823c50d8ae3SPaolo Bonzini 					KVM_PAGES_PER_HPAGE(sp->role.level));
5824c50d8ae3SPaolo Bonzini 			else
5825c50d8ae3SPaolo Bonzini 				need_tlb_flush = 1;
5826c50d8ae3SPaolo Bonzini 
5827c50d8ae3SPaolo Bonzini 			goto restart;
5828c50d8ae3SPaolo Bonzini 		}
5829c50d8ae3SPaolo Bonzini 	}
5830c50d8ae3SPaolo Bonzini 
5831c50d8ae3SPaolo Bonzini 	return need_tlb_flush;
5832c50d8ae3SPaolo Bonzini }
5833c50d8ae3SPaolo Bonzini 
5834c50d8ae3SPaolo Bonzini void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5835269e9552SHamza Mahfooz 				   const struct kvm_memory_slot *slot)
5836c50d8ae3SPaolo Bonzini {
583731c65657SColin Ian King 	bool flush = false;
58389eba50f8SSean Christopherson 
5839e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5840531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5841610265eaSDavid Matlack 		/*
5842610265eaSDavid Matlack 		 * Zap only 4k SPTEs since the legacy MMU only supports dirty
5843610265eaSDavid Matlack 		 * logging at a 4k granularity and never creates collapsible
5844610265eaSDavid Matlack 		 * 2m SPTEs during dirty logging.
5845610265eaSDavid Matlack 		 */
5846610265eaSDavid Matlack 		flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
5847302695a5SSean Christopherson 		if (flush)
5848302695a5SSean Christopherson 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
5849531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5850e2209710SBen Gardon 	}
58512db6f772SBen Gardon 
58522db6f772SBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
58532db6f772SBen Gardon 		read_lock(&kvm->mmu_lock);
58542db6f772SBen Gardon 		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
58552db6f772SBen Gardon 		if (flush)
58562db6f772SBen Gardon 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
58572db6f772SBen Gardon 		read_unlock(&kvm->mmu_lock);
58582db6f772SBen Gardon 	}
5859c50d8ae3SPaolo Bonzini }
5860c50d8ae3SPaolo Bonzini 
5861b3594ffbSSean Christopherson void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
58626c9dd6d2SPaolo Bonzini 					const struct kvm_memory_slot *memslot)
5863b3594ffbSSean Christopherson {
5864b3594ffbSSean Christopherson 	/*
58657f42aa76SSean Christopherson 	 * All current use cases for flushing the TLBs for a specific memslot
5866302695a5SSean Christopherson 	 * are related to dirty logging, and many do the TLB flush out of mmu_lock.
58677f42aa76SSean Christopherson 	 * The interaction between the various operations on a memslot must be
58687f42aa76SSean Christopherson 	 * serialized by slots_lock to ensure the TLB flush from one operation
58697f42aa76SSean Christopherson 	 * is observed by any other operation on the same memslot.
5870b3594ffbSSean Christopherson 	 */
5871b3594ffbSSean Christopherson 	lockdep_assert_held(&kvm->slots_lock);
5872cec37648SSean Christopherson 	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5873cec37648SSean Christopherson 					   memslot->npages);
5874b3594ffbSSean Christopherson }
5875b3594ffbSSean Christopherson 
5876c50d8ae3SPaolo Bonzini void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5877269e9552SHamza Mahfooz 				   const struct kvm_memory_slot *memslot)
5878c50d8ae3SPaolo Bonzini {
5879e2209710SBen Gardon 	bool flush = false;
5880c50d8ae3SPaolo Bonzini 
5881e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5882531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5883610265eaSDavid Matlack 		/*
5884610265eaSDavid Matlack 		 * Clear dirty bits only on 4k SPTEs since the legacy MMU only
5885610265eaSDavid Matlack 		 * supports dirty logging at a 4k granularity.
5886610265eaSDavid Matlack 		 */
5887610265eaSDavid Matlack 		flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
5888531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5889e2209710SBen Gardon 	}
5890c50d8ae3SPaolo Bonzini 
589124ae4cfaSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
589224ae4cfaSBen Gardon 		read_lock(&kvm->mmu_lock);
589324ae4cfaSBen Gardon 		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
589424ae4cfaSBen Gardon 		read_unlock(&kvm->mmu_lock);
589524ae4cfaSBen Gardon 	}
589624ae4cfaSBen Gardon 
5897c50d8ae3SPaolo Bonzini 	/*
5898c50d8ae3SPaolo Bonzini 	 * It's also safe to flush TLBs out of mmu lock here as currently this
5899c50d8ae3SPaolo Bonzini 	 * function is only used for dirty logging, in which case flushing TLB
5900c50d8ae3SPaolo Bonzini 	 * out of mmu lock also guarantees no dirty pages will be lost in
5901c50d8ae3SPaolo Bonzini 	 * dirty_bitmap.
5902c50d8ae3SPaolo Bonzini 	 */
5903c50d8ae3SPaolo Bonzini 	if (flush)
59047f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5905c50d8ae3SPaolo Bonzini }
5906c50d8ae3SPaolo Bonzini 
5907c50d8ae3SPaolo Bonzini void kvm_mmu_zap_all(struct kvm *kvm)
5908c50d8ae3SPaolo Bonzini {
5909c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5910c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5911c50d8ae3SPaolo Bonzini 	int ign;
5912c50d8ae3SPaolo Bonzini 
5913531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5914c50d8ae3SPaolo Bonzini restart:
5915c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
5916f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5917c50d8ae3SPaolo Bonzini 			continue;
5918c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
5919c50d8ae3SPaolo Bonzini 			goto restart;
5920531810caSBen Gardon 		if (cond_resched_rwlock_write(&kvm->mmu_lock))
5921c50d8ae3SPaolo Bonzini 			goto restart;
5922c50d8ae3SPaolo Bonzini 	}
5923c50d8ae3SPaolo Bonzini 
5924c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
5925faaf05b0SBen Gardon 
5926897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
5927faaf05b0SBen Gardon 		kvm_tdp_mmu_zap_all(kvm);
5928faaf05b0SBen Gardon 
5929531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5930c50d8ae3SPaolo Bonzini }
5931c50d8ae3SPaolo Bonzini 
5932c50d8ae3SPaolo Bonzini void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5933c50d8ae3SPaolo Bonzini {
5934c50d8ae3SPaolo Bonzini 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
5935c50d8ae3SPaolo Bonzini 
5936c50d8ae3SPaolo Bonzini 	gen &= MMIO_SPTE_GEN_MASK;
5937c50d8ae3SPaolo Bonzini 
5938c50d8ae3SPaolo Bonzini 	/*
5939c50d8ae3SPaolo Bonzini 	 * Generation numbers are incremented in multiples of the number of
5940c50d8ae3SPaolo Bonzini 	 * address spaces in order to provide unique generations across all
5941c50d8ae3SPaolo Bonzini 	 * address spaces.  Strip what is effectively the address space
5942c50d8ae3SPaolo Bonzini 	 * modifier prior to checking for a wrap of the MMIO generation so
5943c50d8ae3SPaolo Bonzini 	 * that a wrap in any address space is detected.
5944c50d8ae3SPaolo Bonzini 	 */
5945c50d8ae3SPaolo Bonzini 	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
5946c50d8ae3SPaolo Bonzini 
5947c50d8ae3SPaolo Bonzini 	/*
5948c50d8ae3SPaolo Bonzini 	 * The very rare case: if the MMIO generation number has wrapped,
5949c50d8ae3SPaolo Bonzini 	 * zap all shadow pages.
5950c50d8ae3SPaolo Bonzini 	 */
5951c50d8ae3SPaolo Bonzini 	if (unlikely(gen == 0)) {
5952c50d8ae3SPaolo Bonzini 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5953c50d8ae3SPaolo Bonzini 		kvm_mmu_zap_all_fast(kvm);
5954c50d8ae3SPaolo Bonzini 	}
5955c50d8ae3SPaolo Bonzini }
5956c50d8ae3SPaolo Bonzini 
5957c50d8ae3SPaolo Bonzini static unsigned long
5958c50d8ae3SPaolo Bonzini mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5959c50d8ae3SPaolo Bonzini {
5960c50d8ae3SPaolo Bonzini 	struct kvm *kvm;
5961c50d8ae3SPaolo Bonzini 	int nr_to_scan = sc->nr_to_scan;
5962c50d8ae3SPaolo Bonzini 	unsigned long freed = 0;
5963c50d8ae3SPaolo Bonzini 
5964c50d8ae3SPaolo Bonzini 	mutex_lock(&kvm_lock);
5965c50d8ae3SPaolo Bonzini 
5966c50d8ae3SPaolo Bonzini 	list_for_each_entry(kvm, &vm_list, vm_list) {
5967c50d8ae3SPaolo Bonzini 		int idx;
5968c50d8ae3SPaolo Bonzini 		LIST_HEAD(invalid_list);
5969c50d8ae3SPaolo Bonzini 
5970c50d8ae3SPaolo Bonzini 		/*
5971c50d8ae3SPaolo Bonzini 		 * Never scan more than sc->nr_to_scan VM instances.
5972c50d8ae3SPaolo Bonzini 		 * In practice this condition is never hit, since we do not try
5973c50d8ae3SPaolo Bonzini 		 * to shrink more than one VM and it is very unlikely to see
5974c50d8ae3SPaolo Bonzini 		 * !n_used_mmu_pages so many times.
5975c50d8ae3SPaolo Bonzini 		 */
5976c50d8ae3SPaolo Bonzini 		if (!nr_to_scan--)
5977c50d8ae3SPaolo Bonzini 			break;
5978c50d8ae3SPaolo Bonzini 		/*
5979c50d8ae3SPaolo Bonzini 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5980c50d8ae3SPaolo Bonzini 		 * here. We may skip a VM instance erroneously, but we do not
5981c50d8ae3SPaolo Bonzini 		 * want to shrink a VM that has only just started to populate its MMU
5982c50d8ae3SPaolo Bonzini 		 * anyway.
5983c50d8ae3SPaolo Bonzini 		 */
5984c50d8ae3SPaolo Bonzini 		if (!kvm->arch.n_used_mmu_pages &&
5985c50d8ae3SPaolo Bonzini 		    !kvm_has_zapped_obsolete_pages(kvm))
5986c50d8ae3SPaolo Bonzini 			continue;
5987c50d8ae3SPaolo Bonzini 
5988c50d8ae3SPaolo Bonzini 		idx = srcu_read_lock(&kvm->srcu);
5989531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5990c50d8ae3SPaolo Bonzini 
5991c50d8ae3SPaolo Bonzini 		if (kvm_has_zapped_obsolete_pages(kvm)) {
5992c50d8ae3SPaolo Bonzini 			kvm_mmu_commit_zap_page(kvm,
5993c50d8ae3SPaolo Bonzini 			      &kvm->arch.zapped_obsolete_pages);
5994c50d8ae3SPaolo Bonzini 			goto unlock;
5995c50d8ae3SPaolo Bonzini 		}
5996c50d8ae3SPaolo Bonzini 
5997ebdb292dSSean Christopherson 		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
5998c50d8ae3SPaolo Bonzini 
5999c50d8ae3SPaolo Bonzini unlock:
6000531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
6001c50d8ae3SPaolo Bonzini 		srcu_read_unlock(&kvm->srcu, idx);
6002c50d8ae3SPaolo Bonzini 
6003c50d8ae3SPaolo Bonzini 		/*
6004c50d8ae3SPaolo Bonzini 		 * unfair on small ones
6005c50d8ae3SPaolo Bonzini 		 * per-vm shrinkers cry out
6006c50d8ae3SPaolo Bonzini 		 * sadness comes quickly
6007c50d8ae3SPaolo Bonzini 		 */
6008c50d8ae3SPaolo Bonzini 		list_move_tail(&kvm->vm_list, &vm_list);
6009c50d8ae3SPaolo Bonzini 		break;
6010c50d8ae3SPaolo Bonzini 	}
6011c50d8ae3SPaolo Bonzini 
6012c50d8ae3SPaolo Bonzini 	mutex_unlock(&kvm_lock);
6013c50d8ae3SPaolo Bonzini 	return freed;
6014c50d8ae3SPaolo Bonzini }
6015c50d8ae3SPaolo Bonzini 
6016c50d8ae3SPaolo Bonzini static unsigned long
6017c50d8ae3SPaolo Bonzini mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
6018c50d8ae3SPaolo Bonzini {
6019c50d8ae3SPaolo Bonzini 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
6020c50d8ae3SPaolo Bonzini }
6021c50d8ae3SPaolo Bonzini 
6022c50d8ae3SPaolo Bonzini static struct shrinker mmu_shrinker = {
6023c50d8ae3SPaolo Bonzini 	.count_objects = mmu_shrink_count,
6024c50d8ae3SPaolo Bonzini 	.scan_objects = mmu_shrink_scan,
6025c50d8ae3SPaolo Bonzini 	.seeks = DEFAULT_SEEKS * 10,
6026c50d8ae3SPaolo Bonzini };
6027c50d8ae3SPaolo Bonzini 
6028c50d8ae3SPaolo Bonzini static void mmu_destroy_caches(void)
6029c50d8ae3SPaolo Bonzini {
6030c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(pte_list_desc_cache);
6031c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(mmu_page_header_cache);
6032c50d8ae3SPaolo Bonzini }
6033c50d8ae3SPaolo Bonzini 
6034c50d8ae3SPaolo Bonzini static bool get_nx_auto_mode(void)
6035c50d8ae3SPaolo Bonzini {
6036c50d8ae3SPaolo Bonzini 	/* Return true when CPU has the bug, and mitigations are ON */
6037c50d8ae3SPaolo Bonzini 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
6038c50d8ae3SPaolo Bonzini }
6039c50d8ae3SPaolo Bonzini 
6040c50d8ae3SPaolo Bonzini static void __set_nx_huge_pages(bool val)
6041c50d8ae3SPaolo Bonzini {
6042c50d8ae3SPaolo Bonzini 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
6043c50d8ae3SPaolo Bonzini }
6044c50d8ae3SPaolo Bonzini 
6045c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
6046c50d8ae3SPaolo Bonzini {
6047c50d8ae3SPaolo Bonzini 	bool old_val = nx_huge_pages;
6048c50d8ae3SPaolo Bonzini 	bool new_val;
6049c50d8ae3SPaolo Bonzini 
6050c50d8ae3SPaolo Bonzini 	/* In "auto" mode, deploy the workaround only if the CPU has the bug. */
6051c50d8ae3SPaolo Bonzini 	if (sysfs_streq(val, "off"))
6052c50d8ae3SPaolo Bonzini 		new_val = 0;
6053c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "force"))
6054c50d8ae3SPaolo Bonzini 		new_val = 1;
6055c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "auto"))
6056c50d8ae3SPaolo Bonzini 		new_val = get_nx_auto_mode();
6057c50d8ae3SPaolo Bonzini 	else if (strtobool(val, &new_val) < 0)
6058c50d8ae3SPaolo Bonzini 		return -EINVAL;
6059c50d8ae3SPaolo Bonzini 
6060c50d8ae3SPaolo Bonzini 	__set_nx_huge_pages(new_val);
6061c50d8ae3SPaolo Bonzini 
6062c50d8ae3SPaolo Bonzini 	if (new_val != old_val) {
6063c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
6064c50d8ae3SPaolo Bonzini 
6065c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
6066c50d8ae3SPaolo Bonzini 
6067c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list) {
6068c50d8ae3SPaolo Bonzini 			mutex_lock(&kvm->slots_lock);
6069c50d8ae3SPaolo Bonzini 			kvm_mmu_zap_all_fast(kvm);
6070c50d8ae3SPaolo Bonzini 			mutex_unlock(&kvm->slots_lock);
6071c50d8ae3SPaolo Bonzini 
6072c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6073c50d8ae3SPaolo Bonzini 		}
6074c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
6075c50d8ae3SPaolo Bonzini 	}
6076c50d8ae3SPaolo Bonzini 
6077c50d8ae3SPaolo Bonzini 	return 0;
6078c50d8ae3SPaolo Bonzini }
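
/*
 * Illustrative usage (a sketch; the parameter path follows the standard
 * module-param layout and is an assumption here): this handler backs the
 * "nx_huge_pages" module parameter, so the mitigation can be toggled at
 * runtime with e.g.
 *
 *	echo force > /sys/module/kvm/parameters/nx_huge_pages
 *
 * which, on a value change, zaps all shadow pages in every VM and wakes the
 * per-VM NX huge page recovery threads.
 */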
6079c50d8ae3SPaolo Bonzini 
6080c50d8ae3SPaolo Bonzini int kvm_mmu_module_init(void)
6081c50d8ae3SPaolo Bonzini {
6082c50d8ae3SPaolo Bonzini 	int ret = -ENOMEM;
6083c50d8ae3SPaolo Bonzini 
6084c50d8ae3SPaolo Bonzini 	if (nx_huge_pages == -1)
6085c50d8ae3SPaolo Bonzini 		__set_nx_huge_pages(get_nx_auto_mode());
6086c50d8ae3SPaolo Bonzini 
6087c50d8ae3SPaolo Bonzini 	/*
6088c50d8ae3SPaolo Bonzini 	 * MMU roles use union aliasing which is, generally speaking,
6089c50d8ae3SPaolo Bonzini 	 * undefined behavior. However, we supposedly know how compilers behave
6090c50d8ae3SPaolo Bonzini 	 * and the current status quo is unlikely to change. Guardians below are
6091c50d8ae3SPaolo Bonzini 	 * supposed to let us know if the assumption becomes false.
6092c50d8ae3SPaolo Bonzini 	 */
6093c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
6094c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
6095c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
6096c50d8ae3SPaolo Bonzini 
6097c50d8ae3SPaolo Bonzini 	kvm_mmu_reset_all_pte_masks();
6098c50d8ae3SPaolo Bonzini 
6099c50d8ae3SPaolo Bonzini 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
6100c50d8ae3SPaolo Bonzini 					    sizeof(struct pte_list_desc),
6101c50d8ae3SPaolo Bonzini 					    0, SLAB_ACCOUNT, NULL);
6102c50d8ae3SPaolo Bonzini 	if (!pte_list_desc_cache)
6103c50d8ae3SPaolo Bonzini 		goto out;
6104c50d8ae3SPaolo Bonzini 
6105c50d8ae3SPaolo Bonzini 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
6106c50d8ae3SPaolo Bonzini 						  sizeof(struct kvm_mmu_page),
6107c50d8ae3SPaolo Bonzini 						  0, SLAB_ACCOUNT, NULL);
6108c50d8ae3SPaolo Bonzini 	if (!mmu_page_header_cache)
6109c50d8ae3SPaolo Bonzini 		goto out;
6110c50d8ae3SPaolo Bonzini 
6111c50d8ae3SPaolo Bonzini 	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
6112c50d8ae3SPaolo Bonzini 		goto out;
6113c50d8ae3SPaolo Bonzini 
6114c50d8ae3SPaolo Bonzini 	ret = register_shrinker(&mmu_shrinker);
6115c50d8ae3SPaolo Bonzini 	if (ret)
6116c50d8ae3SPaolo Bonzini 		goto out;
6117c50d8ae3SPaolo Bonzini 
6118c50d8ae3SPaolo Bonzini 	return 0;
6119c50d8ae3SPaolo Bonzini 
6120c50d8ae3SPaolo Bonzini out:
6121c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
6122c50d8ae3SPaolo Bonzini 	return ret;
6123c50d8ae3SPaolo Bonzini }
6124c50d8ae3SPaolo Bonzini 
6125c50d8ae3SPaolo Bonzini /*
6126c50d8ae3SPaolo Bonzini  * Calculate mmu pages needed for kvm.
6127c50d8ae3SPaolo Bonzini  */
6128c50d8ae3SPaolo Bonzini unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
6129c50d8ae3SPaolo Bonzini {
6130c50d8ae3SPaolo Bonzini 	unsigned long nr_mmu_pages;
6131c50d8ae3SPaolo Bonzini 	unsigned long nr_pages = 0;
6132c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
6133c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
6134c50d8ae3SPaolo Bonzini 	int i;
6135c50d8ae3SPaolo Bonzini 
6136c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
6137c50d8ae3SPaolo Bonzini 		slots = __kvm_memslots(kvm, i);
6138c50d8ae3SPaolo Bonzini 
6139c50d8ae3SPaolo Bonzini 		kvm_for_each_memslot(memslot, slots)
6140c50d8ae3SPaolo Bonzini 			nr_pages += memslot->npages;
6141c50d8ae3SPaolo Bonzini 	}
6142c50d8ae3SPaolo Bonzini 
6143c50d8ae3SPaolo Bonzini 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
6144c50d8ae3SPaolo Bonzini 	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
6145c50d8ae3SPaolo Bonzini 
6146c50d8ae3SPaolo Bonzini 	return nr_mmu_pages;
6147c50d8ae3SPaolo Bonzini }
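
/*
 * Worked example (illustrative, assuming KVM_PERMILLE_MMU_PAGES is 20, i.e.
 * 2%): a VM with 4GiB of memslots spans 1,048,576 4KiB pages, so
 * nr_mmu_pages = 1048576 * 20 / 1000 = 20971, which is well above
 * KVM_MIN_ALLOC_MMU_PAGES and is therefore used as-is.
 */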
6148c50d8ae3SPaolo Bonzini 
6149c50d8ae3SPaolo Bonzini void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
6150c50d8ae3SPaolo Bonzini {
6151c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
6152c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.root_mmu);
6153c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
6154c50d8ae3SPaolo Bonzini 	mmu_free_memory_caches(vcpu);
6155c50d8ae3SPaolo Bonzini }
6156c50d8ae3SPaolo Bonzini 
6157c50d8ae3SPaolo Bonzini void kvm_mmu_module_exit(void)
6158c50d8ae3SPaolo Bonzini {
6159c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
6160c50d8ae3SPaolo Bonzini 	percpu_counter_destroy(&kvm_total_used_mmu_pages);
6161c50d8ae3SPaolo Bonzini 	unregister_shrinker(&mmu_shrinker);
6162c50d8ae3SPaolo Bonzini 	mmu_audit_disable();
6163c50d8ae3SPaolo Bonzini }
6164c50d8ae3SPaolo Bonzini 
61654dfe4f40SJunaid Shahid static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
6166c50d8ae3SPaolo Bonzini {
61674dfe4f40SJunaid Shahid 	bool was_recovery_enabled, is_recovery_enabled;
61684dfe4f40SJunaid Shahid 	uint old_period, new_period;
6169c50d8ae3SPaolo Bonzini 	int err;
6170c50d8ae3SPaolo Bonzini 
61714dfe4f40SJunaid Shahid 	was_recovery_enabled = nx_huge_pages_recovery_ratio;
61724dfe4f40SJunaid Shahid 	old_period = nx_huge_pages_recovery_period_ms;
61734dfe4f40SJunaid Shahid 
6174c50d8ae3SPaolo Bonzini 	err = param_set_uint(val, kp);
6175c50d8ae3SPaolo Bonzini 	if (err)
6176c50d8ae3SPaolo Bonzini 		return err;
6177c50d8ae3SPaolo Bonzini 
61784dfe4f40SJunaid Shahid 	is_recovery_enabled = nx_huge_pages_recovery_ratio;
61794dfe4f40SJunaid Shahid 	new_period = nx_huge_pages_recovery_period_ms;
61804dfe4f40SJunaid Shahid 
61814dfe4f40SJunaid Shahid 	if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
61824dfe4f40SJunaid Shahid 	    (!was_recovery_enabled || old_period > new_period)) {
6183c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
6184c50d8ae3SPaolo Bonzini 
6185c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
6186c50d8ae3SPaolo Bonzini 
6187c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list)
6188c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6189c50d8ae3SPaolo Bonzini 
6190c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
6191c50d8ae3SPaolo Bonzini 	}
6192c50d8ae3SPaolo Bonzini 
6193c50d8ae3SPaolo Bonzini 	return err;
6194c50d8ae3SPaolo Bonzini }
6195c50d8ae3SPaolo Bonzini 
6196c50d8ae3SPaolo Bonzini static void kvm_recover_nx_lpages(struct kvm *kvm)
6197c50d8ae3SPaolo Bonzini {
6198ade74e14SSean Christopherson 	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
6199c50d8ae3SPaolo Bonzini 	int rcu_idx;
6200c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
6201c50d8ae3SPaolo Bonzini 	unsigned int ratio;
6202c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
6203048f4980SSean Christopherson 	bool flush = false;
6204c50d8ae3SPaolo Bonzini 	ulong to_zap;
6205c50d8ae3SPaolo Bonzini 
6206c50d8ae3SPaolo Bonzini 	rcu_idx = srcu_read_lock(&kvm->srcu);
6207531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
6208c50d8ae3SPaolo Bonzini 
6209c50d8ae3SPaolo Bonzini 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
6210ade74e14SSean Christopherson 	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
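	/*
	 * Editor's note: with the default ratio of 60, one sixtieth of the
	 * currently NX-split huge pages is reclaimed per pass, e.g.
	 * DIV_ROUND_UP(3000, 60) == 50 pages; a ratio of 0 yields to_zap == 0
	 * and disables zapping entirely.
	 */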
62117d919c7aSSean Christopherson 	for ( ; to_zap; --to_zap) {
62127d919c7aSSean Christopherson 		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
62137d919c7aSSean Christopherson 			break;
62147d919c7aSSean Christopherson 
6215c50d8ae3SPaolo Bonzini 		/*
6216c50d8ae3SPaolo Bonzini 		 * We use a separate list instead of just using active_mmu_pages
6217c50d8ae3SPaolo Bonzini 		 * because the number of lpage_disallowed pages is expected to
6218c50d8ae3SPaolo Bonzini 		 * be relatively small compared to the total.
6219c50d8ae3SPaolo Bonzini 		 */
6220c50d8ae3SPaolo Bonzini 		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
6221c50d8ae3SPaolo Bonzini 				      struct kvm_mmu_page,
6222c50d8ae3SPaolo Bonzini 				      lpage_disallowed_link);
6223c50d8ae3SPaolo Bonzini 		WARN_ON_ONCE(!sp->lpage_disallowed);
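		/*
		 * Editor's note: TDP MMU pages are zapped in place and report
		 * whether a remote TLB flush is still owed, while legacy
		 * shadow pages are queued on invalid_list and committed by the
		 * flush-or-zap calls below.  The WARN above checks the flag is
		 * still set before zapping; the WARN in the else branch checks
		 * that the zap cleared it.
		 */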
6224897218ffSPaolo Bonzini 		if (is_tdp_mmu_page(sp)) {
6225315f02c6SPaolo Bonzini 			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
62268d1a182eSBen Gardon 		} else {
6227c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
6228c50d8ae3SPaolo Bonzini 			WARN_ON_ONCE(sp->lpage_disallowed);
622929cf0f50SBen Gardon 		}
6230c50d8ae3SPaolo Bonzini 
6231531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6232048f4980SSean Christopherson 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6233531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
6234048f4980SSean Christopherson 			flush = false;
6235c50d8ae3SPaolo Bonzini 		}
6236c50d8ae3SPaolo Bonzini 	}
6237048f4980SSean Christopherson 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6238c50d8ae3SPaolo Bonzini 
6239531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
6240c50d8ae3SPaolo Bonzini 	srcu_read_unlock(&kvm->srcu, rcu_idx);
6241c50d8ae3SPaolo Bonzini }
6242c50d8ae3SPaolo Bonzini 
6243c50d8ae3SPaolo Bonzini static long get_nx_lpage_recovery_timeout(u64 start_time)
6244c50d8ae3SPaolo Bonzini {
62454dfe4f40SJunaid Shahid 	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
62464dfe4f40SJunaid Shahid 	uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
62474dfe4f40SJunaid Shahid 
62484dfe4f40SJunaid Shahid 	if (!period && ratio) {
62494dfe4f40SJunaid Shahid 		/* Make sure the period is not less than one second.  */
62504dfe4f40SJunaid Shahid 		ratio = min(ratio, 3600u);
62514dfe4f40SJunaid Shahid 		period = 60 * 60 * 1000 / ratio;
62524dfe4f40SJunaid Shahid 	}
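	/*
	 * Editor's note: when only the ratio is set, the derived period is
	 * 3,600,000 ms / ratio, so the default ratio of 60 gives one recovery
	 * pass per minute; clamping the ratio at 3600 keeps the derived period
	 * from dropping below 1000 ms.
	 */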
62534dfe4f40SJunaid Shahid 
62544dfe4f40SJunaid Shahid 	return READ_ONCE(nx_huge_pages) && ratio
62554dfe4f40SJunaid Shahid 		? start_time + msecs_to_jiffies(period) - get_jiffies_64()
6256c50d8ae3SPaolo Bonzini 		: MAX_SCHEDULE_TIMEOUT;
6257c50d8ae3SPaolo Bonzini }
6258c50d8ae3SPaolo Bonzini 
6259c50d8ae3SPaolo Bonzini static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
6260c50d8ae3SPaolo Bonzini {
6261c50d8ae3SPaolo Bonzini 	u64 start_time;
6262c50d8ae3SPaolo Bonzini 	long remaining_time;
6263c50d8ae3SPaolo Bonzini 
6264c50d8ae3SPaolo Bonzini 	while (true) {
6265c50d8ae3SPaolo Bonzini 		start_time = get_jiffies_64();
6266c50d8ae3SPaolo Bonzini 		remaining_time = get_nx_lpage_recovery_timeout(start_time);
6267c50d8ae3SPaolo Bonzini 
6268c50d8ae3SPaolo Bonzini 		set_current_state(TASK_INTERRUPTIBLE);
6269c50d8ae3SPaolo Bonzini 		while (!kthread_should_stop() && remaining_time > 0) {
6270c50d8ae3SPaolo Bonzini 			schedule_timeout(remaining_time);
6271c50d8ae3SPaolo Bonzini 			remaining_time = get_nx_lpage_recovery_timeout(start_time);
6272c50d8ae3SPaolo Bonzini 			set_current_state(TASK_INTERRUPTIBLE);
6273c50d8ae3SPaolo Bonzini 		}
6274c50d8ae3SPaolo Bonzini 
6275c50d8ae3SPaolo Bonzini 		set_current_state(TASK_RUNNING);
6276c50d8ae3SPaolo Bonzini 
6277c50d8ae3SPaolo Bonzini 		if (kthread_should_stop())
6278c50d8ae3SPaolo Bonzini 			return 0;
6279c50d8ae3SPaolo Bonzini 
6280c50d8ae3SPaolo Bonzini 		kvm_recover_nx_lpages(kvm);
6281c50d8ae3SPaolo Bonzini 	}
6282c50d8ae3SPaolo Bonzini }
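/*
 * Editor's note: the worker recomputes its timeout after every wakeup, so
 * runtime changes to nx_huge_pages, the recovery ratio or the period take
 * effect on the next iteration without restarting the thread;
 * kthread_should_stop() is the only exit from the loop.
 */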
6283c50d8ae3SPaolo Bonzini 
6284c50d8ae3SPaolo Bonzini int kvm_mmu_post_init_vm(struct kvm *kvm)
6285c50d8ae3SPaolo Bonzini {
6286c50d8ae3SPaolo Bonzini 	int err;
6287c50d8ae3SPaolo Bonzini 
6288c50d8ae3SPaolo Bonzini 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6289c50d8ae3SPaolo Bonzini 					  "kvm-nx-lpage-recovery",
6290c50d8ae3SPaolo Bonzini 					  &kvm->arch.nx_lpage_recovery_thread);
6291c50d8ae3SPaolo Bonzini 	if (!err)
6292c50d8ae3SPaolo Bonzini 		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6293c50d8ae3SPaolo Bonzini 
6294c50d8ae3SPaolo Bonzini 	return err;
6295c50d8ae3SPaolo Bonzini }
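/*
 * Editor's note: the kthread_unpark() here suggests the worker-thread helper
 * hands back a parked thread, so recovery only starts running once creation
 * has succeeded; on failure there is nothing to clean up in this function.
 */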
6296c50d8ae3SPaolo Bonzini 
6297c50d8ae3SPaolo Bonzini void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6298c50d8ae3SPaolo Bonzini {
6299c50d8ae3SPaolo Bonzini 	if (kvm->arch.nx_lpage_recovery_thread)
6300c50d8ae3SPaolo Bonzini 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6301c50d8ae3SPaolo Bonzini }
6302