xref: /linux/arch/x86/kvm/mmu/mmu.c (revision 746700d21fd52399c97aeb7791584bbf5426983c)
1c50d8ae3SPaolo Bonzini // SPDX-License-Identifier: GPL-2.0-only
2c50d8ae3SPaolo Bonzini /*
3c50d8ae3SPaolo Bonzini  * Kernel-based Virtual Machine driver for Linux
4c50d8ae3SPaolo Bonzini  *
5c50d8ae3SPaolo Bonzini  * This module enables machines with Intel VT-x extensions to run virtual
6c50d8ae3SPaolo Bonzini  * machines without emulation or binary translation.
7c50d8ae3SPaolo Bonzini  *
8c50d8ae3SPaolo Bonzini  * MMU support
9c50d8ae3SPaolo Bonzini  *
10c50d8ae3SPaolo Bonzini  * Copyright (C) 2006 Qumranet, Inc.
11c50d8ae3SPaolo Bonzini  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12c50d8ae3SPaolo Bonzini  *
13c50d8ae3SPaolo Bonzini  * Authors:
14c50d8ae3SPaolo Bonzini  *   Yaniv Kamay  <yaniv@qumranet.com>
15c50d8ae3SPaolo Bonzini  *   Avi Kivity   <avi@qumranet.com>
16c50d8ae3SPaolo Bonzini  */
17c50d8ae3SPaolo Bonzini 
18c50d8ae3SPaolo Bonzini #include "irq.h"
1988197e6aS彭浩(Richard) #include "ioapic.h"
20c50d8ae3SPaolo Bonzini #include "mmu.h"
216ca9a6f3SSean Christopherson #include "mmu_internal.h"
22fe5db27dSBen Gardon #include "tdp_mmu.h"
23c50d8ae3SPaolo Bonzini #include "x86.h"
24c50d8ae3SPaolo Bonzini #include "kvm_cache_regs.h"
252f728d66SSean Christopherson #include "kvm_emulate.h"
26c50d8ae3SPaolo Bonzini #include "cpuid.h"
275a9624afSPaolo Bonzini #include "spte.h"
28c50d8ae3SPaolo Bonzini 
29c50d8ae3SPaolo Bonzini #include <linux/kvm_host.h>
30c50d8ae3SPaolo Bonzini #include <linux/types.h>
31c50d8ae3SPaolo Bonzini #include <linux/string.h>
32c50d8ae3SPaolo Bonzini #include <linux/mm.h>
33c50d8ae3SPaolo Bonzini #include <linux/highmem.h>
34c50d8ae3SPaolo Bonzini #include <linux/moduleparam.h>
35c50d8ae3SPaolo Bonzini #include <linux/export.h>
36c50d8ae3SPaolo Bonzini #include <linux/swap.h>
37c50d8ae3SPaolo Bonzini #include <linux/hugetlb.h>
38c50d8ae3SPaolo Bonzini #include <linux/compiler.h>
39c50d8ae3SPaolo Bonzini #include <linux/srcu.h>
40c50d8ae3SPaolo Bonzini #include <linux/slab.h>
41c50d8ae3SPaolo Bonzini #include <linux/sched/signal.h>
42c50d8ae3SPaolo Bonzini #include <linux/uaccess.h>
43c50d8ae3SPaolo Bonzini #include <linux/hash.h>
44c50d8ae3SPaolo Bonzini #include <linux/kern_levels.h>
45c50d8ae3SPaolo Bonzini #include <linux/kthread.h>
46c50d8ae3SPaolo Bonzini 
47c50d8ae3SPaolo Bonzini #include <asm/page.h>
48eb243d1dSIngo Molnar #include <asm/memtype.h>
49c50d8ae3SPaolo Bonzini #include <asm/cmpxchg.h>
50c50d8ae3SPaolo Bonzini #include <asm/io.h>
514a98623dSSean Christopherson #include <asm/set_memory.h>
52c50d8ae3SPaolo Bonzini #include <asm/vmx.h>
53c50d8ae3SPaolo Bonzini #include <asm/kvm_page_track.h>
54c50d8ae3SPaolo Bonzini #include "trace.h"
55c50d8ae3SPaolo Bonzini 
56fc9bf2e0SSean Christopherson #include "paging.h"
57fc9bf2e0SSean Christopherson 
58c50d8ae3SPaolo Bonzini extern bool itlb_multihit_kvm_mitigation;
59c50d8ae3SPaolo Bonzini 
60a9d6496dSShaokun Zhang int __read_mostly nx_huge_pages = -1;
61c50d8ae3SPaolo Bonzini #ifdef CONFIG_PREEMPT_RT
62c50d8ae3SPaolo Bonzini /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
63c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
64c50d8ae3SPaolo Bonzini #else
65c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
66c50d8ae3SPaolo Bonzini #endif
67c50d8ae3SPaolo Bonzini 
68c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
69c50d8ae3SPaolo Bonzini static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
70c50d8ae3SPaolo Bonzini 
71d5d6c18dSJoe Perches static const struct kernel_param_ops nx_huge_pages_ops = {
72c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages,
73c50d8ae3SPaolo Bonzini 	.get = param_get_bool,
74c50d8ae3SPaolo Bonzini };
75c50d8ae3SPaolo Bonzini 
76d5d6c18dSJoe Perches static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
77c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages_recovery_ratio,
78c50d8ae3SPaolo Bonzini 	.get = param_get_uint,
79c50d8ae3SPaolo Bonzini };
80c50d8ae3SPaolo Bonzini 
81c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
82c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages, "bool");
83c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
84c50d8ae3SPaolo Bonzini 		&nx_huge_pages_recovery_ratio, 0644);
85c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
86c50d8ae3SPaolo Bonzini 
8771fe7013SSean Christopherson static bool __read_mostly force_flush_and_sync_on_reuse;
8871fe7013SSean Christopherson module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
8971fe7013SSean Christopherson 
90c50d8ae3SPaolo Bonzini /*
91c50d8ae3SPaolo Bonzini  * When setting this variable to true it enables Two-Dimensional-Paging
92c50d8ae3SPaolo Bonzini  * where the hardware walks 2 page tables:
93c50d8ae3SPaolo Bonzini  * 1. the guest-virtual to guest-physical
94c50d8ae3SPaolo Bonzini  * 2. while doing 1. it walks guest-physical to host-physical
95c50d8ae3SPaolo Bonzini  * If the hardware supports that we don't need to do shadow paging.
96c50d8ae3SPaolo Bonzini  */
97c50d8ae3SPaolo Bonzini bool tdp_enabled = false;
98c50d8ae3SPaolo Bonzini 
991d92d2e8SSean Christopherson static int max_huge_page_level __read_mostly;
100*746700d2SWei Huang static int tdp_root_level __read_mostly;
10183013059SSean Christopherson static int max_tdp_level __read_mostly;
102703c335dSSean Christopherson 
103c50d8ae3SPaolo Bonzini enum {
104c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PAGE_FAULT,
105c50d8ae3SPaolo Bonzini 	AUDIT_POST_PAGE_FAULT,
106c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PTE_WRITE,
107c50d8ae3SPaolo Bonzini 	AUDIT_POST_PTE_WRITE,
108c50d8ae3SPaolo Bonzini 	AUDIT_PRE_SYNC,
109c50d8ae3SPaolo Bonzini 	AUDIT_POST_SYNC
110c50d8ae3SPaolo Bonzini };
111c50d8ae3SPaolo Bonzini 
112c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1135a9624afSPaolo Bonzini bool dbg = 0;
114c50d8ae3SPaolo Bonzini module_param(dbg, bool, 0644);
115c50d8ae3SPaolo Bonzini #endif
116c50d8ae3SPaolo Bonzini 
117c50d8ae3SPaolo Bonzini #define PTE_PREFETCH_NUM		8
118c50d8ae3SPaolo Bonzini 
119c50d8ae3SPaolo Bonzini #define PT32_LEVEL_BITS 10
120c50d8ae3SPaolo Bonzini 
121c50d8ae3SPaolo Bonzini #define PT32_LEVEL_SHIFT(level) \
122c50d8ae3SPaolo Bonzini 		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
123c50d8ae3SPaolo Bonzini 
124c50d8ae3SPaolo Bonzini #define PT32_LVL_OFFSET_MASK(level) \
125c50d8ae3SPaolo Bonzini 	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
126c50d8ae3SPaolo Bonzini 						* PT32_LEVEL_BITS))) - 1))
127c50d8ae3SPaolo Bonzini 
128c50d8ae3SPaolo Bonzini #define PT32_INDEX(address, level)\
129c50d8ae3SPaolo Bonzini 	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
130c50d8ae3SPaolo Bonzini 
131c50d8ae3SPaolo Bonzini 
132c50d8ae3SPaolo Bonzini #define PT32_BASE_ADDR_MASK PAGE_MASK
133c50d8ae3SPaolo Bonzini #define PT32_DIR_BASE_ADDR_MASK \
134c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
135c50d8ae3SPaolo Bonzini #define PT32_LVL_ADDR_MASK(level) \
136c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
137c50d8ae3SPaolo Bonzini 					    * PT32_LEVEL_BITS))) - 1))
138c50d8ae3SPaolo Bonzini 
139c50d8ae3SPaolo Bonzini #include <trace/events/kvm.h>
140c50d8ae3SPaolo Bonzini 
141dc1cff96SPeter Xu /* make pte_list_desc fit well in cache lines */
14213236e25SPeter Xu #define PTE_LIST_EXT 14
143c50d8ae3SPaolo Bonzini 
14413236e25SPeter Xu /*
14513236e25SPeter Xu  * Slight optimization of cacheline layout, by putting `more' and `spte_count'
14613236e25SPeter Xu  * at the start; then accessing it will only use one single cacheline for
14713236e25SPeter Xu  * either full (entries==PTE_LIST_EXT) case or entries<=6.
14813236e25SPeter Xu  */
149c50d8ae3SPaolo Bonzini struct pte_list_desc {
150c50d8ae3SPaolo Bonzini 	struct pte_list_desc *more;
15113236e25SPeter Xu 	/*
15213236e25SPeter Xu 	 * Stores number of entries stored in the pte_list_desc.  No need to be
15313236e25SPeter Xu 	 * u64 but just for easier alignment.  When PTE_LIST_EXT, means full.
15413236e25SPeter Xu 	 */
15513236e25SPeter Xu 	u64 spte_count;
15613236e25SPeter Xu 	u64 *sptes[PTE_LIST_EXT];
157c50d8ae3SPaolo Bonzini };
158c50d8ae3SPaolo Bonzini 
159c50d8ae3SPaolo Bonzini struct kvm_shadow_walk_iterator {
160c50d8ae3SPaolo Bonzini 	u64 addr;
161c50d8ae3SPaolo Bonzini 	hpa_t shadow_addr;
162c50d8ae3SPaolo Bonzini 	u64 *sptep;
163c50d8ae3SPaolo Bonzini 	int level;
164c50d8ae3SPaolo Bonzini 	unsigned index;
165c50d8ae3SPaolo Bonzini };
166c50d8ae3SPaolo Bonzini 
167c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
168c50d8ae3SPaolo Bonzini 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
169c50d8ae3SPaolo Bonzini 					 (_root), (_addr));                \
170c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			           \
171c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
172c50d8ae3SPaolo Bonzini 
173c50d8ae3SPaolo Bonzini #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
174c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
175c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			\
176c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
177c50d8ae3SPaolo Bonzini 
178c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
179c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
180c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker)) &&				\
181c50d8ae3SPaolo Bonzini 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
182c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&(_walker), spte))
183c50d8ae3SPaolo Bonzini 
184c50d8ae3SPaolo Bonzini static struct kmem_cache *pte_list_desc_cache;
18502c00b3aSBen Gardon struct kmem_cache *mmu_page_header_cache;
186c50d8ae3SPaolo Bonzini static struct percpu_counter kvm_total_used_mmu_pages;
187c50d8ae3SPaolo Bonzini 
188c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 spte);
189c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
190c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
191c50d8ae3SPaolo Bonzini 
192594e91a1SSean Christopherson struct kvm_mmu_role_regs {
193594e91a1SSean Christopherson 	const unsigned long cr0;
194594e91a1SSean Christopherson 	const unsigned long cr4;
195594e91a1SSean Christopherson 	const u64 efer;
196594e91a1SSean Christopherson };
197594e91a1SSean Christopherson 
198c50d8ae3SPaolo Bonzini #define CREATE_TRACE_POINTS
199c50d8ae3SPaolo Bonzini #include "mmutrace.h"
200c50d8ae3SPaolo Bonzini 
201594e91a1SSean Christopherson /*
202594e91a1SSean Christopherson  * Yes, lot's of underscores.  They're a hint that you probably shouldn't be
203594e91a1SSean Christopherson  * reading from the role_regs.  Once the mmu_role is constructed, it becomes
204594e91a1SSean Christopherson  * the single source of truth for the MMU's state.
205594e91a1SSean Christopherson  */
206594e91a1SSean Christopherson #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
207594e91a1SSean Christopherson static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
208594e91a1SSean Christopherson {									\
209594e91a1SSean Christopherson 	return !!(regs->reg & flag);					\
210594e91a1SSean Christopherson }
211594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
212594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
213594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
214594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
215594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
216594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
217594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
218594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
219594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
220594e91a1SSean Christopherson BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
221594e91a1SSean Christopherson 
22260667724SSean Christopherson /*
22360667724SSean Christopherson  * The MMU itself (with a valid role) is the single source of truth for the
22460667724SSean Christopherson  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
22560667724SSean Christopherson  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
22660667724SSean Christopherson  * and the vCPU may be incorrect/irrelevant.
22760667724SSean Christopherson  */
22860667724SSean Christopherson #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
22960667724SSean Christopherson static inline bool is_##reg##_##name(struct kvm_mmu *mmu)	\
23060667724SSean Christopherson {								\
23160667724SSean Christopherson 	return !!(mmu->mmu_role. base_or_ext . reg##_##name);	\
23260667724SSean Christopherson }
23360667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
23460667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
23560667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
23660667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
23760667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
23860667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
23960667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
24060667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
24160667724SSean Christopherson BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
24260667724SSean Christopherson 
243594e91a1SSean Christopherson static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
244594e91a1SSean Christopherson {
245594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = {
246594e91a1SSean Christopherson 		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
247594e91a1SSean Christopherson 		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
248594e91a1SSean Christopherson 		.efer = vcpu->arch.efer,
249594e91a1SSean Christopherson 	};
250594e91a1SSean Christopherson 
251594e91a1SSean Christopherson 	return regs;
252594e91a1SSean Christopherson }
253c50d8ae3SPaolo Bonzini 
254f4bd6f73SSean Christopherson static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
255f4bd6f73SSean Christopherson {
256f4bd6f73SSean Christopherson 	if (!____is_cr0_pg(regs))
257f4bd6f73SSean Christopherson 		return 0;
258f4bd6f73SSean Christopherson 	else if (____is_efer_lma(regs))
259f4bd6f73SSean Christopherson 		return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
260f4bd6f73SSean Christopherson 					       PT64_ROOT_4LEVEL;
261f4bd6f73SSean Christopherson 	else if (____is_cr4_pae(regs))
262f4bd6f73SSean Christopherson 		return PT32E_ROOT_LEVEL;
263f4bd6f73SSean Christopherson 	else
264f4bd6f73SSean Christopherson 		return PT32_ROOT_LEVEL;
265f4bd6f73SSean Christopherson }
266c50d8ae3SPaolo Bonzini 
267c50d8ae3SPaolo Bonzini static inline bool kvm_available_flush_tlb_with_range(void)
268c50d8ae3SPaolo Bonzini {
269afaf0b2fSSean Christopherson 	return kvm_x86_ops.tlb_remote_flush_with_range;
270c50d8ae3SPaolo Bonzini }
271c50d8ae3SPaolo Bonzini 
272c50d8ae3SPaolo Bonzini static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
273c50d8ae3SPaolo Bonzini 		struct kvm_tlb_range *range)
274c50d8ae3SPaolo Bonzini {
275c50d8ae3SPaolo Bonzini 	int ret = -ENOTSUPP;
276c50d8ae3SPaolo Bonzini 
277afaf0b2fSSean Christopherson 	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
278b3646477SJason Baron 		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
279c50d8ae3SPaolo Bonzini 
280c50d8ae3SPaolo Bonzini 	if (ret)
281c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
282c50d8ae3SPaolo Bonzini }
283c50d8ae3SPaolo Bonzini 
2842f2fad08SBen Gardon void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
285c50d8ae3SPaolo Bonzini 		u64 start_gfn, u64 pages)
286c50d8ae3SPaolo Bonzini {
287c50d8ae3SPaolo Bonzini 	struct kvm_tlb_range range;
288c50d8ae3SPaolo Bonzini 
289c50d8ae3SPaolo Bonzini 	range.start_gfn = start_gfn;
290c50d8ae3SPaolo Bonzini 	range.pages = pages;
291c50d8ae3SPaolo Bonzini 
292c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_range(kvm, &range);
293c50d8ae3SPaolo Bonzini }
294c50d8ae3SPaolo Bonzini 
2958f79b064SBen Gardon static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
2968f79b064SBen Gardon 			   unsigned int access)
2978f79b064SBen Gardon {
298c236d962SSean Christopherson 	u64 spte = make_mmio_spte(vcpu, gfn, access);
2998f79b064SBen Gardon 
300c236d962SSean Christopherson 	trace_mark_mmio_spte(sptep, gfn, spte);
301c236d962SSean Christopherson 	mmu_spte_set(sptep, spte);
302c50d8ae3SPaolo Bonzini }
303c50d8ae3SPaolo Bonzini 
304c50d8ae3SPaolo Bonzini static gfn_t get_mmio_spte_gfn(u64 spte)
305c50d8ae3SPaolo Bonzini {
306c50d8ae3SPaolo Bonzini 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
307c50d8ae3SPaolo Bonzini 
3088a967d65SPaolo Bonzini 	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
309c50d8ae3SPaolo Bonzini 	       & shadow_nonpresent_or_rsvd_mask;
310c50d8ae3SPaolo Bonzini 
311c50d8ae3SPaolo Bonzini 	return gpa >> PAGE_SHIFT;
312c50d8ae3SPaolo Bonzini }
313c50d8ae3SPaolo Bonzini 
314c50d8ae3SPaolo Bonzini static unsigned get_mmio_spte_access(u64 spte)
315c50d8ae3SPaolo Bonzini {
316c50d8ae3SPaolo Bonzini 	return spte & shadow_mmio_access_mask;
317c50d8ae3SPaolo Bonzini }
318c50d8ae3SPaolo Bonzini 
319c50d8ae3SPaolo Bonzini static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
320c50d8ae3SPaolo Bonzini {
321c50d8ae3SPaolo Bonzini 	u64 kvm_gen, spte_gen, gen;
322c50d8ae3SPaolo Bonzini 
323c50d8ae3SPaolo Bonzini 	gen = kvm_vcpu_memslots(vcpu)->generation;
324c50d8ae3SPaolo Bonzini 	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
325c50d8ae3SPaolo Bonzini 		return false;
326c50d8ae3SPaolo Bonzini 
327c50d8ae3SPaolo Bonzini 	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
328c50d8ae3SPaolo Bonzini 	spte_gen = get_mmio_spte_generation(spte);
329c50d8ae3SPaolo Bonzini 
330c50d8ae3SPaolo Bonzini 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
331c50d8ae3SPaolo Bonzini 	return likely(kvm_gen == spte_gen);
332c50d8ae3SPaolo Bonzini }
333c50d8ae3SPaolo Bonzini 
334cd313569SMohammed Gamal static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
335cd313569SMohammed Gamal                                   struct x86_exception *exception)
336cd313569SMohammed Gamal {
337ec7771abSMohammed Gamal 	/* Check if guest physical address doesn't exceed guest maximum */
338dc46515cSSean Christopherson 	if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
339ec7771abSMohammed Gamal 		exception->error_code |= PFERR_RSVD_MASK;
340ec7771abSMohammed Gamal 		return UNMAPPED_GVA;
341ec7771abSMohammed Gamal 	}
342ec7771abSMohammed Gamal 
343cd313569SMohammed Gamal         return gpa;
344cd313569SMohammed Gamal }
345cd313569SMohammed Gamal 
346c50d8ae3SPaolo Bonzini static int is_cpuid_PSE36(void)
347c50d8ae3SPaolo Bonzini {
348c50d8ae3SPaolo Bonzini 	return 1;
349c50d8ae3SPaolo Bonzini }
350c50d8ae3SPaolo Bonzini 
351c50d8ae3SPaolo Bonzini static gfn_t pse36_gfn_delta(u32 gpte)
352c50d8ae3SPaolo Bonzini {
353c50d8ae3SPaolo Bonzini 	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
354c50d8ae3SPaolo Bonzini 
355c50d8ae3SPaolo Bonzini 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
356c50d8ae3SPaolo Bonzini }
357c50d8ae3SPaolo Bonzini 
358c50d8ae3SPaolo Bonzini #ifdef CONFIG_X86_64
359c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
360c50d8ae3SPaolo Bonzini {
361c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
362c50d8ae3SPaolo Bonzini }
363c50d8ae3SPaolo Bonzini 
364c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
365c50d8ae3SPaolo Bonzini {
366c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
367c50d8ae3SPaolo Bonzini }
368c50d8ae3SPaolo Bonzini 
369c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
370c50d8ae3SPaolo Bonzini {
371c50d8ae3SPaolo Bonzini 	return xchg(sptep, spte);
372c50d8ae3SPaolo Bonzini }
373c50d8ae3SPaolo Bonzini 
374c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
375c50d8ae3SPaolo Bonzini {
376c50d8ae3SPaolo Bonzini 	return READ_ONCE(*sptep);
377c50d8ae3SPaolo Bonzini }
378c50d8ae3SPaolo Bonzini #else
379c50d8ae3SPaolo Bonzini union split_spte {
380c50d8ae3SPaolo Bonzini 	struct {
381c50d8ae3SPaolo Bonzini 		u32 spte_low;
382c50d8ae3SPaolo Bonzini 		u32 spte_high;
383c50d8ae3SPaolo Bonzini 	};
384c50d8ae3SPaolo Bonzini 	u64 spte;
385c50d8ae3SPaolo Bonzini };
386c50d8ae3SPaolo Bonzini 
387c50d8ae3SPaolo Bonzini static void count_spte_clear(u64 *sptep, u64 spte)
388c50d8ae3SPaolo Bonzini {
38957354682SSean Christopherson 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
390c50d8ae3SPaolo Bonzini 
391c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(spte))
392c50d8ae3SPaolo Bonzini 		return;
393c50d8ae3SPaolo Bonzini 
394c50d8ae3SPaolo Bonzini 	/* Ensure the spte is completely set before we increase the count */
395c50d8ae3SPaolo Bonzini 	smp_wmb();
396c50d8ae3SPaolo Bonzini 	sp->clear_spte_count++;
397c50d8ae3SPaolo Bonzini }
398c50d8ae3SPaolo Bonzini 
399c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
400c50d8ae3SPaolo Bonzini {
401c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
402c50d8ae3SPaolo Bonzini 
403c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
404c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
405c50d8ae3SPaolo Bonzini 
406c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
407c50d8ae3SPaolo Bonzini 
408c50d8ae3SPaolo Bonzini 	/*
409c50d8ae3SPaolo Bonzini 	 * If we map the spte from nonpresent to present, We should store
410c50d8ae3SPaolo Bonzini 	 * the high bits firstly, then set present bit, so cpu can not
411c50d8ae3SPaolo Bonzini 	 * fetch this spte while we are setting the spte.
412c50d8ae3SPaolo Bonzini 	 */
413c50d8ae3SPaolo Bonzini 	smp_wmb();
414c50d8ae3SPaolo Bonzini 
415c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
416c50d8ae3SPaolo Bonzini }
417c50d8ae3SPaolo Bonzini 
418c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
419c50d8ae3SPaolo Bonzini {
420c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
421c50d8ae3SPaolo Bonzini 
422c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
423c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
424c50d8ae3SPaolo Bonzini 
425c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
426c50d8ae3SPaolo Bonzini 
427c50d8ae3SPaolo Bonzini 	/*
428c50d8ae3SPaolo Bonzini 	 * If we map the spte from present to nonpresent, we should clear
429c50d8ae3SPaolo Bonzini 	 * present bit firstly to avoid vcpu fetch the old high bits.
430c50d8ae3SPaolo Bonzini 	 */
431c50d8ae3SPaolo Bonzini 	smp_wmb();
432c50d8ae3SPaolo Bonzini 
433c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
434c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
435c50d8ae3SPaolo Bonzini }
436c50d8ae3SPaolo Bonzini 
437c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
438c50d8ae3SPaolo Bonzini {
439c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte, orig;
440c50d8ae3SPaolo Bonzini 
441c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
442c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
443c50d8ae3SPaolo Bonzini 
444c50d8ae3SPaolo Bonzini 	/* xchg acts as a barrier before the setting of the high bits */
445c50d8ae3SPaolo Bonzini 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
446c50d8ae3SPaolo Bonzini 	orig.spte_high = ssptep->spte_high;
447c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
448c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
449c50d8ae3SPaolo Bonzini 
450c50d8ae3SPaolo Bonzini 	return orig.spte;
451c50d8ae3SPaolo Bonzini }
452c50d8ae3SPaolo Bonzini 
453c50d8ae3SPaolo Bonzini /*
454c50d8ae3SPaolo Bonzini  * The idea using the light way get the spte on x86_32 guest is from
455c50d8ae3SPaolo Bonzini  * gup_get_pte (mm/gup.c).
456c50d8ae3SPaolo Bonzini  *
457c50d8ae3SPaolo Bonzini  * An spte tlb flush may be pending, because kvm_set_pte_rmapp
458c50d8ae3SPaolo Bonzini  * coalesces them and we are running out of the MMU lock.  Therefore
459c50d8ae3SPaolo Bonzini  * we need to protect against in-progress updates of the spte.
460c50d8ae3SPaolo Bonzini  *
461c50d8ae3SPaolo Bonzini  * Reading the spte while an update is in progress may get the old value
462c50d8ae3SPaolo Bonzini  * for the high part of the spte.  The race is fine for a present->non-present
463c50d8ae3SPaolo Bonzini  * change (because the high part of the spte is ignored for non-present spte),
464c50d8ae3SPaolo Bonzini  * but for a present->present change we must reread the spte.
465c50d8ae3SPaolo Bonzini  *
466c50d8ae3SPaolo Bonzini  * All such changes are done in two steps (present->non-present and
467c50d8ae3SPaolo Bonzini  * non-present->present), hence it is enough to count the number of
468c50d8ae3SPaolo Bonzini  * present->non-present updates: if it changed while reading the spte,
469c50d8ae3SPaolo Bonzini  * we might have hit the race.  This is done using clear_spte_count.
470c50d8ae3SPaolo Bonzini  */
471c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
472c50d8ae3SPaolo Bonzini {
47357354682SSean Christopherson 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
474c50d8ae3SPaolo Bonzini 	union split_spte spte, *orig = (union split_spte *)sptep;
475c50d8ae3SPaolo Bonzini 	int count;
476c50d8ae3SPaolo Bonzini 
477c50d8ae3SPaolo Bonzini retry:
478c50d8ae3SPaolo Bonzini 	count = sp->clear_spte_count;
479c50d8ae3SPaolo Bonzini 	smp_rmb();
480c50d8ae3SPaolo Bonzini 
481c50d8ae3SPaolo Bonzini 	spte.spte_low = orig->spte_low;
482c50d8ae3SPaolo Bonzini 	smp_rmb();
483c50d8ae3SPaolo Bonzini 
484c50d8ae3SPaolo Bonzini 	spte.spte_high = orig->spte_high;
485c50d8ae3SPaolo Bonzini 	smp_rmb();
486c50d8ae3SPaolo Bonzini 
487c50d8ae3SPaolo Bonzini 	if (unlikely(spte.spte_low != orig->spte_low ||
488c50d8ae3SPaolo Bonzini 	      count != sp->clear_spte_count))
489c50d8ae3SPaolo Bonzini 		goto retry;
490c50d8ae3SPaolo Bonzini 
491c50d8ae3SPaolo Bonzini 	return spte.spte;
492c50d8ae3SPaolo Bonzini }
493c50d8ae3SPaolo Bonzini #endif
494c50d8ae3SPaolo Bonzini 
495c50d8ae3SPaolo Bonzini static bool spte_has_volatile_bits(u64 spte)
496c50d8ae3SPaolo Bonzini {
497c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(spte))
498c50d8ae3SPaolo Bonzini 		return false;
499c50d8ae3SPaolo Bonzini 
500c50d8ae3SPaolo Bonzini 	/*
501c50d8ae3SPaolo Bonzini 	 * Always atomically update spte if it can be updated
502c50d8ae3SPaolo Bonzini 	 * out of mmu-lock, it can ensure dirty bit is not lost,
503c50d8ae3SPaolo Bonzini 	 * also, it can help us to get a stable is_writable_pte()
504c50d8ae3SPaolo Bonzini 	 * to ensure tlb flush is not missed.
505c50d8ae3SPaolo Bonzini 	 */
506c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(spte) ||
507c50d8ae3SPaolo Bonzini 	    is_access_track_spte(spte))
508c50d8ae3SPaolo Bonzini 		return true;
509c50d8ae3SPaolo Bonzini 
510c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
511c50d8ae3SPaolo Bonzini 		if ((spte & shadow_accessed_mask) == 0 ||
512c50d8ae3SPaolo Bonzini 	    	    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
513c50d8ae3SPaolo Bonzini 			return true;
514c50d8ae3SPaolo Bonzini 	}
515c50d8ae3SPaolo Bonzini 
516c50d8ae3SPaolo Bonzini 	return false;
517c50d8ae3SPaolo Bonzini }
518c50d8ae3SPaolo Bonzini 
519c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_set:
520c50d8ae3SPaolo Bonzini  * Set the sptep from nonpresent to present.
521c50d8ae3SPaolo Bonzini  * Note: the sptep being assigned *must* be either not present
522c50d8ae3SPaolo Bonzini  * or in a state where the hardware will not attempt to update
523c50d8ae3SPaolo Bonzini  * the spte.
524c50d8ae3SPaolo Bonzini  */
525c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 new_spte)
526c50d8ae3SPaolo Bonzini {
527c50d8ae3SPaolo Bonzini 	WARN_ON(is_shadow_present_pte(*sptep));
528c50d8ae3SPaolo Bonzini 	__set_spte(sptep, new_spte);
529c50d8ae3SPaolo Bonzini }
530c50d8ae3SPaolo Bonzini 
531c50d8ae3SPaolo Bonzini /*
532c50d8ae3SPaolo Bonzini  * Update the SPTE (excluding the PFN), but do not track changes in its
533c50d8ae3SPaolo Bonzini  * accessed/dirty status.
534c50d8ae3SPaolo Bonzini  */
535c50d8ae3SPaolo Bonzini static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
536c50d8ae3SPaolo Bonzini {
537c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
538c50d8ae3SPaolo Bonzini 
539c50d8ae3SPaolo Bonzini 	WARN_ON(!is_shadow_present_pte(new_spte));
540c50d8ae3SPaolo Bonzini 
541c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte)) {
542c50d8ae3SPaolo Bonzini 		mmu_spte_set(sptep, new_spte);
543c50d8ae3SPaolo Bonzini 		return old_spte;
544c50d8ae3SPaolo Bonzini 	}
545c50d8ae3SPaolo Bonzini 
546c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
547c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, new_spte);
548c50d8ae3SPaolo Bonzini 	else
549c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, new_spte);
550c50d8ae3SPaolo Bonzini 
551c50d8ae3SPaolo Bonzini 	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
552c50d8ae3SPaolo Bonzini 
553c50d8ae3SPaolo Bonzini 	return old_spte;
554c50d8ae3SPaolo Bonzini }
555c50d8ae3SPaolo Bonzini 
556c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_update:
557c50d8ae3SPaolo Bonzini  * Update the state bits, it means the mapped pfn is not changed.
558c50d8ae3SPaolo Bonzini  *
559c50d8ae3SPaolo Bonzini  * Whenever we overwrite a writable spte with a read-only one we
560c50d8ae3SPaolo Bonzini  * should flush remote TLBs. Otherwise rmap_write_protect
561c50d8ae3SPaolo Bonzini  * will find a read-only spte, even though the writable spte
562c50d8ae3SPaolo Bonzini  * might be cached on a CPU's TLB, the return value indicates this
563c50d8ae3SPaolo Bonzini  * case.
564c50d8ae3SPaolo Bonzini  *
565c50d8ae3SPaolo Bonzini  * Returns true if the TLB needs to be flushed
566c50d8ae3SPaolo Bonzini  */
567c50d8ae3SPaolo Bonzini static bool mmu_spte_update(u64 *sptep, u64 new_spte)
568c50d8ae3SPaolo Bonzini {
569c50d8ae3SPaolo Bonzini 	bool flush = false;
570c50d8ae3SPaolo Bonzini 	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
571c50d8ae3SPaolo Bonzini 
572c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
573c50d8ae3SPaolo Bonzini 		return false;
574c50d8ae3SPaolo Bonzini 
575c50d8ae3SPaolo Bonzini 	/*
576c50d8ae3SPaolo Bonzini 	 * For the spte updated out of mmu-lock is safe, since
577c50d8ae3SPaolo Bonzini 	 * we always atomically update it, see the comments in
578c50d8ae3SPaolo Bonzini 	 * spte_has_volatile_bits().
579c50d8ae3SPaolo Bonzini 	 */
580c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(old_spte) &&
581c50d8ae3SPaolo Bonzini 	      !is_writable_pte(new_spte))
582c50d8ae3SPaolo Bonzini 		flush = true;
583c50d8ae3SPaolo Bonzini 
584c50d8ae3SPaolo Bonzini 	/*
585c50d8ae3SPaolo Bonzini 	 * Flush TLB when accessed/dirty states are changed in the page tables,
586c50d8ae3SPaolo Bonzini 	 * to guarantee consistency between TLB and page tables.
587c50d8ae3SPaolo Bonzini 	 */
588c50d8ae3SPaolo Bonzini 
589c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
590c50d8ae3SPaolo Bonzini 		flush = true;
591c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
592c50d8ae3SPaolo Bonzini 	}
593c50d8ae3SPaolo Bonzini 
594c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
595c50d8ae3SPaolo Bonzini 		flush = true;
596c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
597c50d8ae3SPaolo Bonzini 	}
598c50d8ae3SPaolo Bonzini 
599c50d8ae3SPaolo Bonzini 	return flush;
600c50d8ae3SPaolo Bonzini }
601c50d8ae3SPaolo Bonzini 
602c50d8ae3SPaolo Bonzini /*
603c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_track_bits:
604c50d8ae3SPaolo Bonzini  * It sets the sptep from present to nonpresent, and track the
605c50d8ae3SPaolo Bonzini  * state bits, it is used to clear the last level sptep.
6067fa2a347SSean Christopherson  * Returns the old PTE.
607c50d8ae3SPaolo Bonzini  */
60871f51d2cSMingwei Zhang static int mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
609c50d8ae3SPaolo Bonzini {
610c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
611c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
61271f51d2cSMingwei Zhang 	int level = sptep_to_sp(sptep)->role.level;
613c50d8ae3SPaolo Bonzini 
614c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
615c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, 0ull);
616c50d8ae3SPaolo Bonzini 	else
617c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, 0ull);
618c50d8ae3SPaolo Bonzini 
619c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
6207fa2a347SSean Christopherson 		return old_spte;
621c50d8ae3SPaolo Bonzini 
62271f51d2cSMingwei Zhang 	kvm_update_page_stats(kvm, level, -1);
62371f51d2cSMingwei Zhang 
624c50d8ae3SPaolo Bonzini 	pfn = spte_to_pfn(old_spte);
625c50d8ae3SPaolo Bonzini 
626c50d8ae3SPaolo Bonzini 	/*
627c50d8ae3SPaolo Bonzini 	 * KVM does not hold the refcount of the page used by
628c50d8ae3SPaolo Bonzini 	 * kvm mmu, before reclaiming the page, we should
629c50d8ae3SPaolo Bonzini 	 * unmap it from mmu first.
630c50d8ae3SPaolo Bonzini 	 */
631c50d8ae3SPaolo Bonzini 	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
632c50d8ae3SPaolo Bonzini 
633c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte))
634c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(pfn);
635c50d8ae3SPaolo Bonzini 
636c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte))
637c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(pfn);
638c50d8ae3SPaolo Bonzini 
6397fa2a347SSean Christopherson 	return old_spte;
640c50d8ae3SPaolo Bonzini }
641c50d8ae3SPaolo Bonzini 
642c50d8ae3SPaolo Bonzini /*
643c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_no_track:
644c50d8ae3SPaolo Bonzini  * Directly clear spte without caring the state bits of sptep,
645c50d8ae3SPaolo Bonzini  * it is used to set the upper level spte.
646c50d8ae3SPaolo Bonzini  */
647c50d8ae3SPaolo Bonzini static void mmu_spte_clear_no_track(u64 *sptep)
648c50d8ae3SPaolo Bonzini {
649c50d8ae3SPaolo Bonzini 	__update_clear_spte_fast(sptep, 0ull);
650c50d8ae3SPaolo Bonzini }
651c50d8ae3SPaolo Bonzini 
652c50d8ae3SPaolo Bonzini static u64 mmu_spte_get_lockless(u64 *sptep)
653c50d8ae3SPaolo Bonzini {
654c50d8ae3SPaolo Bonzini 	return __get_spte_lockless(sptep);
655c50d8ae3SPaolo Bonzini }
656c50d8ae3SPaolo Bonzini 
657c50d8ae3SPaolo Bonzini /* Restore an acc-track PTE back to a regular PTE */
658c50d8ae3SPaolo Bonzini static u64 restore_acc_track_spte(u64 spte)
659c50d8ae3SPaolo Bonzini {
660c50d8ae3SPaolo Bonzini 	u64 new_spte = spte;
6618a967d65SPaolo Bonzini 	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
6628a967d65SPaolo Bonzini 			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
663c50d8ae3SPaolo Bonzini 
664c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(spte_ad_enabled(spte));
665c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!is_access_track_spte(spte));
666c50d8ae3SPaolo Bonzini 
667c50d8ae3SPaolo Bonzini 	new_spte &= ~shadow_acc_track_mask;
6688a967d65SPaolo Bonzini 	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
6698a967d65SPaolo Bonzini 		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
670c50d8ae3SPaolo Bonzini 	new_spte |= saved_bits;
671c50d8ae3SPaolo Bonzini 
672c50d8ae3SPaolo Bonzini 	return new_spte;
673c50d8ae3SPaolo Bonzini }
674c50d8ae3SPaolo Bonzini 
675c50d8ae3SPaolo Bonzini /* Returns the Accessed status of the PTE and resets it at the same time. */
676c50d8ae3SPaolo Bonzini static bool mmu_spte_age(u64 *sptep)
677c50d8ae3SPaolo Bonzini {
678c50d8ae3SPaolo Bonzini 	u64 spte = mmu_spte_get_lockless(sptep);
679c50d8ae3SPaolo Bonzini 
680c50d8ae3SPaolo Bonzini 	if (!is_accessed_spte(spte))
681c50d8ae3SPaolo Bonzini 		return false;
682c50d8ae3SPaolo Bonzini 
683c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
684c50d8ae3SPaolo Bonzini 		clear_bit((ffs(shadow_accessed_mask) - 1),
685c50d8ae3SPaolo Bonzini 			  (unsigned long *)sptep);
686c50d8ae3SPaolo Bonzini 	} else {
687c50d8ae3SPaolo Bonzini 		/*
688c50d8ae3SPaolo Bonzini 		 * Capture the dirty status of the page, so that it doesn't get
689c50d8ae3SPaolo Bonzini 		 * lost when the SPTE is marked for access tracking.
690c50d8ae3SPaolo Bonzini 		 */
691c50d8ae3SPaolo Bonzini 		if (is_writable_pte(spte))
692c50d8ae3SPaolo Bonzini 			kvm_set_pfn_dirty(spte_to_pfn(spte));
693c50d8ae3SPaolo Bonzini 
694c50d8ae3SPaolo Bonzini 		spte = mark_spte_for_access_track(spte);
695c50d8ae3SPaolo Bonzini 		mmu_spte_update_no_track(sptep, spte);
696c50d8ae3SPaolo Bonzini 	}
697c50d8ae3SPaolo Bonzini 
698c50d8ae3SPaolo Bonzini 	return true;
699c50d8ae3SPaolo Bonzini }
700c50d8ae3SPaolo Bonzini 
701c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
702c50d8ae3SPaolo Bonzini {
703c5c8c7c5SDavid Matlack 	if (is_tdp_mmu(vcpu->arch.mmu)) {
704c5c8c7c5SDavid Matlack 		kvm_tdp_mmu_walk_lockless_begin();
705c5c8c7c5SDavid Matlack 	} else {
706c50d8ae3SPaolo Bonzini 		/*
707c50d8ae3SPaolo Bonzini 		 * Prevent page table teardown by making any free-er wait during
708c50d8ae3SPaolo Bonzini 		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
709c50d8ae3SPaolo Bonzini 		 */
710c50d8ae3SPaolo Bonzini 		local_irq_disable();
711c50d8ae3SPaolo Bonzini 
712c50d8ae3SPaolo Bonzini 		/*
713c50d8ae3SPaolo Bonzini 		 * Make sure a following spte read is not reordered ahead of the write
714c50d8ae3SPaolo Bonzini 		 * to vcpu->mode.
715c50d8ae3SPaolo Bonzini 		 */
716c50d8ae3SPaolo Bonzini 		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
717c50d8ae3SPaolo Bonzini 	}
718c5c8c7c5SDavid Matlack }
719c50d8ae3SPaolo Bonzini 
720c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
721c50d8ae3SPaolo Bonzini {
722c5c8c7c5SDavid Matlack 	if (is_tdp_mmu(vcpu->arch.mmu)) {
723c5c8c7c5SDavid Matlack 		kvm_tdp_mmu_walk_lockless_end();
724c5c8c7c5SDavid Matlack 	} else {
725c50d8ae3SPaolo Bonzini 		/*
726c50d8ae3SPaolo Bonzini 		 * Make sure the write to vcpu->mode is not reordered in front of
727c50d8ae3SPaolo Bonzini 		 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
728c50d8ae3SPaolo Bonzini 		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
729c50d8ae3SPaolo Bonzini 		 */
730c50d8ae3SPaolo Bonzini 		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
731c50d8ae3SPaolo Bonzini 		local_irq_enable();
732c50d8ae3SPaolo Bonzini 	}
733c5c8c7c5SDavid Matlack }
734c50d8ae3SPaolo Bonzini 
735378f5cd6SSean Christopherson static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
736c50d8ae3SPaolo Bonzini {
737c50d8ae3SPaolo Bonzini 	int r;
738c50d8ae3SPaolo Bonzini 
739531281adSSean Christopherson 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
74094ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
741531281adSSean Christopherson 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
742c50d8ae3SPaolo Bonzini 	if (r)
743c50d8ae3SPaolo Bonzini 		return r;
74494ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
745171a90d7SSean Christopherson 				       PT64_ROOT_MAX_LEVEL);
746171a90d7SSean Christopherson 	if (r)
747171a90d7SSean Christopherson 		return r;
748378f5cd6SSean Christopherson 	if (maybe_indirect) {
74994ce87efSSean Christopherson 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
750171a90d7SSean Christopherson 					       PT64_ROOT_MAX_LEVEL);
751c50d8ae3SPaolo Bonzini 		if (r)
752c50d8ae3SPaolo Bonzini 			return r;
753378f5cd6SSean Christopherson 	}
75494ce87efSSean Christopherson 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
755531281adSSean Christopherson 					  PT64_ROOT_MAX_LEVEL);
756c50d8ae3SPaolo Bonzini }
757c50d8ae3SPaolo Bonzini 
758c50d8ae3SPaolo Bonzini static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
759c50d8ae3SPaolo Bonzini {
76094ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
76194ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
76294ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
76394ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
764c50d8ae3SPaolo Bonzini }
765c50d8ae3SPaolo Bonzini 
766c50d8ae3SPaolo Bonzini static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
767c50d8ae3SPaolo Bonzini {
76894ce87efSSean Christopherson 	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
769c50d8ae3SPaolo Bonzini }
770c50d8ae3SPaolo Bonzini 
771c50d8ae3SPaolo Bonzini static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
772c50d8ae3SPaolo Bonzini {
773c50d8ae3SPaolo Bonzini 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
774c50d8ae3SPaolo Bonzini }
775c50d8ae3SPaolo Bonzini 
776c50d8ae3SPaolo Bonzini static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
777c50d8ae3SPaolo Bonzini {
778c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
779c50d8ae3SPaolo Bonzini 		return sp->gfns[index];
780c50d8ae3SPaolo Bonzini 
781c50d8ae3SPaolo Bonzini 	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
782c50d8ae3SPaolo Bonzini }
783c50d8ae3SPaolo Bonzini 
784c50d8ae3SPaolo Bonzini static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
785c50d8ae3SPaolo Bonzini {
786c50d8ae3SPaolo Bonzini 	if (!sp->role.direct) {
787c50d8ae3SPaolo Bonzini 		sp->gfns[index] = gfn;
788c50d8ae3SPaolo Bonzini 		return;
789c50d8ae3SPaolo Bonzini 	}
790c50d8ae3SPaolo Bonzini 
791c50d8ae3SPaolo Bonzini 	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
792c50d8ae3SPaolo Bonzini 		pr_err_ratelimited("gfn mismatch under direct page %llx "
793c50d8ae3SPaolo Bonzini 				   "(expected %llx, got %llx)\n",
794c50d8ae3SPaolo Bonzini 				   sp->gfn,
795c50d8ae3SPaolo Bonzini 				   kvm_mmu_page_get_gfn(sp, index), gfn);
796c50d8ae3SPaolo Bonzini }
797c50d8ae3SPaolo Bonzini 
798c50d8ae3SPaolo Bonzini /*
799c50d8ae3SPaolo Bonzini  * Return the pointer to the large page information for a given gfn,
800c50d8ae3SPaolo Bonzini  * handling slots that are not large page aligned.
801c50d8ae3SPaolo Bonzini  */
802c50d8ae3SPaolo Bonzini static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
8038ca6f063SBen Gardon 		const struct kvm_memory_slot *slot, int level)
804c50d8ae3SPaolo Bonzini {
805c50d8ae3SPaolo Bonzini 	unsigned long idx;
806c50d8ae3SPaolo Bonzini 
807c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
808c50d8ae3SPaolo Bonzini 	return &slot->arch.lpage_info[level - 2][idx];
809c50d8ae3SPaolo Bonzini }
810c50d8ae3SPaolo Bonzini 
811269e9552SHamza Mahfooz static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
812c50d8ae3SPaolo Bonzini 					    gfn_t gfn, int count)
813c50d8ae3SPaolo Bonzini {
814c50d8ae3SPaolo Bonzini 	struct kvm_lpage_info *linfo;
815c50d8ae3SPaolo Bonzini 	int i;
816c50d8ae3SPaolo Bonzini 
8173bae0459SSean Christopherson 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
818c50d8ae3SPaolo Bonzini 		linfo = lpage_info_slot(gfn, slot, i);
819c50d8ae3SPaolo Bonzini 		linfo->disallow_lpage += count;
820c50d8ae3SPaolo Bonzini 		WARN_ON(linfo->disallow_lpage < 0);
821c50d8ae3SPaolo Bonzini 	}
822c50d8ae3SPaolo Bonzini }
823c50d8ae3SPaolo Bonzini 
824269e9552SHamza Mahfooz void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
825c50d8ae3SPaolo Bonzini {
826c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, 1);
827c50d8ae3SPaolo Bonzini }
828c50d8ae3SPaolo Bonzini 
829269e9552SHamza Mahfooz void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
830c50d8ae3SPaolo Bonzini {
831c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, -1);
832c50d8ae3SPaolo Bonzini }
833c50d8ae3SPaolo Bonzini 
834c50d8ae3SPaolo Bonzini static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
835c50d8ae3SPaolo Bonzini {
836c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
837c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
838c50d8ae3SPaolo Bonzini 	gfn_t gfn;
839c50d8ae3SPaolo Bonzini 
840c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages++;
841c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
842c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
843c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
844c50d8ae3SPaolo Bonzini 
845c50d8ae3SPaolo Bonzini 	/* the non-leaf shadow pages are keeping readonly. */
8463bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
847c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_add_page(kvm, slot, gfn,
848c50d8ae3SPaolo Bonzini 						    KVM_PAGE_TRACK_WRITE);
849c50d8ae3SPaolo Bonzini 
850c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
851c50d8ae3SPaolo Bonzini }
852c50d8ae3SPaolo Bonzini 
85329cf0f50SBen Gardon void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
854c50d8ae3SPaolo Bonzini {
855c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
856c50d8ae3SPaolo Bonzini 		return;
857c50d8ae3SPaolo Bonzini 
858c50d8ae3SPaolo Bonzini 	++kvm->stat.nx_lpage_splits;
859c50d8ae3SPaolo Bonzini 	list_add_tail(&sp->lpage_disallowed_link,
860c50d8ae3SPaolo Bonzini 		      &kvm->arch.lpage_disallowed_mmu_pages);
861c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = true;
862c50d8ae3SPaolo Bonzini }
863c50d8ae3SPaolo Bonzini 
864c50d8ae3SPaolo Bonzini static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
865c50d8ae3SPaolo Bonzini {
866c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
867c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
868c50d8ae3SPaolo Bonzini 	gfn_t gfn;
869c50d8ae3SPaolo Bonzini 
870c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages--;
871c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
872c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
873c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
8743bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
875c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
876c50d8ae3SPaolo Bonzini 						       KVM_PAGE_TRACK_WRITE);
877c50d8ae3SPaolo Bonzini 
878c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_allow_lpage(slot, gfn);
879c50d8ae3SPaolo Bonzini }
880c50d8ae3SPaolo Bonzini 
88129cf0f50SBen Gardon void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
882c50d8ae3SPaolo Bonzini {
883c50d8ae3SPaolo Bonzini 	--kvm->stat.nx_lpage_splits;
884c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = false;
885c50d8ae3SPaolo Bonzini 	list_del(&sp->lpage_disallowed_link);
886c50d8ae3SPaolo Bonzini }
887c50d8ae3SPaolo Bonzini 
888c50d8ae3SPaolo Bonzini static struct kvm_memory_slot *
889c50d8ae3SPaolo Bonzini gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
890c50d8ae3SPaolo Bonzini 			    bool no_dirty_log)
891c50d8ae3SPaolo Bonzini {
892c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
893c50d8ae3SPaolo Bonzini 
894c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
89591b0d268SPaolo Bonzini 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
89691b0d268SPaolo Bonzini 		return NULL;
897044c59c4SPeter Xu 	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
89891b0d268SPaolo Bonzini 		return NULL;
899c50d8ae3SPaolo Bonzini 
900c50d8ae3SPaolo Bonzini 	return slot;
901c50d8ae3SPaolo Bonzini }
902c50d8ae3SPaolo Bonzini 
903c50d8ae3SPaolo Bonzini /*
904c50d8ae3SPaolo Bonzini  * About rmap_head encoding:
905c50d8ae3SPaolo Bonzini  *
906c50d8ae3SPaolo Bonzini  * If the bit zero of rmap_head->val is clear, then it points to the only spte
907c50d8ae3SPaolo Bonzini  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
908c50d8ae3SPaolo Bonzini  * pte_list_desc containing more mappings.
909c50d8ae3SPaolo Bonzini  */
910c50d8ae3SPaolo Bonzini 
911c50d8ae3SPaolo Bonzini /*
912c50d8ae3SPaolo Bonzini  * Returns the number of pointers in the rmap chain, not counting the new one.
913c50d8ae3SPaolo Bonzini  */
914c50d8ae3SPaolo Bonzini static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
915c50d8ae3SPaolo Bonzini 			struct kvm_rmap_head *rmap_head)
916c50d8ae3SPaolo Bonzini {
917c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
91813236e25SPeter Xu 	int count = 0;
919c50d8ae3SPaolo Bonzini 
920c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
921805a0f83SStephen Zhang 		rmap_printk("%p %llx 0->1\n", spte, *spte);
922c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)spte;
923c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
924805a0f83SStephen Zhang 		rmap_printk("%p %llx 1->many\n", spte, *spte);
925c50d8ae3SPaolo Bonzini 		desc = mmu_alloc_pte_list_desc(vcpu);
926c50d8ae3SPaolo Bonzini 		desc->sptes[0] = (u64 *)rmap_head->val;
927c50d8ae3SPaolo Bonzini 		desc->sptes[1] = spte;
92813236e25SPeter Xu 		desc->spte_count = 2;
929c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)desc | 1;
930c50d8ae3SPaolo Bonzini 		++count;
931c50d8ae3SPaolo Bonzini 	} else {
932805a0f83SStephen Zhang 		rmap_printk("%p %llx many->many\n", spte, *spte);
933c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
93413236e25SPeter Xu 		while (desc->spte_count == PTE_LIST_EXT) {
935c50d8ae3SPaolo Bonzini 			count += PTE_LIST_EXT;
936c6c4f961SLi RongQing 			if (!desc->more) {
937c50d8ae3SPaolo Bonzini 				desc->more = mmu_alloc_pte_list_desc(vcpu);
938c50d8ae3SPaolo Bonzini 				desc = desc->more;
93913236e25SPeter Xu 				desc->spte_count = 0;
940c6c4f961SLi RongQing 				break;
941c6c4f961SLi RongQing 			}
942c6c4f961SLi RongQing 			desc = desc->more;
943c50d8ae3SPaolo Bonzini 		}
94413236e25SPeter Xu 		count += desc->spte_count;
94513236e25SPeter Xu 		desc->sptes[desc->spte_count++] = spte;
946c50d8ae3SPaolo Bonzini 	}
947c50d8ae3SPaolo Bonzini 	return count;
948c50d8ae3SPaolo Bonzini }
949c50d8ae3SPaolo Bonzini 
950c50d8ae3SPaolo Bonzini static void
951c50d8ae3SPaolo Bonzini pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
952c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *desc, int i,
953c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *prev_desc)
954c50d8ae3SPaolo Bonzini {
95513236e25SPeter Xu 	int j = desc->spte_count - 1;
956c50d8ae3SPaolo Bonzini 
957c50d8ae3SPaolo Bonzini 	desc->sptes[i] = desc->sptes[j];
958c50d8ae3SPaolo Bonzini 	desc->sptes[j] = NULL;
95913236e25SPeter Xu 	desc->spte_count--;
96013236e25SPeter Xu 	if (desc->spte_count)
961c50d8ae3SPaolo Bonzini 		return;
962c50d8ae3SPaolo Bonzini 	if (!prev_desc && !desc->more)
963fe3c2b4cSMiaohe Lin 		rmap_head->val = 0;
964c50d8ae3SPaolo Bonzini 	else
965c50d8ae3SPaolo Bonzini 		if (prev_desc)
966c50d8ae3SPaolo Bonzini 			prev_desc->more = desc->more;
967c50d8ae3SPaolo Bonzini 		else
968c50d8ae3SPaolo Bonzini 			rmap_head->val = (unsigned long)desc->more | 1;
969c50d8ae3SPaolo Bonzini 	mmu_free_pte_list_desc(desc);
970c50d8ae3SPaolo Bonzini }
971c50d8ae3SPaolo Bonzini 
972c50d8ae3SPaolo Bonzini static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
973c50d8ae3SPaolo Bonzini {
974c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
975c50d8ae3SPaolo Bonzini 	struct pte_list_desc *prev_desc;
976c50d8ae3SPaolo Bonzini 	int i;
977c50d8ae3SPaolo Bonzini 
978c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
979c50d8ae3SPaolo Bonzini 		pr_err("%s: %p 0->BUG\n", __func__, spte);
980c50d8ae3SPaolo Bonzini 		BUG();
981c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
982805a0f83SStephen Zhang 		rmap_printk("%p 1->0\n", spte);
983c50d8ae3SPaolo Bonzini 		if ((u64 *)rmap_head->val != spte) {
984c50d8ae3SPaolo Bonzini 			pr_err("%s:  %p 1->BUG\n", __func__, spte);
985c50d8ae3SPaolo Bonzini 			BUG();
986c50d8ae3SPaolo Bonzini 		}
987c50d8ae3SPaolo Bonzini 		rmap_head->val = 0;
988c50d8ae3SPaolo Bonzini 	} else {
989805a0f83SStephen Zhang 		rmap_printk("%p many->many\n", spte);
990c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
991c50d8ae3SPaolo Bonzini 		prev_desc = NULL;
992c50d8ae3SPaolo Bonzini 		while (desc) {
99313236e25SPeter Xu 			for (i = 0; i < desc->spte_count; ++i) {
994c50d8ae3SPaolo Bonzini 				if (desc->sptes[i] == spte) {
995c50d8ae3SPaolo Bonzini 					pte_list_desc_remove_entry(rmap_head,
996c50d8ae3SPaolo Bonzini 							desc, i, prev_desc);
997c50d8ae3SPaolo Bonzini 					return;
998c50d8ae3SPaolo Bonzini 				}
999c50d8ae3SPaolo Bonzini 			}
1000c50d8ae3SPaolo Bonzini 			prev_desc = desc;
1001c50d8ae3SPaolo Bonzini 			desc = desc->more;
1002c50d8ae3SPaolo Bonzini 		}
1003c50d8ae3SPaolo Bonzini 		pr_err("%s: %p many->many\n", __func__, spte);
1004c50d8ae3SPaolo Bonzini 		BUG();
1005c50d8ae3SPaolo Bonzini 	}
1006c50d8ae3SPaolo Bonzini }
1007c50d8ae3SPaolo Bonzini 
100871f51d2cSMingwei Zhang static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
100971f51d2cSMingwei Zhang 			    u64 *sptep)
1010c50d8ae3SPaolo Bonzini {
101171f51d2cSMingwei Zhang 	mmu_spte_clear_track_bits(kvm, sptep);
1012c50d8ae3SPaolo Bonzini 	__pte_list_remove(sptep, rmap_head);
1013c50d8ae3SPaolo Bonzini }
1014c50d8ae3SPaolo Bonzini 
1015a75b5404SPeter Xu /* Return true if rmap existed, false otherwise */
101671f51d2cSMingwei Zhang static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1017a75b5404SPeter Xu {
1018a75b5404SPeter Xu 	struct pte_list_desc *desc, *next;
1019a75b5404SPeter Xu 	int i;
1020a75b5404SPeter Xu 
1021a75b5404SPeter Xu 	if (!rmap_head->val)
1022a75b5404SPeter Xu 		return false;
1023a75b5404SPeter Xu 
1024a75b5404SPeter Xu 	if (!(rmap_head->val & 1)) {
102571f51d2cSMingwei Zhang 		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
1026a75b5404SPeter Xu 		goto out;
1027a75b5404SPeter Xu 	}
1028a75b5404SPeter Xu 
1029a75b5404SPeter Xu 	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1030a75b5404SPeter Xu 
1031a75b5404SPeter Xu 	for (; desc; desc = next) {
1032a75b5404SPeter Xu 		for (i = 0; i < desc->spte_count; i++)
103371f51d2cSMingwei Zhang 			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
1034a75b5404SPeter Xu 		next = desc->more;
1035a75b5404SPeter Xu 		mmu_free_pte_list_desc(desc);
1036a75b5404SPeter Xu 	}
1037a75b5404SPeter Xu out:
1038a75b5404SPeter Xu 	/* rmap_head is meaningless now, remember to reset it */
1039a75b5404SPeter Xu 	rmap_head->val = 0;
1040a75b5404SPeter Xu 	return true;
1041a75b5404SPeter Xu }
1042a75b5404SPeter Xu 
10433bcd0662SPeter Xu unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
10443bcd0662SPeter Xu {
10453bcd0662SPeter Xu 	struct pte_list_desc *desc;
10463bcd0662SPeter Xu 	unsigned int count = 0;
10473bcd0662SPeter Xu 
10483bcd0662SPeter Xu 	if (!rmap_head->val)
10493bcd0662SPeter Xu 		return 0;
10503bcd0662SPeter Xu 	else if (!(rmap_head->val & 1))
10513bcd0662SPeter Xu 		return 1;
10523bcd0662SPeter Xu 
10533bcd0662SPeter Xu 	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
10543bcd0662SPeter Xu 
10553bcd0662SPeter Xu 	while (desc) {
10563bcd0662SPeter Xu 		count += desc->spte_count;
10573bcd0662SPeter Xu 		desc = desc->more;
10583bcd0662SPeter Xu 	}
10593bcd0662SPeter Xu 
10603bcd0662SPeter Xu 	return count;
10613bcd0662SPeter Xu }
10623bcd0662SPeter Xu 
106393e083d4SDavid Matlack static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1064269e9552SHamza Mahfooz 					 const struct kvm_memory_slot *slot)
1065c50d8ae3SPaolo Bonzini {
1066c50d8ae3SPaolo Bonzini 	unsigned long idx;
1067c50d8ae3SPaolo Bonzini 
1068c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
10693bae0459SSean Christopherson 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1070c50d8ae3SPaolo Bonzini }
1071c50d8ae3SPaolo Bonzini 
1072c50d8ae3SPaolo Bonzini static bool rmap_can_add(struct kvm_vcpu *vcpu)
1073c50d8ae3SPaolo Bonzini {
1074356ec69aSSean Christopherson 	struct kvm_mmu_memory_cache *mc;
1075c50d8ae3SPaolo Bonzini 
1076356ec69aSSean Christopherson 	mc = &vcpu->arch.mmu_pte_list_desc_cache;
107794ce87efSSean Christopherson 	return kvm_mmu_memory_cache_nr_free_objects(mc);
1078c50d8ae3SPaolo Bonzini }
1079c50d8ae3SPaolo Bonzini 
1080c50d8ae3SPaolo Bonzini static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1081c50d8ae3SPaolo Bonzini {
1082601f8af0SDavid Matlack 	struct kvm_memory_slot *slot;
1083c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1084c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1085c50d8ae3SPaolo Bonzini 
108657354682SSean Christopherson 	sp = sptep_to_sp(spte);
1087c50d8ae3SPaolo Bonzini 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1088601f8af0SDavid Matlack 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
108993e083d4SDavid Matlack 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1090c50d8ae3SPaolo Bonzini 	return pte_list_add(vcpu, spte, rmap_head);
1091c50d8ae3SPaolo Bonzini }
1092c50d8ae3SPaolo Bonzini 
1093601f8af0SDavid Matlack 
1094c50d8ae3SPaolo Bonzini static void rmap_remove(struct kvm *kvm, u64 *spte)
1095c50d8ae3SPaolo Bonzini {
1096601f8af0SDavid Matlack 	struct kvm_memslots *slots;
1097601f8af0SDavid Matlack 	struct kvm_memory_slot *slot;
1098c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1099c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1100c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1101c50d8ae3SPaolo Bonzini 
110257354682SSean Christopherson 	sp = sptep_to_sp(spte);
1103c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1104601f8af0SDavid Matlack 
1105601f8af0SDavid Matlack 	/*
1106601f8af0SDavid Matlack 	 * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
1107601f8af0SDavid Matlack 	 * context of a vCPU so have to determine which memslots to use based
1108601f8af0SDavid Matlack 	 * on context information in sp->role.
1109601f8af0SDavid Matlack 	 */
1110601f8af0SDavid Matlack 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
1111601f8af0SDavid Matlack 
1112601f8af0SDavid Matlack 	slot = __gfn_to_memslot(slots, gfn);
111393e083d4SDavid Matlack 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1114601f8af0SDavid Matlack 
1115c50d8ae3SPaolo Bonzini 	__pte_list_remove(spte, rmap_head);
1116c50d8ae3SPaolo Bonzini }
1117c50d8ae3SPaolo Bonzini 
1118c50d8ae3SPaolo Bonzini /*
1119c50d8ae3SPaolo Bonzini  * Used by the following functions to iterate through the sptes linked by a
1120c50d8ae3SPaolo Bonzini  * rmap.  All fields are private and not assumed to be used outside.
1121c50d8ae3SPaolo Bonzini  */
1122c50d8ae3SPaolo Bonzini struct rmap_iterator {
1123c50d8ae3SPaolo Bonzini 	/* private fields */
1124c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
1125c50d8ae3SPaolo Bonzini 	int pos;			/* index of the sptep */
1126c50d8ae3SPaolo Bonzini };
1127c50d8ae3SPaolo Bonzini 
1128c50d8ae3SPaolo Bonzini /*
1129c50d8ae3SPaolo Bonzini  * Iteration must be started by this function.  This should also be used after
1130c50d8ae3SPaolo Bonzini  * removing/dropping sptes from the rmap link because in such cases the
11310a03cbdaSMiaohe Lin  * information in the iterator may not be valid.
1132c50d8ae3SPaolo Bonzini  *
1133c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1134c50d8ae3SPaolo Bonzini  */
1135c50d8ae3SPaolo Bonzini static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1136c50d8ae3SPaolo Bonzini 			   struct rmap_iterator *iter)
1137c50d8ae3SPaolo Bonzini {
1138c50d8ae3SPaolo Bonzini 	u64 *sptep;
1139c50d8ae3SPaolo Bonzini 
1140c50d8ae3SPaolo Bonzini 	if (!rmap_head->val)
1141c50d8ae3SPaolo Bonzini 		return NULL;
1142c50d8ae3SPaolo Bonzini 
1143c50d8ae3SPaolo Bonzini 	if (!(rmap_head->val & 1)) {
1144c50d8ae3SPaolo Bonzini 		iter->desc = NULL;
1145c50d8ae3SPaolo Bonzini 		sptep = (u64 *)rmap_head->val;
1146c50d8ae3SPaolo Bonzini 		goto out;
1147c50d8ae3SPaolo Bonzini 	}
1148c50d8ae3SPaolo Bonzini 
1149c50d8ae3SPaolo Bonzini 	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1150c50d8ae3SPaolo Bonzini 	iter->pos = 0;
1151c50d8ae3SPaolo Bonzini 	sptep = iter->desc->sptes[iter->pos];
1152c50d8ae3SPaolo Bonzini out:
1153c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1154c50d8ae3SPaolo Bonzini 	return sptep;
1155c50d8ae3SPaolo Bonzini }
1156c50d8ae3SPaolo Bonzini 
1157c50d8ae3SPaolo Bonzini /*
1158c50d8ae3SPaolo Bonzini  * Must be used with a valid iterator: e.g. after rmap_get_first().
1159c50d8ae3SPaolo Bonzini  *
1160c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1161c50d8ae3SPaolo Bonzini  */
1162c50d8ae3SPaolo Bonzini static u64 *rmap_get_next(struct rmap_iterator *iter)
1163c50d8ae3SPaolo Bonzini {
1164c50d8ae3SPaolo Bonzini 	u64 *sptep;
1165c50d8ae3SPaolo Bonzini 
1166c50d8ae3SPaolo Bonzini 	if (iter->desc) {
1167c50d8ae3SPaolo Bonzini 		if (iter->pos < PTE_LIST_EXT - 1) {
1168c50d8ae3SPaolo Bonzini 			++iter->pos;
1169c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1170c50d8ae3SPaolo Bonzini 			if (sptep)
1171c50d8ae3SPaolo Bonzini 				goto out;
1172c50d8ae3SPaolo Bonzini 		}
1173c50d8ae3SPaolo Bonzini 
1174c50d8ae3SPaolo Bonzini 		iter->desc = iter->desc->more;
1175c50d8ae3SPaolo Bonzini 
1176c50d8ae3SPaolo Bonzini 		if (iter->desc) {
1177c50d8ae3SPaolo Bonzini 			iter->pos = 0;
1178c50d8ae3SPaolo Bonzini 			/* desc->sptes[0] cannot be NULL */
1179c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1180c50d8ae3SPaolo Bonzini 			goto out;
1181c50d8ae3SPaolo Bonzini 		}
1182c50d8ae3SPaolo Bonzini 	}
1183c50d8ae3SPaolo Bonzini 
1184c50d8ae3SPaolo Bonzini 	return NULL;
1185c50d8ae3SPaolo Bonzini out:
1186c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1187c50d8ae3SPaolo Bonzini 	return sptep;
1188c50d8ae3SPaolo Bonzini }
1189c50d8ae3SPaolo Bonzini 
1190c50d8ae3SPaolo Bonzini #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
1191c50d8ae3SPaolo Bonzini 	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
1192c50d8ae3SPaolo Bonzini 	     _spte_; _spte_ = rmap_get_next(_iter_))
1193c50d8ae3SPaolo Bonzini 
1194c50d8ae3SPaolo Bonzini static void drop_spte(struct kvm *kvm, u64 *sptep)
1195c50d8ae3SPaolo Bonzini {
119671f51d2cSMingwei Zhang 	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
11977fa2a347SSean Christopherson 
11987fa2a347SSean Christopherson 	if (is_shadow_present_pte(old_spte))
1199c50d8ae3SPaolo Bonzini 		rmap_remove(kvm, sptep);
1200c50d8ae3SPaolo Bonzini }
1201c50d8ae3SPaolo Bonzini 
1202c50d8ae3SPaolo Bonzini 
1203c50d8ae3SPaolo Bonzini static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1204c50d8ae3SPaolo Bonzini {
1205c50d8ae3SPaolo Bonzini 	if (is_large_pte(*sptep)) {
120657354682SSean Christopherson 		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1207c50d8ae3SPaolo Bonzini 		drop_spte(kvm, sptep);
1208c50d8ae3SPaolo Bonzini 		return true;
1209c50d8ae3SPaolo Bonzini 	}
1210c50d8ae3SPaolo Bonzini 
1211c50d8ae3SPaolo Bonzini 	return false;
1212c50d8ae3SPaolo Bonzini }
1213c50d8ae3SPaolo Bonzini 
1214c50d8ae3SPaolo Bonzini static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1215c50d8ae3SPaolo Bonzini {
1216c50d8ae3SPaolo Bonzini 	if (__drop_large_spte(vcpu->kvm, sptep)) {
121757354682SSean Christopherson 		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1218c50d8ae3SPaolo Bonzini 
1219c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1220c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1221c50d8ae3SPaolo Bonzini 	}
1222c50d8ae3SPaolo Bonzini }
1223c50d8ae3SPaolo Bonzini 
1224c50d8ae3SPaolo Bonzini /*
1225c50d8ae3SPaolo Bonzini  * Write-protect on the specified @sptep, @pt_protect indicates whether
1226c50d8ae3SPaolo Bonzini  * spte write-protection is caused by protecting shadow page table.
1227c50d8ae3SPaolo Bonzini  *
1228c50d8ae3SPaolo Bonzini  * Note: write protection is difference between dirty logging and spte
1229c50d8ae3SPaolo Bonzini  * protection:
1230c50d8ae3SPaolo Bonzini  * - for dirty logging, the spte can be set to writable at anytime if
1231c50d8ae3SPaolo Bonzini  *   its dirty bitmap is properly set.
1232c50d8ae3SPaolo Bonzini  * - for spte protection, the spte can be writable only after unsync-ing
1233c50d8ae3SPaolo Bonzini  *   shadow page.
1234c50d8ae3SPaolo Bonzini  *
1235c50d8ae3SPaolo Bonzini  * Return true if tlb need be flushed.
1236c50d8ae3SPaolo Bonzini  */
1237c50d8ae3SPaolo Bonzini static bool spte_write_protect(u64 *sptep, bool pt_protect)
1238c50d8ae3SPaolo Bonzini {
1239c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1240c50d8ae3SPaolo Bonzini 
1241c50d8ae3SPaolo Bonzini 	if (!is_writable_pte(spte) &&
1242c50d8ae3SPaolo Bonzini 	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1243c50d8ae3SPaolo Bonzini 		return false;
1244c50d8ae3SPaolo Bonzini 
1245805a0f83SStephen Zhang 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1246c50d8ae3SPaolo Bonzini 
1247c50d8ae3SPaolo Bonzini 	if (pt_protect)
12485fc3424fSSean Christopherson 		spte &= ~shadow_mmu_writable_mask;
1249c50d8ae3SPaolo Bonzini 	spte = spte & ~PT_WRITABLE_MASK;
1250c50d8ae3SPaolo Bonzini 
1251c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1252c50d8ae3SPaolo Bonzini }
1253c50d8ae3SPaolo Bonzini 
1254c50d8ae3SPaolo Bonzini static bool __rmap_write_protect(struct kvm *kvm,
1255c50d8ae3SPaolo Bonzini 				 struct kvm_rmap_head *rmap_head,
1256c50d8ae3SPaolo Bonzini 				 bool pt_protect)
1257c50d8ae3SPaolo Bonzini {
1258c50d8ae3SPaolo Bonzini 	u64 *sptep;
1259c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1260c50d8ae3SPaolo Bonzini 	bool flush = false;
1261c50d8ae3SPaolo Bonzini 
1262c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1263c50d8ae3SPaolo Bonzini 		flush |= spte_write_protect(sptep, pt_protect);
1264c50d8ae3SPaolo Bonzini 
1265c50d8ae3SPaolo Bonzini 	return flush;
1266c50d8ae3SPaolo Bonzini }
1267c50d8ae3SPaolo Bonzini 
1268c50d8ae3SPaolo Bonzini static bool spte_clear_dirty(u64 *sptep)
1269c50d8ae3SPaolo Bonzini {
1270c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1271c50d8ae3SPaolo Bonzini 
1272805a0f83SStephen Zhang 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1273c50d8ae3SPaolo Bonzini 
1274c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!spte_ad_enabled(spte));
1275c50d8ae3SPaolo Bonzini 	spte &= ~shadow_dirty_mask;
1276c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1277c50d8ae3SPaolo Bonzini }
1278c50d8ae3SPaolo Bonzini 
1279c50d8ae3SPaolo Bonzini static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1280c50d8ae3SPaolo Bonzini {
1281c50d8ae3SPaolo Bonzini 	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1282c50d8ae3SPaolo Bonzini 					       (unsigned long *)sptep);
1283c50d8ae3SPaolo Bonzini 	if (was_writable && !spte_ad_enabled(*sptep))
1284c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1285c50d8ae3SPaolo Bonzini 
1286c50d8ae3SPaolo Bonzini 	return was_writable;
1287c50d8ae3SPaolo Bonzini }
1288c50d8ae3SPaolo Bonzini 
1289c50d8ae3SPaolo Bonzini /*
1290c50d8ae3SPaolo Bonzini  * Gets the GFN ready for another round of dirty logging by clearing the
1291c50d8ae3SPaolo Bonzini  *	- D bit on ad-enabled SPTEs, and
1292c50d8ae3SPaolo Bonzini  *	- W bit on ad-disabled SPTEs.
1293c50d8ae3SPaolo Bonzini  * Returns true iff any D or W bits were cleared.
1294c50d8ae3SPaolo Bonzini  */
12950a234f5dSSean Christopherson static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1296269e9552SHamza Mahfooz 			       const struct kvm_memory_slot *slot)
1297c50d8ae3SPaolo Bonzini {
1298c50d8ae3SPaolo Bonzini 	u64 *sptep;
1299c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1300c50d8ae3SPaolo Bonzini 	bool flush = false;
1301c50d8ae3SPaolo Bonzini 
1302c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1303c50d8ae3SPaolo Bonzini 		if (spte_ad_need_write_protect(*sptep))
1304c50d8ae3SPaolo Bonzini 			flush |= spte_wrprot_for_clear_dirty(sptep);
1305c50d8ae3SPaolo Bonzini 		else
1306c50d8ae3SPaolo Bonzini 			flush |= spte_clear_dirty(sptep);
1307c50d8ae3SPaolo Bonzini 
1308c50d8ae3SPaolo Bonzini 	return flush;
1309c50d8ae3SPaolo Bonzini }
1310c50d8ae3SPaolo Bonzini 
1311c50d8ae3SPaolo Bonzini /**
1312c50d8ae3SPaolo Bonzini  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1313c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1314c50d8ae3SPaolo Bonzini  * @slot: slot to protect
1315c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1316c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should protect
1317c50d8ae3SPaolo Bonzini  *
131889212919SKeqian Zhu  * Used when we do not need to care about huge page mappings.
1319c50d8ae3SPaolo Bonzini  */
1320c50d8ae3SPaolo Bonzini static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1321c50d8ae3SPaolo Bonzini 				     struct kvm_memory_slot *slot,
1322c50d8ae3SPaolo Bonzini 				     gfn_t gfn_offset, unsigned long mask)
1323c50d8ae3SPaolo Bonzini {
1324c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1325c50d8ae3SPaolo Bonzini 
1326897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1327a6a0b05dSBen Gardon 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1328a6a0b05dSBen Gardon 				slot->base_gfn + gfn_offset, mask, true);
1329e2209710SBen Gardon 
1330e2209710SBen Gardon 	if (!kvm_memslots_have_rmaps(kvm))
1331e2209710SBen Gardon 		return;
1332e2209710SBen Gardon 
1333c50d8ae3SPaolo Bonzini 	while (mask) {
133493e083d4SDavid Matlack 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
13353bae0459SSean Christopherson 					PG_LEVEL_4K, slot);
1336c50d8ae3SPaolo Bonzini 		__rmap_write_protect(kvm, rmap_head, false);
1337c50d8ae3SPaolo Bonzini 
1338c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1339c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1340c50d8ae3SPaolo Bonzini 	}
1341c50d8ae3SPaolo Bonzini }
1342c50d8ae3SPaolo Bonzini 
1343c50d8ae3SPaolo Bonzini /**
1344c50d8ae3SPaolo Bonzini  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1345c50d8ae3SPaolo Bonzini  * protect the page if the D-bit isn't supported.
1346c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1347c50d8ae3SPaolo Bonzini  * @slot: slot to clear D-bit
1348c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1349c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should clear D-bit
1350c50d8ae3SPaolo Bonzini  *
1351c50d8ae3SPaolo Bonzini  * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1352c50d8ae3SPaolo Bonzini  */
1353a018eba5SSean Christopherson static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1354c50d8ae3SPaolo Bonzini 					 struct kvm_memory_slot *slot,
1355c50d8ae3SPaolo Bonzini 					 gfn_t gfn_offset, unsigned long mask)
1356c50d8ae3SPaolo Bonzini {
1357c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1358c50d8ae3SPaolo Bonzini 
1359897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1360a6a0b05dSBen Gardon 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1361a6a0b05dSBen Gardon 				slot->base_gfn + gfn_offset, mask, false);
1362e2209710SBen Gardon 
1363e2209710SBen Gardon 	if (!kvm_memslots_have_rmaps(kvm))
1364e2209710SBen Gardon 		return;
1365e2209710SBen Gardon 
1366c50d8ae3SPaolo Bonzini 	while (mask) {
136793e083d4SDavid Matlack 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
13683bae0459SSean Christopherson 					PG_LEVEL_4K, slot);
13690a234f5dSSean Christopherson 		__rmap_clear_dirty(kvm, rmap_head, slot);
1370c50d8ae3SPaolo Bonzini 
1371c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1372c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1373c50d8ae3SPaolo Bonzini 	}
1374c50d8ae3SPaolo Bonzini }
1375c50d8ae3SPaolo Bonzini 
1376c50d8ae3SPaolo Bonzini /**
1377c50d8ae3SPaolo Bonzini  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1378c50d8ae3SPaolo Bonzini  * PT level pages.
1379c50d8ae3SPaolo Bonzini  *
1380c50d8ae3SPaolo Bonzini  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1381c50d8ae3SPaolo Bonzini  * enable dirty logging for them.
1382c50d8ae3SPaolo Bonzini  *
138389212919SKeqian Zhu  * We need to care about huge page mappings: e.g. during dirty logging we may
138489212919SKeqian Zhu  * have such mappings.
1385c50d8ae3SPaolo Bonzini  */
1386c50d8ae3SPaolo Bonzini void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1387c50d8ae3SPaolo Bonzini 				struct kvm_memory_slot *slot,
1388c50d8ae3SPaolo Bonzini 				gfn_t gfn_offset, unsigned long mask)
1389c50d8ae3SPaolo Bonzini {
139089212919SKeqian Zhu 	/*
139189212919SKeqian Zhu 	 * Huge pages are NOT write protected when we start dirty logging in
139289212919SKeqian Zhu 	 * initially-all-set mode; must write protect them here so that they
139389212919SKeqian Zhu 	 * are split to 4K on the first write.
139489212919SKeqian Zhu 	 *
139589212919SKeqian Zhu 	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
139689212919SKeqian Zhu 	 * of memslot has no such restriction, so the range can cross two large
139789212919SKeqian Zhu 	 * pages.
139889212919SKeqian Zhu 	 */
139989212919SKeqian Zhu 	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
140089212919SKeqian Zhu 		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
140189212919SKeqian Zhu 		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
140289212919SKeqian Zhu 
140389212919SKeqian Zhu 		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
140489212919SKeqian Zhu 
140589212919SKeqian Zhu 		/* Cross two large pages? */
140689212919SKeqian Zhu 		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
140789212919SKeqian Zhu 		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
140889212919SKeqian Zhu 			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
140989212919SKeqian Zhu 						       PG_LEVEL_2M);
141089212919SKeqian Zhu 	}
141189212919SKeqian Zhu 
141289212919SKeqian Zhu 	/* Now handle 4K PTEs.  */
1413a018eba5SSean Christopherson 	if (kvm_x86_ops.cpu_dirty_log_size)
1414a018eba5SSean Christopherson 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1415c50d8ae3SPaolo Bonzini 	else
1416c50d8ae3SPaolo Bonzini 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1417c50d8ae3SPaolo Bonzini }
1418c50d8ae3SPaolo Bonzini 
1419fb04a1edSPeter Xu int kvm_cpu_dirty_log_size(void)
1420fb04a1edSPeter Xu {
14216dd03800SSean Christopherson 	return kvm_x86_ops.cpu_dirty_log_size;
1422fb04a1edSPeter Xu }
1423fb04a1edSPeter Xu 
1424c50d8ae3SPaolo Bonzini bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
14253ad93562SKeqian Zhu 				    struct kvm_memory_slot *slot, u64 gfn,
14263ad93562SKeqian Zhu 				    int min_level)
1427c50d8ae3SPaolo Bonzini {
1428c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1429c50d8ae3SPaolo Bonzini 	int i;
1430c50d8ae3SPaolo Bonzini 	bool write_protected = false;
1431c50d8ae3SPaolo Bonzini 
1432e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
14333ad93562SKeqian Zhu 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
143493e083d4SDavid Matlack 			rmap_head = gfn_to_rmap(gfn, i, slot);
1435c50d8ae3SPaolo Bonzini 			write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1436c50d8ae3SPaolo Bonzini 		}
1437e2209710SBen Gardon 	}
1438c50d8ae3SPaolo Bonzini 
1439897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
144046044f72SBen Gardon 		write_protected |=
14413ad93562SKeqian Zhu 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
144246044f72SBen Gardon 
1443c50d8ae3SPaolo Bonzini 	return write_protected;
1444c50d8ae3SPaolo Bonzini }
1445c50d8ae3SPaolo Bonzini 
1446c50d8ae3SPaolo Bonzini static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1447c50d8ae3SPaolo Bonzini {
1448c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1449c50d8ae3SPaolo Bonzini 
1450c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
14513ad93562SKeqian Zhu 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1452c50d8ae3SPaolo Bonzini }
1453c50d8ae3SPaolo Bonzini 
14540a234f5dSSean Christopherson static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1455269e9552SHamza Mahfooz 			  const struct kvm_memory_slot *slot)
1456c50d8ae3SPaolo Bonzini {
145771f51d2cSMingwei Zhang 	return pte_list_destroy(kvm, rmap_head);
1458c50d8ae3SPaolo Bonzini }
1459c50d8ae3SPaolo Bonzini 
14603039bcc7SSean Christopherson static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1461c50d8ae3SPaolo Bonzini 			    struct kvm_memory_slot *slot, gfn_t gfn, int level,
14623039bcc7SSean Christopherson 			    pte_t unused)
1463c50d8ae3SPaolo Bonzini {
14640a234f5dSSean Christopherson 	return kvm_zap_rmapp(kvm, rmap_head, slot);
1465c50d8ae3SPaolo Bonzini }
1466c50d8ae3SPaolo Bonzini 
14673039bcc7SSean Christopherson static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1468c50d8ae3SPaolo Bonzini 			      struct kvm_memory_slot *slot, gfn_t gfn, int level,
14693039bcc7SSean Christopherson 			      pte_t pte)
1470c50d8ae3SPaolo Bonzini {
1471c50d8ae3SPaolo Bonzini 	u64 *sptep;
1472c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1473c50d8ae3SPaolo Bonzini 	int need_flush = 0;
1474c50d8ae3SPaolo Bonzini 	u64 new_spte;
1475c50d8ae3SPaolo Bonzini 	kvm_pfn_t new_pfn;
1476c50d8ae3SPaolo Bonzini 
14773039bcc7SSean Christopherson 	WARN_ON(pte_huge(pte));
14783039bcc7SSean Christopherson 	new_pfn = pte_pfn(pte);
1479c50d8ae3SPaolo Bonzini 
1480c50d8ae3SPaolo Bonzini restart:
1481c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
1482805a0f83SStephen Zhang 		rmap_printk("spte %p %llx gfn %llx (%d)\n",
1483c50d8ae3SPaolo Bonzini 			    sptep, *sptep, gfn, level);
1484c50d8ae3SPaolo Bonzini 
1485c50d8ae3SPaolo Bonzini 		need_flush = 1;
1486c50d8ae3SPaolo Bonzini 
14873039bcc7SSean Christopherson 		if (pte_write(pte)) {
148871f51d2cSMingwei Zhang 			pte_list_remove(kvm, rmap_head, sptep);
1489c50d8ae3SPaolo Bonzini 			goto restart;
1490c50d8ae3SPaolo Bonzini 		} else {
1491cb3eedabSPaolo Bonzini 			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
1492cb3eedabSPaolo Bonzini 					*sptep, new_pfn);
1493c50d8ae3SPaolo Bonzini 
149471f51d2cSMingwei Zhang 			mmu_spte_clear_track_bits(kvm, sptep);
1495c50d8ae3SPaolo Bonzini 			mmu_spte_set(sptep, new_spte);
1496c50d8ae3SPaolo Bonzini 		}
1497c50d8ae3SPaolo Bonzini 	}
1498c50d8ae3SPaolo Bonzini 
1499c50d8ae3SPaolo Bonzini 	if (need_flush && kvm_available_flush_tlb_with_range()) {
1500c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1501c50d8ae3SPaolo Bonzini 		return 0;
1502c50d8ae3SPaolo Bonzini 	}
1503c50d8ae3SPaolo Bonzini 
1504c50d8ae3SPaolo Bonzini 	return need_flush;
1505c50d8ae3SPaolo Bonzini }
1506c50d8ae3SPaolo Bonzini 
1507c50d8ae3SPaolo Bonzini struct slot_rmap_walk_iterator {
1508c50d8ae3SPaolo Bonzini 	/* input fields. */
1509269e9552SHamza Mahfooz 	const struct kvm_memory_slot *slot;
1510c50d8ae3SPaolo Bonzini 	gfn_t start_gfn;
1511c50d8ae3SPaolo Bonzini 	gfn_t end_gfn;
1512c50d8ae3SPaolo Bonzini 	int start_level;
1513c50d8ae3SPaolo Bonzini 	int end_level;
1514c50d8ae3SPaolo Bonzini 
1515c50d8ae3SPaolo Bonzini 	/* output fields. */
1516c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1517c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap;
1518c50d8ae3SPaolo Bonzini 	int level;
1519c50d8ae3SPaolo Bonzini 
1520c50d8ae3SPaolo Bonzini 	/* private field. */
1521c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *end_rmap;
1522c50d8ae3SPaolo Bonzini };
1523c50d8ae3SPaolo Bonzini 
1524c50d8ae3SPaolo Bonzini static void
1525c50d8ae3SPaolo Bonzini rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1526c50d8ae3SPaolo Bonzini {
1527c50d8ae3SPaolo Bonzini 	iterator->level = level;
1528c50d8ae3SPaolo Bonzini 	iterator->gfn = iterator->start_gfn;
152993e083d4SDavid Matlack 	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
153093e083d4SDavid Matlack 	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1531c50d8ae3SPaolo Bonzini }
1532c50d8ae3SPaolo Bonzini 
1533c50d8ae3SPaolo Bonzini static void
1534c50d8ae3SPaolo Bonzini slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1535269e9552SHamza Mahfooz 		    const struct kvm_memory_slot *slot, int start_level,
1536c50d8ae3SPaolo Bonzini 		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
1537c50d8ae3SPaolo Bonzini {
1538c50d8ae3SPaolo Bonzini 	iterator->slot = slot;
1539c50d8ae3SPaolo Bonzini 	iterator->start_level = start_level;
1540c50d8ae3SPaolo Bonzini 	iterator->end_level = end_level;
1541c50d8ae3SPaolo Bonzini 	iterator->start_gfn = start_gfn;
1542c50d8ae3SPaolo Bonzini 	iterator->end_gfn = end_gfn;
1543c50d8ae3SPaolo Bonzini 
1544c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->start_level);
1545c50d8ae3SPaolo Bonzini }
1546c50d8ae3SPaolo Bonzini 
1547c50d8ae3SPaolo Bonzini static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1548c50d8ae3SPaolo Bonzini {
1549c50d8ae3SPaolo Bonzini 	return !!iterator->rmap;
1550c50d8ae3SPaolo Bonzini }
1551c50d8ae3SPaolo Bonzini 
1552c50d8ae3SPaolo Bonzini static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1553c50d8ae3SPaolo Bonzini {
1554c50d8ae3SPaolo Bonzini 	if (++iterator->rmap <= iterator->end_rmap) {
1555c50d8ae3SPaolo Bonzini 		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1556c50d8ae3SPaolo Bonzini 		return;
1557c50d8ae3SPaolo Bonzini 	}
1558c50d8ae3SPaolo Bonzini 
1559c50d8ae3SPaolo Bonzini 	if (++iterator->level > iterator->end_level) {
1560c50d8ae3SPaolo Bonzini 		iterator->rmap = NULL;
1561c50d8ae3SPaolo Bonzini 		return;
1562c50d8ae3SPaolo Bonzini 	}
1563c50d8ae3SPaolo Bonzini 
1564c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->level);
1565c50d8ae3SPaolo Bonzini }
1566c50d8ae3SPaolo Bonzini 
1567c50d8ae3SPaolo Bonzini #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
1568c50d8ae3SPaolo Bonzini 	   _start_gfn, _end_gfn, _iter_)				\
1569c50d8ae3SPaolo Bonzini 	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
1570c50d8ae3SPaolo Bonzini 				 _end_level_, _start_gfn, _end_gfn);	\
1571c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_okay(_iter_);				\
1572c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_next(_iter_))
1573c50d8ae3SPaolo Bonzini 
15743039bcc7SSean Christopherson typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1575c1b91493SSean Christopherson 			       struct kvm_memory_slot *slot, gfn_t gfn,
15763039bcc7SSean Christopherson 			       int level, pte_t pte);
1577c1b91493SSean Christopherson 
15783039bcc7SSean Christopherson static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
15793039bcc7SSean Christopherson 						 struct kvm_gfn_range *range,
1580c1b91493SSean Christopherson 						 rmap_handler_t handler)
1581c50d8ae3SPaolo Bonzini {
1582c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
15833039bcc7SSean Christopherson 	bool ret = false;
1584c50d8ae3SPaolo Bonzini 
15853039bcc7SSean Christopherson 	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
15863039bcc7SSean Christopherson 				 range->start, range->end - 1, &iterator)
15873039bcc7SSean Christopherson 		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
15883039bcc7SSean Christopherson 			       iterator.level, range->pte);
1589c50d8ae3SPaolo Bonzini 
1590c50d8ae3SPaolo Bonzini 	return ret;
1591c50d8ae3SPaolo Bonzini }
1592c50d8ae3SPaolo Bonzini 
15933039bcc7SSean Christopherson bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1594c50d8ae3SPaolo Bonzini {
1595e2209710SBen Gardon 	bool flush = false;
1596c50d8ae3SPaolo Bonzini 
1597e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
15983039bcc7SSean Christopherson 		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
1599063afacdSBen Gardon 
1600897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
16013039bcc7SSean Christopherson 		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1602063afacdSBen Gardon 
16033039bcc7SSean Christopherson 	return flush;
1604c50d8ae3SPaolo Bonzini }
1605c50d8ae3SPaolo Bonzini 
16063039bcc7SSean Christopherson bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1607c50d8ae3SPaolo Bonzini {
1608e2209710SBen Gardon 	bool flush = false;
16091d8dd6b3SBen Gardon 
1610e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
16113039bcc7SSean Christopherson 		flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
16121d8dd6b3SBen Gardon 
1613897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
16143039bcc7SSean Christopherson 		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
16151d8dd6b3SBen Gardon 
16163039bcc7SSean Christopherson 	return flush;
1617c50d8ae3SPaolo Bonzini }
1618c50d8ae3SPaolo Bonzini 
16193039bcc7SSean Christopherson static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1620c50d8ae3SPaolo Bonzini 			  struct kvm_memory_slot *slot, gfn_t gfn, int level,
16213039bcc7SSean Christopherson 			  pte_t unused)
1622c50d8ae3SPaolo Bonzini {
1623c50d8ae3SPaolo Bonzini 	u64 *sptep;
16243f649ab7SKees Cook 	struct rmap_iterator iter;
1625c50d8ae3SPaolo Bonzini 	int young = 0;
1626c50d8ae3SPaolo Bonzini 
1627c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1628c50d8ae3SPaolo Bonzini 		young |= mmu_spte_age(sptep);
1629c50d8ae3SPaolo Bonzini 
1630c50d8ae3SPaolo Bonzini 	return young;
1631c50d8ae3SPaolo Bonzini }
1632c50d8ae3SPaolo Bonzini 
16333039bcc7SSean Christopherson static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1634c50d8ae3SPaolo Bonzini 			       struct kvm_memory_slot *slot, gfn_t gfn,
16353039bcc7SSean Christopherson 			       int level, pte_t unused)
1636c50d8ae3SPaolo Bonzini {
1637c50d8ae3SPaolo Bonzini 	u64 *sptep;
1638c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1639c50d8ae3SPaolo Bonzini 
1640c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1641c50d8ae3SPaolo Bonzini 		if (is_accessed_spte(*sptep))
1642c50d8ae3SPaolo Bonzini 			return 1;
1643c50d8ae3SPaolo Bonzini 	return 0;
1644c50d8ae3SPaolo Bonzini }
1645c50d8ae3SPaolo Bonzini 
1646c50d8ae3SPaolo Bonzini #define RMAP_RECYCLE_THRESHOLD 1000
1647c50d8ae3SPaolo Bonzini 
1648c50d8ae3SPaolo Bonzini static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1649c50d8ae3SPaolo Bonzini {
1650601f8af0SDavid Matlack 	struct kvm_memory_slot *slot;
1651c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1652c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1653c50d8ae3SPaolo Bonzini 
165457354682SSean Christopherson 	sp = sptep_to_sp(spte);
1655601f8af0SDavid Matlack 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
165693e083d4SDavid Matlack 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1657c50d8ae3SPaolo Bonzini 
16583039bcc7SSean Christopherson 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
1659c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1660c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1661c50d8ae3SPaolo Bonzini }
1662c50d8ae3SPaolo Bonzini 
16633039bcc7SSean Christopherson bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1664c50d8ae3SPaolo Bonzini {
1665e2209710SBen Gardon 	bool young = false;
1666f8e14497SBen Gardon 
1667e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
16683039bcc7SSean Christopherson 		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
16693039bcc7SSean Christopherson 
1670897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
16713039bcc7SSean Christopherson 		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1672f8e14497SBen Gardon 
1673f8e14497SBen Gardon 	return young;
1674c50d8ae3SPaolo Bonzini }
1675c50d8ae3SPaolo Bonzini 
16763039bcc7SSean Christopherson bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1677c50d8ae3SPaolo Bonzini {
1678e2209710SBen Gardon 	bool young = false;
1679f8e14497SBen Gardon 
1680e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm))
16813039bcc7SSean Christopherson 		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
16823039bcc7SSean Christopherson 
1683897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
16843039bcc7SSean Christopherson 		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1685f8e14497SBen Gardon 
1686f8e14497SBen Gardon 	return young;
1687c50d8ae3SPaolo Bonzini }
1688c50d8ae3SPaolo Bonzini 
1689c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1690c50d8ae3SPaolo Bonzini static int is_empty_shadow_page(u64 *spt)
1691c50d8ae3SPaolo Bonzini {
1692c50d8ae3SPaolo Bonzini 	u64 *pos;
1693c50d8ae3SPaolo Bonzini 	u64 *end;
1694c50d8ae3SPaolo Bonzini 
1695c50d8ae3SPaolo Bonzini 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1696c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*pos)) {
1697c50d8ae3SPaolo Bonzini 			printk(KERN_ERR "%s: %p %llx\n", __func__,
1698c50d8ae3SPaolo Bonzini 			       pos, *pos);
1699c50d8ae3SPaolo Bonzini 			return 0;
1700c50d8ae3SPaolo Bonzini 		}
1701c50d8ae3SPaolo Bonzini 	return 1;
1702c50d8ae3SPaolo Bonzini }
1703c50d8ae3SPaolo Bonzini #endif
1704c50d8ae3SPaolo Bonzini 
1705c50d8ae3SPaolo Bonzini /*
1706c50d8ae3SPaolo Bonzini  * This value is the sum of all of the kvm instances's
1707c50d8ae3SPaolo Bonzini  * kvm->arch.n_used_mmu_pages values.  We need a global,
1708c50d8ae3SPaolo Bonzini  * aggregate version in order to make the slab shrinker
1709c50d8ae3SPaolo Bonzini  * faster
1710c50d8ae3SPaolo Bonzini  */
1711d5aaad6fSSean Christopherson static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
1712c50d8ae3SPaolo Bonzini {
1713c50d8ae3SPaolo Bonzini 	kvm->arch.n_used_mmu_pages += nr;
1714c50d8ae3SPaolo Bonzini 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1715c50d8ae3SPaolo Bonzini }
1716c50d8ae3SPaolo Bonzini 
1717c50d8ae3SPaolo Bonzini static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1718c50d8ae3SPaolo Bonzini {
1719c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1720c50d8ae3SPaolo Bonzini 	hlist_del(&sp->hash_link);
1721c50d8ae3SPaolo Bonzini 	list_del(&sp->link);
1722c50d8ae3SPaolo Bonzini 	free_page((unsigned long)sp->spt);
1723c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
1724c50d8ae3SPaolo Bonzini 		free_page((unsigned long)sp->gfns);
1725c50d8ae3SPaolo Bonzini 	kmem_cache_free(mmu_page_header_cache, sp);
1726c50d8ae3SPaolo Bonzini }
1727c50d8ae3SPaolo Bonzini 
1728c50d8ae3SPaolo Bonzini static unsigned kvm_page_table_hashfn(gfn_t gfn)
1729c50d8ae3SPaolo Bonzini {
1730c50d8ae3SPaolo Bonzini 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1731c50d8ae3SPaolo Bonzini }
1732c50d8ae3SPaolo Bonzini 
1733c50d8ae3SPaolo Bonzini static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1734c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp, u64 *parent_pte)
1735c50d8ae3SPaolo Bonzini {
1736c50d8ae3SPaolo Bonzini 	if (!parent_pte)
1737c50d8ae3SPaolo Bonzini 		return;
1738c50d8ae3SPaolo Bonzini 
1739c50d8ae3SPaolo Bonzini 	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1740c50d8ae3SPaolo Bonzini }
1741c50d8ae3SPaolo Bonzini 
1742c50d8ae3SPaolo Bonzini static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1743c50d8ae3SPaolo Bonzini 				       u64 *parent_pte)
1744c50d8ae3SPaolo Bonzini {
1745c50d8ae3SPaolo Bonzini 	__pte_list_remove(parent_pte, &sp->parent_ptes);
1746c50d8ae3SPaolo Bonzini }
1747c50d8ae3SPaolo Bonzini 
1748c50d8ae3SPaolo Bonzini static void drop_parent_pte(struct kvm_mmu_page *sp,
1749c50d8ae3SPaolo Bonzini 			    u64 *parent_pte)
1750c50d8ae3SPaolo Bonzini {
1751c50d8ae3SPaolo Bonzini 	mmu_page_remove_parent_pte(sp, parent_pte);
1752c50d8ae3SPaolo Bonzini 	mmu_spte_clear_no_track(parent_pte);
1753c50d8ae3SPaolo Bonzini }
1754c50d8ae3SPaolo Bonzini 
1755c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1756c50d8ae3SPaolo Bonzini {
1757c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1758c50d8ae3SPaolo Bonzini 
175994ce87efSSean Christopherson 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
176094ce87efSSean Christopherson 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1761c50d8ae3SPaolo Bonzini 	if (!direct)
176294ce87efSSean Christopherson 		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1763c50d8ae3SPaolo Bonzini 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1764c50d8ae3SPaolo Bonzini 
1765c50d8ae3SPaolo Bonzini 	/*
1766c50d8ae3SPaolo Bonzini 	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
1767c50d8ae3SPaolo Bonzini 	 * depends on valid pages being added to the head of the list.  See
1768c50d8ae3SPaolo Bonzini 	 * comments in kvm_zap_obsolete_pages().
1769c50d8ae3SPaolo Bonzini 	 */
1770c50d8ae3SPaolo Bonzini 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1771c50d8ae3SPaolo Bonzini 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1772c50d8ae3SPaolo Bonzini 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1773c50d8ae3SPaolo Bonzini 	return sp;
1774c50d8ae3SPaolo Bonzini }
1775c50d8ae3SPaolo Bonzini 
1776c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte);
1777c50d8ae3SPaolo Bonzini static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1778c50d8ae3SPaolo Bonzini {
1779c50d8ae3SPaolo Bonzini 	u64 *sptep;
1780c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1781c50d8ae3SPaolo Bonzini 
1782c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1783c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
1784c50d8ae3SPaolo Bonzini 	}
1785c50d8ae3SPaolo Bonzini }
1786c50d8ae3SPaolo Bonzini 
1787c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte)
1788c50d8ae3SPaolo Bonzini {
1789c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1790c50d8ae3SPaolo Bonzini 	unsigned int index;
1791c50d8ae3SPaolo Bonzini 
179257354682SSean Christopherson 	sp = sptep_to_sp(spte);
1793c50d8ae3SPaolo Bonzini 	index = spte - sp->spt;
1794c50d8ae3SPaolo Bonzini 	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1795c50d8ae3SPaolo Bonzini 		return;
1796c50d8ae3SPaolo Bonzini 	if (sp->unsync_children++)
1797c50d8ae3SPaolo Bonzini 		return;
1798c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
1799c50d8ae3SPaolo Bonzini }
1800c50d8ae3SPaolo Bonzini 
1801c50d8ae3SPaolo Bonzini static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1802c50d8ae3SPaolo Bonzini 			       struct kvm_mmu_page *sp)
1803c50d8ae3SPaolo Bonzini {
1804c50d8ae3SPaolo Bonzini 	return 0;
1805c50d8ae3SPaolo Bonzini }
1806c50d8ae3SPaolo Bonzini 
1807c50d8ae3SPaolo Bonzini #define KVM_PAGE_ARRAY_NR 16
1808c50d8ae3SPaolo Bonzini 
1809c50d8ae3SPaolo Bonzini struct kvm_mmu_pages {
1810c50d8ae3SPaolo Bonzini 	struct mmu_page_and_offset {
1811c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
1812c50d8ae3SPaolo Bonzini 		unsigned int idx;
1813c50d8ae3SPaolo Bonzini 	} page[KVM_PAGE_ARRAY_NR];
1814c50d8ae3SPaolo Bonzini 	unsigned int nr;
1815c50d8ae3SPaolo Bonzini };
1816c50d8ae3SPaolo Bonzini 
1817c50d8ae3SPaolo Bonzini static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1818c50d8ae3SPaolo Bonzini 			 int idx)
1819c50d8ae3SPaolo Bonzini {
1820c50d8ae3SPaolo Bonzini 	int i;
1821c50d8ae3SPaolo Bonzini 
1822c50d8ae3SPaolo Bonzini 	if (sp->unsync)
1823c50d8ae3SPaolo Bonzini 		for (i=0; i < pvec->nr; i++)
1824c50d8ae3SPaolo Bonzini 			if (pvec->page[i].sp == sp)
1825c50d8ae3SPaolo Bonzini 				return 0;
1826c50d8ae3SPaolo Bonzini 
1827c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].sp = sp;
1828c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].idx = idx;
1829c50d8ae3SPaolo Bonzini 	pvec->nr++;
1830c50d8ae3SPaolo Bonzini 	return (pvec->nr == KVM_PAGE_ARRAY_NR);
1831c50d8ae3SPaolo Bonzini }
1832c50d8ae3SPaolo Bonzini 
1833c50d8ae3SPaolo Bonzini static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1834c50d8ae3SPaolo Bonzini {
1835c50d8ae3SPaolo Bonzini 	--sp->unsync_children;
1836c50d8ae3SPaolo Bonzini 	WARN_ON((int)sp->unsync_children < 0);
1837c50d8ae3SPaolo Bonzini 	__clear_bit(idx, sp->unsync_child_bitmap);
1838c50d8ae3SPaolo Bonzini }
1839c50d8ae3SPaolo Bonzini 
1840c50d8ae3SPaolo Bonzini static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1841c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
1842c50d8ae3SPaolo Bonzini {
1843c50d8ae3SPaolo Bonzini 	int i, ret, nr_unsync_leaf = 0;
1844c50d8ae3SPaolo Bonzini 
1845c50d8ae3SPaolo Bonzini 	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1846c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
1847c50d8ae3SPaolo Bonzini 		u64 ent = sp->spt[i];
1848c50d8ae3SPaolo Bonzini 
1849c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1850c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
1851c50d8ae3SPaolo Bonzini 			continue;
1852c50d8ae3SPaolo Bonzini 		}
1853c50d8ae3SPaolo Bonzini 
1854e47c4aeeSSean Christopherson 		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1855c50d8ae3SPaolo Bonzini 
1856c50d8ae3SPaolo Bonzini 		if (child->unsync_children) {
1857c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
1858c50d8ae3SPaolo Bonzini 				return -ENOSPC;
1859c50d8ae3SPaolo Bonzini 
1860c50d8ae3SPaolo Bonzini 			ret = __mmu_unsync_walk(child, pvec);
1861c50d8ae3SPaolo Bonzini 			if (!ret) {
1862c50d8ae3SPaolo Bonzini 				clear_unsync_child_bit(sp, i);
1863c50d8ae3SPaolo Bonzini 				continue;
1864c50d8ae3SPaolo Bonzini 			} else if (ret > 0) {
1865c50d8ae3SPaolo Bonzini 				nr_unsync_leaf += ret;
1866c50d8ae3SPaolo Bonzini 			} else
1867c50d8ae3SPaolo Bonzini 				return ret;
1868c50d8ae3SPaolo Bonzini 		} else if (child->unsync) {
1869c50d8ae3SPaolo Bonzini 			nr_unsync_leaf++;
1870c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
1871c50d8ae3SPaolo Bonzini 				return -ENOSPC;
1872c50d8ae3SPaolo Bonzini 		} else
1873c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
1874c50d8ae3SPaolo Bonzini 	}
1875c50d8ae3SPaolo Bonzini 
1876c50d8ae3SPaolo Bonzini 	return nr_unsync_leaf;
1877c50d8ae3SPaolo Bonzini }
1878c50d8ae3SPaolo Bonzini 
1879c50d8ae3SPaolo Bonzini #define INVALID_INDEX (-1)
1880c50d8ae3SPaolo Bonzini 
1881c50d8ae3SPaolo Bonzini static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1882c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
1883c50d8ae3SPaolo Bonzini {
1884c50d8ae3SPaolo Bonzini 	pvec->nr = 0;
1885c50d8ae3SPaolo Bonzini 	if (!sp->unsync_children)
1886c50d8ae3SPaolo Bonzini 		return 0;
1887c50d8ae3SPaolo Bonzini 
1888c50d8ae3SPaolo Bonzini 	mmu_pages_add(pvec, sp, INVALID_INDEX);
1889c50d8ae3SPaolo Bonzini 	return __mmu_unsync_walk(sp, pvec);
1890c50d8ae3SPaolo Bonzini }
1891c50d8ae3SPaolo Bonzini 
1892c50d8ae3SPaolo Bonzini static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1893c50d8ae3SPaolo Bonzini {
1894c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->unsync);
1895c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_sync_page(sp);
1896c50d8ae3SPaolo Bonzini 	sp->unsync = 0;
1897c50d8ae3SPaolo Bonzini 	--kvm->stat.mmu_unsync;
1898c50d8ae3SPaolo Bonzini }
1899c50d8ae3SPaolo Bonzini 
1900c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1901c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list);
1902c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1903c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list);
1904c50d8ae3SPaolo Bonzini 
1905ac101b7cSSean Christopherson #define for_each_valid_sp(_kvm, _sp, _list)				\
1906ac101b7cSSean Christopherson 	hlist_for_each_entry(_sp, _list, hash_link)			\
1907c50d8ae3SPaolo Bonzini 		if (is_obsolete_sp((_kvm), (_sp))) {			\
1908c50d8ae3SPaolo Bonzini 		} else
1909c50d8ae3SPaolo Bonzini 
1910c50d8ae3SPaolo Bonzini #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
1911ac101b7cSSean Christopherson 	for_each_valid_sp(_kvm, _sp,					\
1912ac101b7cSSean Christopherson 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
1913c50d8ae3SPaolo Bonzini 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
1914c50d8ae3SPaolo Bonzini 
1915479a1efcSSean Christopherson static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1916c50d8ae3SPaolo Bonzini 			 struct list_head *invalid_list)
1917c50d8ae3SPaolo Bonzini {
19182640b086SSean Christopherson 	if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
1919c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1920c50d8ae3SPaolo Bonzini 		return false;
1921c50d8ae3SPaolo Bonzini 	}
1922c50d8ae3SPaolo Bonzini 
1923c50d8ae3SPaolo Bonzini 	return true;
1924c50d8ae3SPaolo Bonzini }
1925c50d8ae3SPaolo Bonzini 
1926c50d8ae3SPaolo Bonzini static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1927c50d8ae3SPaolo Bonzini 					struct list_head *invalid_list,
1928c50d8ae3SPaolo Bonzini 					bool remote_flush)
1929c50d8ae3SPaolo Bonzini {
1930c50d8ae3SPaolo Bonzini 	if (!remote_flush && list_empty(invalid_list))
1931c50d8ae3SPaolo Bonzini 		return false;
1932c50d8ae3SPaolo Bonzini 
1933c50d8ae3SPaolo Bonzini 	if (!list_empty(invalid_list))
1934c50d8ae3SPaolo Bonzini 		kvm_mmu_commit_zap_page(kvm, invalid_list);
1935c50d8ae3SPaolo Bonzini 	else
1936c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
1937c50d8ae3SPaolo Bonzini 	return true;
1938c50d8ae3SPaolo Bonzini }
1939c50d8ae3SPaolo Bonzini 
1940c50d8ae3SPaolo Bonzini static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
1941c50d8ae3SPaolo Bonzini 				 struct list_head *invalid_list,
1942c50d8ae3SPaolo Bonzini 				 bool remote_flush, bool local_flush)
1943c50d8ae3SPaolo Bonzini {
1944c50d8ae3SPaolo Bonzini 	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
1945c50d8ae3SPaolo Bonzini 		return;
1946c50d8ae3SPaolo Bonzini 
1947c50d8ae3SPaolo Bonzini 	if (local_flush)
19488c8560b8SSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1949c50d8ae3SPaolo Bonzini }
1950c50d8ae3SPaolo Bonzini 
1951c50d8ae3SPaolo Bonzini #ifdef CONFIG_KVM_MMU_AUDIT
1952c50d8ae3SPaolo Bonzini #include "mmu_audit.c"
1953c50d8ae3SPaolo Bonzini #else
1954c50d8ae3SPaolo Bonzini static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
1955c50d8ae3SPaolo Bonzini static void mmu_audit_disable(void) { }
1956c50d8ae3SPaolo Bonzini #endif
1957c50d8ae3SPaolo Bonzini 
1958c50d8ae3SPaolo Bonzini static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1959c50d8ae3SPaolo Bonzini {
1960c50d8ae3SPaolo Bonzini 	return sp->role.invalid ||
1961c50d8ae3SPaolo Bonzini 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1962c50d8ae3SPaolo Bonzini }
1963c50d8ae3SPaolo Bonzini 
1964c50d8ae3SPaolo Bonzini struct mmu_page_path {
1965c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1966c50d8ae3SPaolo Bonzini 	unsigned int idx[PT64_ROOT_MAX_LEVEL];
1967c50d8ae3SPaolo Bonzini };
1968c50d8ae3SPaolo Bonzini 
1969c50d8ae3SPaolo Bonzini #define for_each_sp(pvec, sp, parents, i)			\
1970c50d8ae3SPaolo Bonzini 		for (i = mmu_pages_first(&pvec, &parents);	\
1971c50d8ae3SPaolo Bonzini 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
1972c50d8ae3SPaolo Bonzini 			i = mmu_pages_next(&pvec, &parents, i))
1973c50d8ae3SPaolo Bonzini 
1974c50d8ae3SPaolo Bonzini static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1975c50d8ae3SPaolo Bonzini 			  struct mmu_page_path *parents,
1976c50d8ae3SPaolo Bonzini 			  int i)
1977c50d8ae3SPaolo Bonzini {
1978c50d8ae3SPaolo Bonzini 	int n;
1979c50d8ae3SPaolo Bonzini 
1980c50d8ae3SPaolo Bonzini 	for (n = i+1; n < pvec->nr; n++) {
1981c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp = pvec->page[n].sp;
1982c50d8ae3SPaolo Bonzini 		unsigned idx = pvec->page[n].idx;
1983c50d8ae3SPaolo Bonzini 		int level = sp->role.level;
1984c50d8ae3SPaolo Bonzini 
1985c50d8ae3SPaolo Bonzini 		parents->idx[level-1] = idx;
19863bae0459SSean Christopherson 		if (level == PG_LEVEL_4K)
1987c50d8ae3SPaolo Bonzini 			break;
1988c50d8ae3SPaolo Bonzini 
1989c50d8ae3SPaolo Bonzini 		parents->parent[level-2] = sp;
1990c50d8ae3SPaolo Bonzini 	}
1991c50d8ae3SPaolo Bonzini 
1992c50d8ae3SPaolo Bonzini 	return n;
1993c50d8ae3SPaolo Bonzini }
1994c50d8ae3SPaolo Bonzini 
1995c50d8ae3SPaolo Bonzini static int mmu_pages_first(struct kvm_mmu_pages *pvec,
1996c50d8ae3SPaolo Bonzini 			   struct mmu_page_path *parents)
1997c50d8ae3SPaolo Bonzini {
1998c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1999c50d8ae3SPaolo Bonzini 	int level;
2000c50d8ae3SPaolo Bonzini 
2001c50d8ae3SPaolo Bonzini 	if (pvec->nr == 0)
2002c50d8ae3SPaolo Bonzini 		return 0;
2003c50d8ae3SPaolo Bonzini 
2004c50d8ae3SPaolo Bonzini 	WARN_ON(pvec->page[0].idx != INVALID_INDEX);
2005c50d8ae3SPaolo Bonzini 
2006c50d8ae3SPaolo Bonzini 	sp = pvec->page[0].sp;
2007c50d8ae3SPaolo Bonzini 	level = sp->role.level;
20083bae0459SSean Christopherson 	WARN_ON(level == PG_LEVEL_4K);
2009c50d8ae3SPaolo Bonzini 
2010c50d8ae3SPaolo Bonzini 	parents->parent[level-2] = sp;
2011c50d8ae3SPaolo Bonzini 
2012c50d8ae3SPaolo Bonzini 	/* Also set up a sentinel.  Further entries in pvec are all
2013c50d8ae3SPaolo Bonzini 	 * children of sp, so this element is never overwritten.
2014c50d8ae3SPaolo Bonzini 	 */
2015c50d8ae3SPaolo Bonzini 	parents->parent[level-1] = NULL;
2016c50d8ae3SPaolo Bonzini 	return mmu_pages_next(pvec, parents, 0);
2017c50d8ae3SPaolo Bonzini }
2018c50d8ae3SPaolo Bonzini 
2019c50d8ae3SPaolo Bonzini static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2020c50d8ae3SPaolo Bonzini {
2021c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2022c50d8ae3SPaolo Bonzini 	unsigned int level = 0;
2023c50d8ae3SPaolo Bonzini 
2024c50d8ae3SPaolo Bonzini 	do {
2025c50d8ae3SPaolo Bonzini 		unsigned int idx = parents->idx[level];
2026c50d8ae3SPaolo Bonzini 		sp = parents->parent[level];
2027c50d8ae3SPaolo Bonzini 		if (!sp)
2028c50d8ae3SPaolo Bonzini 			return;
2029c50d8ae3SPaolo Bonzini 
2030c50d8ae3SPaolo Bonzini 		WARN_ON(idx == INVALID_INDEX);
2031c50d8ae3SPaolo Bonzini 		clear_unsync_child_bit(sp, idx);
2032c50d8ae3SPaolo Bonzini 		level++;
2033c50d8ae3SPaolo Bonzini 	} while (!sp->unsync_children);
2034c50d8ae3SPaolo Bonzini }
2035c50d8ae3SPaolo Bonzini 
2036c50d8ae3SPaolo Bonzini static void mmu_sync_children(struct kvm_vcpu *vcpu,
2037c50d8ae3SPaolo Bonzini 			      struct kvm_mmu_page *parent)
2038c50d8ae3SPaolo Bonzini {
2039c50d8ae3SPaolo Bonzini 	int i;
2040c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2041c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
2042c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
2043c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2044c50d8ae3SPaolo Bonzini 	bool flush = false;
2045c50d8ae3SPaolo Bonzini 
2046c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
2047c50d8ae3SPaolo Bonzini 		bool protected = false;
2048c50d8ae3SPaolo Bonzini 
2049c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i)
2050c50d8ae3SPaolo Bonzini 			protected |= rmap_write_protect(vcpu, sp->gfn);
2051c50d8ae3SPaolo Bonzini 
2052c50d8ae3SPaolo Bonzini 		if (protected) {
2053c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs(vcpu->kvm);
2054c50d8ae3SPaolo Bonzini 			flush = false;
2055c50d8ae3SPaolo Bonzini 		}
2056c50d8ae3SPaolo Bonzini 
2057c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
2058479a1efcSSean Christopherson 			kvm_unlink_unsync_page(vcpu->kvm, sp);
2059c50d8ae3SPaolo Bonzini 			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
2060c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
2061c50d8ae3SPaolo Bonzini 		}
2062531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2063c50d8ae3SPaolo Bonzini 			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2064531810caSBen Gardon 			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2065c50d8ae3SPaolo Bonzini 			flush = false;
2066c50d8ae3SPaolo Bonzini 		}
2067c50d8ae3SPaolo Bonzini 	}
2068c50d8ae3SPaolo Bonzini 
2069c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2070c50d8ae3SPaolo Bonzini }
2071c50d8ae3SPaolo Bonzini 
2072c50d8ae3SPaolo Bonzini static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2073c50d8ae3SPaolo Bonzini {
2074c50d8ae3SPaolo Bonzini 	atomic_set(&sp->write_flooding_count,  0);
2075c50d8ae3SPaolo Bonzini }
2076c50d8ae3SPaolo Bonzini 
2077c50d8ae3SPaolo Bonzini static void clear_sp_write_flooding_count(u64 *spte)
2078c50d8ae3SPaolo Bonzini {
207957354682SSean Christopherson 	__clear_sp_write_flooding_count(sptep_to_sp(spte));
2080c50d8ae3SPaolo Bonzini }
2081c50d8ae3SPaolo Bonzini 
2082c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2083c50d8ae3SPaolo Bonzini 					     gfn_t gfn,
2084c50d8ae3SPaolo Bonzini 					     gva_t gaddr,
2085c50d8ae3SPaolo Bonzini 					     unsigned level,
2086c50d8ae3SPaolo Bonzini 					     int direct,
20870a2b64c5SBen Gardon 					     unsigned int access)
2088c50d8ae3SPaolo Bonzini {
2089fb58a9c3SSean Christopherson 	bool direct_mmu = vcpu->arch.mmu->direct_map;
2090c50d8ae3SPaolo Bonzini 	union kvm_mmu_page_role role;
2091ac101b7cSSean Christopherson 	struct hlist_head *sp_list;
2092c50d8ae3SPaolo Bonzini 	unsigned quadrant;
2093c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2094c50d8ae3SPaolo Bonzini 	int collisions = 0;
2095c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2096c50d8ae3SPaolo Bonzini 
2097c50d8ae3SPaolo Bonzini 	role = vcpu->arch.mmu->mmu_role.base;
2098c50d8ae3SPaolo Bonzini 	role.level = level;
2099c50d8ae3SPaolo Bonzini 	role.direct = direct;
2100c50d8ae3SPaolo Bonzini 	if (role.direct)
2101c50d8ae3SPaolo Bonzini 		role.gpte_is_8_bytes = true;
2102c50d8ae3SPaolo Bonzini 	role.access = access;
2103fb58a9c3SSean Christopherson 	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2104c50d8ae3SPaolo Bonzini 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2105c50d8ae3SPaolo Bonzini 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2106c50d8ae3SPaolo Bonzini 		role.quadrant = quadrant;
2107c50d8ae3SPaolo Bonzini 	}
2108ac101b7cSSean Christopherson 
2109ac101b7cSSean Christopherson 	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2110ac101b7cSSean Christopherson 	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
2111c50d8ae3SPaolo Bonzini 		if (sp->gfn != gfn) {
2112c50d8ae3SPaolo Bonzini 			collisions++;
2113c50d8ae3SPaolo Bonzini 			continue;
2114c50d8ae3SPaolo Bonzini 		}
2115c50d8ae3SPaolo Bonzini 
2116ddc16abbSSean Christopherson 		if (sp->role.word != role.word) {
2117ddc16abbSSean Christopherson 			/*
2118ddc16abbSSean Christopherson 			 * If the guest is creating an upper-level page, zap
2119ddc16abbSSean Christopherson 			 * unsync pages for the same gfn.  While it's possible
2120ddc16abbSSean Christopherson 			 * the guest is using recursive page tables, in all
2121ddc16abbSSean Christopherson 			 * likelihood the guest has stopped using the unsync
2122ddc16abbSSean Christopherson 			 * page and is installing a completely unrelated page.
2123ddc16abbSSean Christopherson 			 * Unsync pages must not be left as is, because the new
2124ddc16abbSSean Christopherson 			 * upper-level page will be write-protected.
2125ddc16abbSSean Christopherson 			 */
2126ddc16abbSSean Christopherson 			if (level > PG_LEVEL_4K && sp->unsync)
2127ddc16abbSSean Christopherson 				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2128ddc16abbSSean Christopherson 							 &invalid_list);
2129c50d8ae3SPaolo Bonzini 			continue;
2130ddc16abbSSean Christopherson 		}
2131c50d8ae3SPaolo Bonzini 
2132fb58a9c3SSean Christopherson 		if (direct_mmu)
2133fb58a9c3SSean Christopherson 			goto trace_get_page;
2134fb58a9c3SSean Christopherson 
2135c50d8ae3SPaolo Bonzini 		if (sp->unsync) {
213607dc4f35SSean Christopherson 			/*
2137479a1efcSSean Christopherson 			 * The page is good, but is stale.  kvm_sync_page does
213807dc4f35SSean Christopherson 			 * get the latest guest state, but (unlike mmu_unsync_children)
213907dc4f35SSean Christopherson 			 * it doesn't write-protect the page or mark it synchronized!
214007dc4f35SSean Christopherson 			 * This way the validity of the mapping is ensured, but the
214107dc4f35SSean Christopherson 			 * overhead of write protection is not incurred until the
214207dc4f35SSean Christopherson 			 * guest invalidates the TLB mapping.  This allows multiple
214307dc4f35SSean Christopherson 			 * SPs for a single gfn to be unsync.
214407dc4f35SSean Christopherson 			 *
214507dc4f35SSean Christopherson 			 * If the sync fails, the page is zapped.  If so, break
214607dc4f35SSean Christopherson 			 * in order to rebuild it.
2147c50d8ae3SPaolo Bonzini 			 */
2148479a1efcSSean Christopherson 			if (!kvm_sync_page(vcpu, sp, &invalid_list))
2149c50d8ae3SPaolo Bonzini 				break;
2150c50d8ae3SPaolo Bonzini 
2151c50d8ae3SPaolo Bonzini 			WARN_ON(!list_empty(&invalid_list));
21528c8560b8SSean Christopherson 			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2153c50d8ae3SPaolo Bonzini 		}
2154c50d8ae3SPaolo Bonzini 
2155c50d8ae3SPaolo Bonzini 		if (sp->unsync_children)
2156f6f6195bSLai Jiangshan 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2157c50d8ae3SPaolo Bonzini 
2158c50d8ae3SPaolo Bonzini 		__clear_sp_write_flooding_count(sp);
2159fb58a9c3SSean Christopherson 
2160fb58a9c3SSean Christopherson trace_get_page:
2161c50d8ae3SPaolo Bonzini 		trace_kvm_mmu_get_page(sp, false);
2162c50d8ae3SPaolo Bonzini 		goto out;
2163c50d8ae3SPaolo Bonzini 	}
2164c50d8ae3SPaolo Bonzini 
2165c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_cache_miss;
2166c50d8ae3SPaolo Bonzini 
2167c50d8ae3SPaolo Bonzini 	sp = kvm_mmu_alloc_page(vcpu, direct);
2168c50d8ae3SPaolo Bonzini 
2169c50d8ae3SPaolo Bonzini 	sp->gfn = gfn;
2170c50d8ae3SPaolo Bonzini 	sp->role = role;
2171ac101b7cSSean Christopherson 	hlist_add_head(&sp->hash_link, sp_list);
2172c50d8ae3SPaolo Bonzini 	if (!direct) {
2173c50d8ae3SPaolo Bonzini 		account_shadowed(vcpu->kvm, sp);
21743bae0459SSean Christopherson 		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
2175c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2176c50d8ae3SPaolo Bonzini 	}
2177c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_get_page(sp, true);
2178c50d8ae3SPaolo Bonzini out:
2179ddc16abbSSean Christopherson 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2180ddc16abbSSean Christopherson 
2181c50d8ae3SPaolo Bonzini 	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2182c50d8ae3SPaolo Bonzini 		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2183c50d8ae3SPaolo Bonzini 	return sp;
2184c50d8ae3SPaolo Bonzini }
2185c50d8ae3SPaolo Bonzini 
2186c50d8ae3SPaolo Bonzini static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2187c50d8ae3SPaolo Bonzini 					struct kvm_vcpu *vcpu, hpa_t root,
2188c50d8ae3SPaolo Bonzini 					u64 addr)
2189c50d8ae3SPaolo Bonzini {
2190c50d8ae3SPaolo Bonzini 	iterator->addr = addr;
2191c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = root;
2192c50d8ae3SPaolo Bonzini 	iterator->level = vcpu->arch.mmu->shadow_root_level;
2193c50d8ae3SPaolo Bonzini 
2194c50d8ae3SPaolo Bonzini 	if (iterator->level == PT64_ROOT_4LEVEL &&
2195c50d8ae3SPaolo Bonzini 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2196c50d8ae3SPaolo Bonzini 	    !vcpu->arch.mmu->direct_map)
2197c50d8ae3SPaolo Bonzini 		--iterator->level;
2198c50d8ae3SPaolo Bonzini 
2199c50d8ae3SPaolo Bonzini 	if (iterator->level == PT32E_ROOT_LEVEL) {
2200c50d8ae3SPaolo Bonzini 		/*
2201c50d8ae3SPaolo Bonzini 		 * prev_root is currently only used for 64-bit hosts. So only
2202c50d8ae3SPaolo Bonzini 		 * the active root_hpa is valid here.
2203c50d8ae3SPaolo Bonzini 		 */
2204c50d8ae3SPaolo Bonzini 		BUG_ON(root != vcpu->arch.mmu->root_hpa);
2205c50d8ae3SPaolo Bonzini 
2206c50d8ae3SPaolo Bonzini 		iterator->shadow_addr
2207c50d8ae3SPaolo Bonzini 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2208c50d8ae3SPaolo Bonzini 		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2209c50d8ae3SPaolo Bonzini 		--iterator->level;
2210c50d8ae3SPaolo Bonzini 		if (!iterator->shadow_addr)
2211c50d8ae3SPaolo Bonzini 			iterator->level = 0;
2212c50d8ae3SPaolo Bonzini 	}
2213c50d8ae3SPaolo Bonzini }
2214c50d8ae3SPaolo Bonzini 
2215c50d8ae3SPaolo Bonzini static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2216c50d8ae3SPaolo Bonzini 			     struct kvm_vcpu *vcpu, u64 addr)
2217c50d8ae3SPaolo Bonzini {
2218c50d8ae3SPaolo Bonzini 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2219c50d8ae3SPaolo Bonzini 				    addr);
2220c50d8ae3SPaolo Bonzini }
2221c50d8ae3SPaolo Bonzini 
2222c50d8ae3SPaolo Bonzini static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2223c50d8ae3SPaolo Bonzini {
22243bae0459SSean Christopherson 	if (iterator->level < PG_LEVEL_4K)
2225c50d8ae3SPaolo Bonzini 		return false;
2226c50d8ae3SPaolo Bonzini 
2227c50d8ae3SPaolo Bonzini 	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2228c50d8ae3SPaolo Bonzini 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2229c50d8ae3SPaolo Bonzini 	return true;
2230c50d8ae3SPaolo Bonzini }
2231c50d8ae3SPaolo Bonzini 
2232c50d8ae3SPaolo Bonzini static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2233c50d8ae3SPaolo Bonzini 			       u64 spte)
2234c50d8ae3SPaolo Bonzini {
2235c50d8ae3SPaolo Bonzini 	if (is_last_spte(spte, iterator->level)) {
2236c50d8ae3SPaolo Bonzini 		iterator->level = 0;
2237c50d8ae3SPaolo Bonzini 		return;
2238c50d8ae3SPaolo Bonzini 	}
2239c50d8ae3SPaolo Bonzini 
2240c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2241c50d8ae3SPaolo Bonzini 	--iterator->level;
2242c50d8ae3SPaolo Bonzini }
2243c50d8ae3SPaolo Bonzini 
2244c50d8ae3SPaolo Bonzini static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2245c50d8ae3SPaolo Bonzini {
2246c50d8ae3SPaolo Bonzini 	__shadow_walk_next(iterator, *iterator->sptep);
2247c50d8ae3SPaolo Bonzini }
2248c50d8ae3SPaolo Bonzini 
2249c50d8ae3SPaolo Bonzini static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2250c50d8ae3SPaolo Bonzini 			     struct kvm_mmu_page *sp)
2251c50d8ae3SPaolo Bonzini {
2252c50d8ae3SPaolo Bonzini 	u64 spte;
2253c50d8ae3SPaolo Bonzini 
2254c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2255c50d8ae3SPaolo Bonzini 
2256cc4674d0SBen Gardon 	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2257c50d8ae3SPaolo Bonzini 
2258c50d8ae3SPaolo Bonzini 	mmu_spte_set(sptep, spte);
2259c50d8ae3SPaolo Bonzini 
2260c50d8ae3SPaolo Bonzini 	mmu_page_add_parent_pte(vcpu, sp, sptep);
2261c50d8ae3SPaolo Bonzini 
2262c50d8ae3SPaolo Bonzini 	if (sp->unsync_children || sp->unsync)
2263c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
2264c50d8ae3SPaolo Bonzini }
2265c50d8ae3SPaolo Bonzini 
2266c50d8ae3SPaolo Bonzini static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2267c50d8ae3SPaolo Bonzini 				   unsigned direct_access)
2268c50d8ae3SPaolo Bonzini {
2269c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2270c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
2271c50d8ae3SPaolo Bonzini 
2272c50d8ae3SPaolo Bonzini 		/*
2273c50d8ae3SPaolo Bonzini 		 * For the direct sp, if the guest pte's dirty bit
2274c50d8ae3SPaolo Bonzini 		 * changed form clean to dirty, it will corrupt the
2275c50d8ae3SPaolo Bonzini 		 * sp's access: allow writable in the read-only sp,
2276c50d8ae3SPaolo Bonzini 		 * so we should update the spte at this point to get
2277c50d8ae3SPaolo Bonzini 		 * a new sp with the correct access.
2278c50d8ae3SPaolo Bonzini 		 */
2279e47c4aeeSSean Christopherson 		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2280c50d8ae3SPaolo Bonzini 		if (child->role.access == direct_access)
2281c50d8ae3SPaolo Bonzini 			return;
2282c50d8ae3SPaolo Bonzini 
2283c50d8ae3SPaolo Bonzini 		drop_parent_pte(child, sptep);
2284c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2285c50d8ae3SPaolo Bonzini 	}
2286c50d8ae3SPaolo Bonzini }
2287c50d8ae3SPaolo Bonzini 
22882de4085cSBen Gardon /* Returns the number of zapped non-leaf child shadow pages. */
22892de4085cSBen Gardon static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
22902de4085cSBen Gardon 			    u64 *spte, struct list_head *invalid_list)
2291c50d8ae3SPaolo Bonzini {
2292c50d8ae3SPaolo Bonzini 	u64 pte;
2293c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *child;
2294c50d8ae3SPaolo Bonzini 
2295c50d8ae3SPaolo Bonzini 	pte = *spte;
2296c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(pte)) {
2297c50d8ae3SPaolo Bonzini 		if (is_last_spte(pte, sp->role.level)) {
2298c50d8ae3SPaolo Bonzini 			drop_spte(kvm, spte);
2299c50d8ae3SPaolo Bonzini 		} else {
2300e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2301c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, spte);
23022de4085cSBen Gardon 
23032de4085cSBen Gardon 			/*
23042de4085cSBen Gardon 			 * Recursively zap nested TDP SPs, parentless SPs are
23052de4085cSBen Gardon 			 * unlikely to be used again in the near future.  This
23062de4085cSBen Gardon 			 * avoids retaining a large number of stale nested SPs.
23072de4085cSBen Gardon 			 */
23082de4085cSBen Gardon 			if (tdp_enabled && invalid_list &&
23092de4085cSBen Gardon 			    child->role.guest_mode && !child->parent_ptes.val)
23102de4085cSBen Gardon 				return kvm_mmu_prepare_zap_page(kvm, child,
23112de4085cSBen Gardon 								invalid_list);
2312c50d8ae3SPaolo Bonzini 		}
2313ace569e0SSean Christopherson 	} else if (is_mmio_spte(pte)) {
2314c50d8ae3SPaolo Bonzini 		mmu_spte_clear_no_track(spte);
2315ace569e0SSean Christopherson 	}
23162de4085cSBen Gardon 	return 0;
2317c50d8ae3SPaolo Bonzini }
2318c50d8ae3SPaolo Bonzini 
23192de4085cSBen Gardon static int kvm_mmu_page_unlink_children(struct kvm *kvm,
23202de4085cSBen Gardon 					struct kvm_mmu_page *sp,
23212de4085cSBen Gardon 					struct list_head *invalid_list)
2322c50d8ae3SPaolo Bonzini {
23232de4085cSBen Gardon 	int zapped = 0;
2324c50d8ae3SPaolo Bonzini 	unsigned i;
2325c50d8ae3SPaolo Bonzini 
2326c50d8ae3SPaolo Bonzini 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
23272de4085cSBen Gardon 		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
23282de4085cSBen Gardon 
23292de4085cSBen Gardon 	return zapped;
2330c50d8ae3SPaolo Bonzini }
2331c50d8ae3SPaolo Bonzini 
2332c50d8ae3SPaolo Bonzini static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2333c50d8ae3SPaolo Bonzini {
2334c50d8ae3SPaolo Bonzini 	u64 *sptep;
2335c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
2336c50d8ae3SPaolo Bonzini 
2337c50d8ae3SPaolo Bonzini 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2338c50d8ae3SPaolo Bonzini 		drop_parent_pte(sp, sptep);
2339c50d8ae3SPaolo Bonzini }
2340c50d8ae3SPaolo Bonzini 
2341c50d8ae3SPaolo Bonzini static int mmu_zap_unsync_children(struct kvm *kvm,
2342c50d8ae3SPaolo Bonzini 				   struct kvm_mmu_page *parent,
2343c50d8ae3SPaolo Bonzini 				   struct list_head *invalid_list)
2344c50d8ae3SPaolo Bonzini {
2345c50d8ae3SPaolo Bonzini 	int i, zapped = 0;
2346c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
2347c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
2348c50d8ae3SPaolo Bonzini 
23493bae0459SSean Christopherson 	if (parent->role.level == PG_LEVEL_4K)
2350c50d8ae3SPaolo Bonzini 		return 0;
2351c50d8ae3SPaolo Bonzini 
2352c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
2353c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
2354c50d8ae3SPaolo Bonzini 
2355c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
2356c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2357c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
2358c50d8ae3SPaolo Bonzini 			zapped++;
2359c50d8ae3SPaolo Bonzini 		}
2360c50d8ae3SPaolo Bonzini 	}
2361c50d8ae3SPaolo Bonzini 
2362c50d8ae3SPaolo Bonzini 	return zapped;
2363c50d8ae3SPaolo Bonzini }
2364c50d8ae3SPaolo Bonzini 
2365c50d8ae3SPaolo Bonzini static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2366c50d8ae3SPaolo Bonzini 				       struct kvm_mmu_page *sp,
2367c50d8ae3SPaolo Bonzini 				       struct list_head *invalid_list,
2368c50d8ae3SPaolo Bonzini 				       int *nr_zapped)
2369c50d8ae3SPaolo Bonzini {
2370c50d8ae3SPaolo Bonzini 	bool list_unstable;
2371c50d8ae3SPaolo Bonzini 
2372c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_prepare_zap_page(sp);
2373c50d8ae3SPaolo Bonzini 	++kvm->stat.mmu_shadow_zapped;
2374c50d8ae3SPaolo Bonzini 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
23752de4085cSBen Gardon 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2376c50d8ae3SPaolo Bonzini 	kvm_mmu_unlink_parents(kvm, sp);
2377c50d8ae3SPaolo Bonzini 
2378c50d8ae3SPaolo Bonzini 	/* Zapping children means active_mmu_pages has become unstable. */
2379c50d8ae3SPaolo Bonzini 	list_unstable = *nr_zapped;
2380c50d8ae3SPaolo Bonzini 
2381c50d8ae3SPaolo Bonzini 	if (!sp->role.invalid && !sp->role.direct)
2382c50d8ae3SPaolo Bonzini 		unaccount_shadowed(kvm, sp);
2383c50d8ae3SPaolo Bonzini 
2384c50d8ae3SPaolo Bonzini 	if (sp->unsync)
2385c50d8ae3SPaolo Bonzini 		kvm_unlink_unsync_page(kvm, sp);
2386c50d8ae3SPaolo Bonzini 	if (!sp->root_count) {
2387c50d8ae3SPaolo Bonzini 		/* Count self */
2388c50d8ae3SPaolo Bonzini 		(*nr_zapped)++;
2389f95eec9bSSean Christopherson 
2390f95eec9bSSean Christopherson 		/*
2391f95eec9bSSean Christopherson 		 * Already invalid pages (previously active roots) are not on
2392f95eec9bSSean Christopherson 		 * the active page list.  See list_del() in the "else" case of
2393f95eec9bSSean Christopherson 		 * !sp->root_count.
2394f95eec9bSSean Christopherson 		 */
2395f95eec9bSSean Christopherson 		if (sp->role.invalid)
2396f95eec9bSSean Christopherson 			list_add(&sp->link, invalid_list);
2397f95eec9bSSean Christopherson 		else
2398c50d8ae3SPaolo Bonzini 			list_move(&sp->link, invalid_list);
2399c50d8ae3SPaolo Bonzini 		kvm_mod_used_mmu_pages(kvm, -1);
2400c50d8ae3SPaolo Bonzini 	} else {
2401f95eec9bSSean Christopherson 		/*
2402f95eec9bSSean Christopherson 		 * Remove the active root from the active page list, the root
2403f95eec9bSSean Christopherson 		 * will be explicitly freed when the root_count hits zero.
2404f95eec9bSSean Christopherson 		 */
2405f95eec9bSSean Christopherson 		list_del(&sp->link);
2406c50d8ae3SPaolo Bonzini 
2407c50d8ae3SPaolo Bonzini 		/*
2408c50d8ae3SPaolo Bonzini 		 * Obsolete pages cannot be used on any vCPUs, see the comment
2409c50d8ae3SPaolo Bonzini 		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2410c50d8ae3SPaolo Bonzini 		 * treats invalid shadow pages as being obsolete.
2411c50d8ae3SPaolo Bonzini 		 */
2412c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
2413c50d8ae3SPaolo Bonzini 			kvm_reload_remote_mmus(kvm);
2414c50d8ae3SPaolo Bonzini 	}
2415c50d8ae3SPaolo Bonzini 
2416c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
2417c50d8ae3SPaolo Bonzini 		unaccount_huge_nx_page(kvm, sp);
2418c50d8ae3SPaolo Bonzini 
2419c50d8ae3SPaolo Bonzini 	sp->role.invalid = 1;
2420c50d8ae3SPaolo Bonzini 	return list_unstable;
2421c50d8ae3SPaolo Bonzini }
2422c50d8ae3SPaolo Bonzini 
2423c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2424c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list)
2425c50d8ae3SPaolo Bonzini {
2426c50d8ae3SPaolo Bonzini 	int nr_zapped;
2427c50d8ae3SPaolo Bonzini 
2428c50d8ae3SPaolo Bonzini 	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2429c50d8ae3SPaolo Bonzini 	return nr_zapped;
2430c50d8ae3SPaolo Bonzini }
2431c50d8ae3SPaolo Bonzini 
2432c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2433c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list)
2434c50d8ae3SPaolo Bonzini {
2435c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *nsp;
2436c50d8ae3SPaolo Bonzini 
2437c50d8ae3SPaolo Bonzini 	if (list_empty(invalid_list))
2438c50d8ae3SPaolo Bonzini 		return;
2439c50d8ae3SPaolo Bonzini 
2440c50d8ae3SPaolo Bonzini 	/*
2441c50d8ae3SPaolo Bonzini 	 * We need to make sure everyone sees our modifications to
2442c50d8ae3SPaolo Bonzini 	 * the page tables and see changes to vcpu->mode here. The barrier
2443c50d8ae3SPaolo Bonzini 	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
2444c50d8ae3SPaolo Bonzini 	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2445c50d8ae3SPaolo Bonzini 	 *
2446c50d8ae3SPaolo Bonzini 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2447c50d8ae3SPaolo Bonzini 	 * guest mode and/or lockless shadow page table walks.
2448c50d8ae3SPaolo Bonzini 	 */
2449c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs(kvm);
2450c50d8ae3SPaolo Bonzini 
2451c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2452c50d8ae3SPaolo Bonzini 		WARN_ON(!sp->role.invalid || sp->root_count);
2453c50d8ae3SPaolo Bonzini 		kvm_mmu_free_page(sp);
2454c50d8ae3SPaolo Bonzini 	}
2455c50d8ae3SPaolo Bonzini }
2456c50d8ae3SPaolo Bonzini 
24576b82ef2cSSean Christopherson static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
24586b82ef2cSSean Christopherson 						  unsigned long nr_to_zap)
2459c50d8ae3SPaolo Bonzini {
24606b82ef2cSSean Christopherson 	unsigned long total_zapped = 0;
24616b82ef2cSSean Christopherson 	struct kvm_mmu_page *sp, *tmp;
2462ba7888ddSSean Christopherson 	LIST_HEAD(invalid_list);
24636b82ef2cSSean Christopherson 	bool unstable;
24646b82ef2cSSean Christopherson 	int nr_zapped;
2465c50d8ae3SPaolo Bonzini 
2466c50d8ae3SPaolo Bonzini 	if (list_empty(&kvm->arch.active_mmu_pages))
2467ba7888ddSSean Christopherson 		return 0;
2468c50d8ae3SPaolo Bonzini 
24696b82ef2cSSean Christopherson restart:
24708fc51726SSean Christopherson 	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
24716b82ef2cSSean Christopherson 		/*
24726b82ef2cSSean Christopherson 		 * Don't zap active root pages, the page itself can't be freed
24736b82ef2cSSean Christopherson 		 * and zapping it will just force vCPUs to realloc and reload.
24746b82ef2cSSean Christopherson 		 */
24756b82ef2cSSean Christopherson 		if (sp->root_count)
24766b82ef2cSSean Christopherson 			continue;
24776b82ef2cSSean Christopherson 
24786b82ef2cSSean Christopherson 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
24796b82ef2cSSean Christopherson 						      &nr_zapped);
24806b82ef2cSSean Christopherson 		total_zapped += nr_zapped;
24816b82ef2cSSean Christopherson 		if (total_zapped >= nr_to_zap)
2482ba7888ddSSean Christopherson 			break;
2483ba7888ddSSean Christopherson 
24846b82ef2cSSean Christopherson 		if (unstable)
24856b82ef2cSSean Christopherson 			goto restart;
2486ba7888ddSSean Christopherson 	}
24876b82ef2cSSean Christopherson 
24886b82ef2cSSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
24896b82ef2cSSean Christopherson 
24906b82ef2cSSean Christopherson 	kvm->stat.mmu_recycled += total_zapped;
24916b82ef2cSSean Christopherson 	return total_zapped;
24926b82ef2cSSean Christopherson }
24936b82ef2cSSean Christopherson 
2494afe8d7e6SSean Christopherson static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2495afe8d7e6SSean Christopherson {
2496afe8d7e6SSean Christopherson 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2497afe8d7e6SSean Christopherson 		return kvm->arch.n_max_mmu_pages -
2498afe8d7e6SSean Christopherson 			kvm->arch.n_used_mmu_pages;
2499afe8d7e6SSean Christopherson 
2500afe8d7e6SSean Christopherson 	return 0;
2501c50d8ae3SPaolo Bonzini }
2502c50d8ae3SPaolo Bonzini 
2503ba7888ddSSean Christopherson static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2504ba7888ddSSean Christopherson {
25056b82ef2cSSean Christopherson 	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2506ba7888ddSSean Christopherson 
25076b82ef2cSSean Christopherson 	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2508ba7888ddSSean Christopherson 		return 0;
2509ba7888ddSSean Christopherson 
25106b82ef2cSSean Christopherson 	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2511ba7888ddSSean Christopherson 
25126e6ec584SSean Christopherson 	/*
25136e6ec584SSean Christopherson 	 * Note, this check is intentionally soft, it only guarantees that one
25146e6ec584SSean Christopherson 	 * page is available, while the caller may end up allocating as many as
25156e6ec584SSean Christopherson 	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
25166e6ec584SSean Christopherson 	 * exceeding the (arbitrary by default) limit will not harm the host,
2517c4342633SIngo Molnar 	 * being too aggressive may unnecessarily kill the guest, and getting an
25186e6ec584SSean Christopherson 	 * exact count is far more trouble than it's worth, especially in the
25196e6ec584SSean Christopherson 	 * page fault paths.
25206e6ec584SSean Christopherson 	 */
2521ba7888ddSSean Christopherson 	if (!kvm_mmu_available_pages(vcpu->kvm))
2522ba7888ddSSean Christopherson 		return -ENOSPC;
2523ba7888ddSSean Christopherson 	return 0;
2524ba7888ddSSean Christopherson }
2525ba7888ddSSean Christopherson 
2526c50d8ae3SPaolo Bonzini /*
2527c50d8ae3SPaolo Bonzini  * Changing the number of mmu pages allocated to the vm
2528c50d8ae3SPaolo Bonzini  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
2529c50d8ae3SPaolo Bonzini  */
2530c50d8ae3SPaolo Bonzini void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2531c50d8ae3SPaolo Bonzini {
2532531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
2533c50d8ae3SPaolo Bonzini 
2534c50d8ae3SPaolo Bonzini 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
25356b82ef2cSSean Christopherson 		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
25366b82ef2cSSean Christopherson 						  goal_nr_mmu_pages);
2537c50d8ae3SPaolo Bonzini 
2538c50d8ae3SPaolo Bonzini 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2539c50d8ae3SPaolo Bonzini 	}
2540c50d8ae3SPaolo Bonzini 
2541c50d8ae3SPaolo Bonzini 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2542c50d8ae3SPaolo Bonzini 
2543531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
2544c50d8ae3SPaolo Bonzini }
2545c50d8ae3SPaolo Bonzini 
2546c50d8ae3SPaolo Bonzini int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2547c50d8ae3SPaolo Bonzini {
2548c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2549c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2550c50d8ae3SPaolo Bonzini 	int r;
2551c50d8ae3SPaolo Bonzini 
2552c50d8ae3SPaolo Bonzini 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2553c50d8ae3SPaolo Bonzini 	r = 0;
2554531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
2555c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2556c50d8ae3SPaolo Bonzini 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2557c50d8ae3SPaolo Bonzini 			 sp->role.word);
2558c50d8ae3SPaolo Bonzini 		r = 1;
2559c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2560c50d8ae3SPaolo Bonzini 	}
2561c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2562531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
2563c50d8ae3SPaolo Bonzini 
2564c50d8ae3SPaolo Bonzini 	return r;
2565c50d8ae3SPaolo Bonzini }
256696ad91aeSSean Christopherson 
256796ad91aeSSean Christopherson static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
256896ad91aeSSean Christopherson {
256996ad91aeSSean Christopherson 	gpa_t gpa;
257096ad91aeSSean Christopherson 	int r;
257196ad91aeSSean Christopherson 
257296ad91aeSSean Christopherson 	if (vcpu->arch.mmu->direct_map)
257396ad91aeSSean Christopherson 		return 0;
257496ad91aeSSean Christopherson 
257596ad91aeSSean Christopherson 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
257696ad91aeSSean Christopherson 
257796ad91aeSSean Christopherson 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
257896ad91aeSSean Christopherson 
257996ad91aeSSean Christopherson 	return r;
258096ad91aeSSean Christopherson }
2581c50d8ae3SPaolo Bonzini 
2582c50d8ae3SPaolo Bonzini static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2583c50d8ae3SPaolo Bonzini {
2584c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_unsync_page(sp);
2585c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_unsync;
2586c50d8ae3SPaolo Bonzini 	sp->unsync = 1;
2587c50d8ae3SPaolo Bonzini 
2588c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
2589c50d8ae3SPaolo Bonzini }
2590c50d8ae3SPaolo Bonzini 
25910337f585SSean Christopherson /*
25920337f585SSean Christopherson  * Attempt to unsync any shadow pages that can be reached by the specified gfn,
25930337f585SSean Christopherson  * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
25940337f585SSean Christopherson  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
25950337f585SSean Christopherson  * be write-protected.
25960337f585SSean Christopherson  */
25970337f585SSean Christopherson int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
2598c50d8ae3SPaolo Bonzini {
2599c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2600ce25681dSSean Christopherson 	bool locked = false;
2601c50d8ae3SPaolo Bonzini 
26020337f585SSean Christopherson 	/*
26030337f585SSean Christopherson 	 * Force write-protection if the page is being tracked.  Note, the page
26040337f585SSean Christopherson 	 * track machinery is used to write-protect upper-level shadow pages,
26050337f585SSean Christopherson 	 * i.e. this guards the role.level == 4K assertion below!
26060337f585SSean Christopherson 	 */
2607c50d8ae3SPaolo Bonzini 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
26080337f585SSean Christopherson 		return -EPERM;
2609c50d8ae3SPaolo Bonzini 
26100337f585SSean Christopherson 	/*
26110337f585SSean Christopherson 	 * The page is not write-tracked, mark existing shadow pages unsync
26120337f585SSean Christopherson 	 * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
26130337f585SSean Christopherson 	 * that case, KVM must complete emulation of the guest TLB flush before
26140337f585SSean Christopherson 	 * allowing shadow pages to become unsync (writable by the guest).
26150337f585SSean Christopherson 	 */
2616c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2617c50d8ae3SPaolo Bonzini 		if (!can_unsync)
26180337f585SSean Christopherson 			return -EPERM;
2619c50d8ae3SPaolo Bonzini 
2620c50d8ae3SPaolo Bonzini 		if (sp->unsync)
2621c50d8ae3SPaolo Bonzini 			continue;
2622c50d8ae3SPaolo Bonzini 
2623ce25681dSSean Christopherson 		/*
2624ce25681dSSean Christopherson 		 * TDP MMU page faults require an additional spinlock as they
2625ce25681dSSean Christopherson 		 * run with mmu_lock held for read, not write, and the unsync
2626ce25681dSSean Christopherson 		 * logic is not thread safe.  Take the spinklock regardless of
2627ce25681dSSean Christopherson 		 * the MMU type to avoid extra conditionals/parameters, there's
2628ce25681dSSean Christopherson 		 * no meaningful penalty if mmu_lock is held for write.
2629ce25681dSSean Christopherson 		 */
2630ce25681dSSean Christopherson 		if (!locked) {
2631ce25681dSSean Christopherson 			locked = true;
2632ce25681dSSean Christopherson 			spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
2633ce25681dSSean Christopherson 
2634ce25681dSSean Christopherson 			/*
2635ce25681dSSean Christopherson 			 * Recheck after taking the spinlock, a different vCPU
2636ce25681dSSean Christopherson 			 * may have since marked the page unsync.  A false
2637ce25681dSSean Christopherson 			 * positive on the unprotected check above is not
2638ce25681dSSean Christopherson 			 * possible as clearing sp->unsync _must_ hold mmu_lock
2639ce25681dSSean Christopherson 			 * for write, i.e. unsync cannot transition from 0->1
2640ce25681dSSean Christopherson 			 * while this CPU holds mmu_lock for read (or write).
2641ce25681dSSean Christopherson 			 */
2642ce25681dSSean Christopherson 			if (READ_ONCE(sp->unsync))
2643ce25681dSSean Christopherson 				continue;
2644ce25681dSSean Christopherson 		}
2645ce25681dSSean Christopherson 
26463bae0459SSean Christopherson 		WARN_ON(sp->role.level != PG_LEVEL_4K);
2647c50d8ae3SPaolo Bonzini 		kvm_unsync_page(vcpu, sp);
2648c50d8ae3SPaolo Bonzini 	}
2649ce25681dSSean Christopherson 	if (locked)
2650ce25681dSSean Christopherson 		spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
2651c50d8ae3SPaolo Bonzini 
2652c50d8ae3SPaolo Bonzini 	/*
2653c50d8ae3SPaolo Bonzini 	 * We need to ensure that the marking of unsync pages is visible
2654c50d8ae3SPaolo Bonzini 	 * before the SPTE is updated to allow writes because
2655c50d8ae3SPaolo Bonzini 	 * kvm_mmu_sync_roots() checks the unsync flags without holding
2656c50d8ae3SPaolo Bonzini 	 * the MMU lock and so can race with this. If the SPTE was updated
2657c50d8ae3SPaolo Bonzini 	 * before the page had been marked as unsync-ed, something like the
2658c50d8ae3SPaolo Bonzini 	 * following could happen:
2659c50d8ae3SPaolo Bonzini 	 *
2660c50d8ae3SPaolo Bonzini 	 * CPU 1                    CPU 2
2661c50d8ae3SPaolo Bonzini 	 * ---------------------------------------------------------------------
2662c50d8ae3SPaolo Bonzini 	 * 1.2 Host updates SPTE
2663c50d8ae3SPaolo Bonzini 	 *     to be writable
2664c50d8ae3SPaolo Bonzini 	 *                      2.1 Guest writes a GPTE for GVA X.
2665c50d8ae3SPaolo Bonzini 	 *                          (GPTE being in the guest page table shadowed
2666c50d8ae3SPaolo Bonzini 	 *                           by the SP from CPU 1.)
2667c50d8ae3SPaolo Bonzini 	 *                          This reads SPTE during the page table walk.
2668c50d8ae3SPaolo Bonzini 	 *                          Since SPTE.W is read as 1, there is no
2669c50d8ae3SPaolo Bonzini 	 *                          fault.
2670c50d8ae3SPaolo Bonzini 	 *
2671c50d8ae3SPaolo Bonzini 	 *                      2.2 Guest issues TLB flush.
2672c50d8ae3SPaolo Bonzini 	 *                          That causes a VM Exit.
2673c50d8ae3SPaolo Bonzini 	 *
26740337f585SSean Christopherson 	 *                      2.3 Walking of unsync pages sees sp->unsync is
26750337f585SSean Christopherson 	 *                          false and skips the page.
2676c50d8ae3SPaolo Bonzini 	 *
2677c50d8ae3SPaolo Bonzini 	 *                      2.4 Guest accesses GVA X.
2678c50d8ae3SPaolo Bonzini 	 *                          Since the mapping in the SP was not updated,
2679c50d8ae3SPaolo Bonzini 	 *                          so the old mapping for GVA X incorrectly
2680c50d8ae3SPaolo Bonzini 	 *                          gets used.
2681c50d8ae3SPaolo Bonzini 	 * 1.1 Host marks SP
2682c50d8ae3SPaolo Bonzini 	 *     as unsync
2683c50d8ae3SPaolo Bonzini 	 *     (sp->unsync = true)
2684c50d8ae3SPaolo Bonzini 	 *
2685c50d8ae3SPaolo Bonzini 	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
2686c50d8ae3SPaolo Bonzini 	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
2687c50d8ae3SPaolo Bonzini 	 * pairs with this write barrier.
2688c50d8ae3SPaolo Bonzini 	 */
2689c50d8ae3SPaolo Bonzini 	smp_wmb();
2690c50d8ae3SPaolo Bonzini 
26910337f585SSean Christopherson 	return 0;
2692c50d8ae3SPaolo Bonzini }
2693c50d8ae3SPaolo Bonzini 
2694799a4190SBen Gardon static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2695799a4190SBen Gardon 		    unsigned int pte_access, int level,
2696799a4190SBen Gardon 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2697799a4190SBen Gardon 		    bool can_unsync, bool host_writable)
2698799a4190SBen Gardon {
2699799a4190SBen Gardon 	u64 spte;
2700799a4190SBen Gardon 	struct kvm_mmu_page *sp;
2701799a4190SBen Gardon 	int ret;
2702799a4190SBen Gardon 
2703799a4190SBen Gardon 	sp = sptep_to_sp(sptep);
2704799a4190SBen Gardon 
2705799a4190SBen Gardon 	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
2706799a4190SBen Gardon 			can_unsync, host_writable, sp_ad_disabled(sp), &spte);
2707799a4190SBen Gardon 
2708799a4190SBen Gardon 	if (spte & PT_WRITABLE_MASK)
2709799a4190SBen Gardon 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
2710799a4190SBen Gardon 
271112703759SSean Christopherson 	if (*sptep == spte)
271212703759SSean Christopherson 		ret |= SET_SPTE_SPURIOUS;
271312703759SSean Christopherson 	else if (mmu_spte_update(sptep, spte))
2714c50d8ae3SPaolo Bonzini 		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
2715c50d8ae3SPaolo Bonzini 	return ret;
2716c50d8ae3SPaolo Bonzini }
2717c50d8ae3SPaolo Bonzini 
27180a2b64c5SBen Gardon static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2719e88b8093SSean Christopherson 			unsigned int pte_access, bool write_fault, int level,
27200a2b64c5SBen Gardon 			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
27210a2b64c5SBen Gardon 			bool host_writable)
2722c50d8ae3SPaolo Bonzini {
2723c50d8ae3SPaolo Bonzini 	int was_rmapped = 0;
2724c50d8ae3SPaolo Bonzini 	int rmap_count;
2725c50d8ae3SPaolo Bonzini 	int set_spte_ret;
2726c4371c2aSSean Christopherson 	int ret = RET_PF_FIXED;
2727c50d8ae3SPaolo Bonzini 	bool flush = false;
2728c50d8ae3SPaolo Bonzini 
2729c50d8ae3SPaolo Bonzini 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2730c50d8ae3SPaolo Bonzini 		 *sptep, write_fault, gfn);
2731c50d8ae3SPaolo Bonzini 
2732a54aa15cSSean Christopherson 	if (unlikely(is_noslot_pfn(pfn))) {
2733a54aa15cSSean Christopherson 		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2734a54aa15cSSean Christopherson 		return RET_PF_EMULATE;
2735a54aa15cSSean Christopherson 	}
2736a54aa15cSSean Christopherson 
2737c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep)) {
2738c50d8ae3SPaolo Bonzini 		/*
2739c50d8ae3SPaolo Bonzini 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2740c50d8ae3SPaolo Bonzini 		 * the parent of the now unreachable PTE.
2741c50d8ae3SPaolo Bonzini 		 */
27423bae0459SSean Christopherson 		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2743c50d8ae3SPaolo Bonzini 			struct kvm_mmu_page *child;
2744c50d8ae3SPaolo Bonzini 			u64 pte = *sptep;
2745c50d8ae3SPaolo Bonzini 
2746e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2747c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, sptep);
2748c50d8ae3SPaolo Bonzini 			flush = true;
2749c50d8ae3SPaolo Bonzini 		} else if (pfn != spte_to_pfn(*sptep)) {
2750c50d8ae3SPaolo Bonzini 			pgprintk("hfn old %llx new %llx\n",
2751c50d8ae3SPaolo Bonzini 				 spte_to_pfn(*sptep), pfn);
2752c50d8ae3SPaolo Bonzini 			drop_spte(vcpu->kvm, sptep);
2753c50d8ae3SPaolo Bonzini 			flush = true;
2754c50d8ae3SPaolo Bonzini 		} else
2755c50d8ae3SPaolo Bonzini 			was_rmapped = 1;
2756c50d8ae3SPaolo Bonzini 	}
2757c50d8ae3SPaolo Bonzini 
2758c50d8ae3SPaolo Bonzini 	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
2759c50d8ae3SPaolo Bonzini 				speculative, true, host_writable);
2760c50d8ae3SPaolo Bonzini 	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
2761c50d8ae3SPaolo Bonzini 		if (write_fault)
2762c50d8ae3SPaolo Bonzini 			ret = RET_PF_EMULATE;
27638c8560b8SSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2764c50d8ae3SPaolo Bonzini 	}
2765c50d8ae3SPaolo Bonzini 
2766c50d8ae3SPaolo Bonzini 	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
2767c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
2768c50d8ae3SPaolo Bonzini 				KVM_PAGES_PER_HPAGE(level));
2769c50d8ae3SPaolo Bonzini 
277012703759SSean Christopherson 	/*
277112703759SSean Christopherson 	 * The fault is fully spurious if and only if the new SPTE and old SPTE
277212703759SSean Christopherson 	 * are identical, and emulation is not required.
277312703759SSean Christopherson 	 */
277412703759SSean Christopherson 	if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
277512703759SSean Christopherson 		WARN_ON_ONCE(!was_rmapped);
277612703759SSean Christopherson 		return RET_PF_SPURIOUS;
277712703759SSean Christopherson 	}
277812703759SSean Christopherson 
2779c50d8ae3SPaolo Bonzini 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2780c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_set_spte(level, gfn, sptep);
2781c50d8ae3SPaolo Bonzini 
2782c50d8ae3SPaolo Bonzini 	if (!was_rmapped) {
278371f51d2cSMingwei Zhang 		kvm_update_page_stats(vcpu->kvm, level, 1);
2784c50d8ae3SPaolo Bonzini 		rmap_count = rmap_add(vcpu, sptep, gfn);
2785c50d8ae3SPaolo Bonzini 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2786c50d8ae3SPaolo Bonzini 			rmap_recycle(vcpu, sptep, gfn);
2787c50d8ae3SPaolo Bonzini 	}
2788c50d8ae3SPaolo Bonzini 
2789c50d8ae3SPaolo Bonzini 	return ret;
2790c50d8ae3SPaolo Bonzini }
2791c50d8ae3SPaolo Bonzini 
2792c50d8ae3SPaolo Bonzini static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2793c50d8ae3SPaolo Bonzini 				     bool no_dirty_log)
2794c50d8ae3SPaolo Bonzini {
2795c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
2796c50d8ae3SPaolo Bonzini 
2797c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2798c50d8ae3SPaolo Bonzini 	if (!slot)
2799c50d8ae3SPaolo Bonzini 		return KVM_PFN_ERR_FAULT;
2800c50d8ae3SPaolo Bonzini 
2801c50d8ae3SPaolo Bonzini 	return gfn_to_pfn_memslot_atomic(slot, gfn);
2802c50d8ae3SPaolo Bonzini }
2803c50d8ae3SPaolo Bonzini 
2804c50d8ae3SPaolo Bonzini static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2805c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp,
2806c50d8ae3SPaolo Bonzini 				    u64 *start, u64 *end)
2807c50d8ae3SPaolo Bonzini {
2808c50d8ae3SPaolo Bonzini 	struct page *pages[PTE_PREFETCH_NUM];
2809c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
28100a2b64c5SBen Gardon 	unsigned int access = sp->role.access;
2811c50d8ae3SPaolo Bonzini 	int i, ret;
2812c50d8ae3SPaolo Bonzini 	gfn_t gfn;
2813c50d8ae3SPaolo Bonzini 
2814c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2815c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2816c50d8ae3SPaolo Bonzini 	if (!slot)
2817c50d8ae3SPaolo Bonzini 		return -1;
2818c50d8ae3SPaolo Bonzini 
2819c50d8ae3SPaolo Bonzini 	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2820c50d8ae3SPaolo Bonzini 	if (ret <= 0)
2821c50d8ae3SPaolo Bonzini 		return -1;
2822c50d8ae3SPaolo Bonzini 
2823c50d8ae3SPaolo Bonzini 	for (i = 0; i < ret; i++, gfn++, start++) {
2824e88b8093SSean Christopherson 		mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
2825c50d8ae3SPaolo Bonzini 			     page_to_pfn(pages[i]), true, true);
2826c50d8ae3SPaolo Bonzini 		put_page(pages[i]);
2827c50d8ae3SPaolo Bonzini 	}
2828c50d8ae3SPaolo Bonzini 
2829c50d8ae3SPaolo Bonzini 	return 0;
2830c50d8ae3SPaolo Bonzini }
2831c50d8ae3SPaolo Bonzini 
2832c50d8ae3SPaolo Bonzini static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2833c50d8ae3SPaolo Bonzini 				  struct kvm_mmu_page *sp, u64 *sptep)
2834c50d8ae3SPaolo Bonzini {
2835c50d8ae3SPaolo Bonzini 	u64 *spte, *start = NULL;
2836c50d8ae3SPaolo Bonzini 	int i;
2837c50d8ae3SPaolo Bonzini 
2838c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
2839c50d8ae3SPaolo Bonzini 
2840c50d8ae3SPaolo Bonzini 	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2841c50d8ae3SPaolo Bonzini 	spte = sp->spt + i;
2842c50d8ae3SPaolo Bonzini 
2843c50d8ae3SPaolo Bonzini 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2844c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*spte) || spte == sptep) {
2845c50d8ae3SPaolo Bonzini 			if (!start)
2846c50d8ae3SPaolo Bonzini 				continue;
2847c50d8ae3SPaolo Bonzini 			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2848c50d8ae3SPaolo Bonzini 				break;
2849c50d8ae3SPaolo Bonzini 			start = NULL;
2850c50d8ae3SPaolo Bonzini 		} else if (!start)
2851c50d8ae3SPaolo Bonzini 			start = spte;
2852c50d8ae3SPaolo Bonzini 	}
2853c50d8ae3SPaolo Bonzini }
2854c50d8ae3SPaolo Bonzini 
2855c50d8ae3SPaolo Bonzini static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2856c50d8ae3SPaolo Bonzini {
2857c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2858c50d8ae3SPaolo Bonzini 
285957354682SSean Christopherson 	sp = sptep_to_sp(sptep);
2860c50d8ae3SPaolo Bonzini 
2861c50d8ae3SPaolo Bonzini 	/*
2862c50d8ae3SPaolo Bonzini 	 * Without accessed bits, there's no way to distinguish between
2863c50d8ae3SPaolo Bonzini 	 * actually accessed translations and prefetched, so disable pte
2864c50d8ae3SPaolo Bonzini 	 * prefetch if accessed bits aren't available.
2865c50d8ae3SPaolo Bonzini 	 */
2866c50d8ae3SPaolo Bonzini 	if (sp_ad_disabled(sp))
2867c50d8ae3SPaolo Bonzini 		return;
2868c50d8ae3SPaolo Bonzini 
28693bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
2870c50d8ae3SPaolo Bonzini 		return;
2871c50d8ae3SPaolo Bonzini 
28724a42d848SDavid Stevens 	/*
28734a42d848SDavid Stevens 	 * If addresses are being invalidated, skip prefetching to avoid
28744a42d848SDavid Stevens 	 * accidentally prefetching those addresses.
28754a42d848SDavid Stevens 	 */
28764a42d848SDavid Stevens 	if (unlikely(vcpu->kvm->mmu_notifier_count))
28774a42d848SDavid Stevens 		return;
28784a42d848SDavid Stevens 
2879c50d8ae3SPaolo Bonzini 	__direct_pte_prefetch(vcpu, sp, sptep);
2880c50d8ae3SPaolo Bonzini }
2881c50d8ae3SPaolo Bonzini 
28821b6d9d9eSSean Christopherson static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
28838ca6f063SBen Gardon 				  const struct kvm_memory_slot *slot)
2884db543216SSean Christopherson {
2885db543216SSean Christopherson 	unsigned long hva;
2886db543216SSean Christopherson 	pte_t *pte;
2887db543216SSean Christopherson 	int level;
2888db543216SSean Christopherson 
2889e851265aSSean Christopherson 	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
28903bae0459SSean Christopherson 		return PG_LEVEL_4K;
2891db543216SSean Christopherson 
2892293e306eSSean Christopherson 	/*
2893293e306eSSean Christopherson 	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
2894293e306eSSean Christopherson 	 * is not solely for performance, it's also necessary to avoid the
2895293e306eSSean Christopherson 	 * "writable" check in __gfn_to_hva_many(), which will always fail on
2896293e306eSSean Christopherson 	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
2897293e306eSSean Christopherson 	 * page fault steps have already verified the guest isn't writing a
2898293e306eSSean Christopherson 	 * read-only memslot.
2899293e306eSSean Christopherson 	 */
2900db543216SSean Christopherson 	hva = __gfn_to_hva_memslot(slot, gfn);
2901db543216SSean Christopherson 
29021b6d9d9eSSean Christopherson 	pte = lookup_address_in_mm(kvm->mm, hva, &level);
2903db543216SSean Christopherson 	if (unlikely(!pte))
29043bae0459SSean Christopherson 		return PG_LEVEL_4K;
2905db543216SSean Christopherson 
2906db543216SSean Christopherson 	return level;
2907db543216SSean Christopherson }
2908db543216SSean Christopherson 
29098ca6f063SBen Gardon int kvm_mmu_max_mapping_level(struct kvm *kvm,
29108ca6f063SBen Gardon 			      const struct kvm_memory_slot *slot, gfn_t gfn,
29118ca6f063SBen Gardon 			      kvm_pfn_t pfn, int max_level)
29121b6d9d9eSSean Christopherson {
29131b6d9d9eSSean Christopherson 	struct kvm_lpage_info *linfo;
2914ec607a56SPaolo Bonzini 	int host_level;
29151b6d9d9eSSean Christopherson 
29161b6d9d9eSSean Christopherson 	max_level = min(max_level, max_huge_page_level);
29171b6d9d9eSSean Christopherson 	for ( ; max_level > PG_LEVEL_4K; max_level--) {
29181b6d9d9eSSean Christopherson 		linfo = lpage_info_slot(gfn, slot, max_level);
29191b6d9d9eSSean Christopherson 		if (!linfo->disallow_lpage)
29201b6d9d9eSSean Christopherson 			break;
29211b6d9d9eSSean Christopherson 	}
29221b6d9d9eSSean Christopherson 
29231b6d9d9eSSean Christopherson 	if (max_level == PG_LEVEL_4K)
29241b6d9d9eSSean Christopherson 		return PG_LEVEL_4K;
29251b6d9d9eSSean Christopherson 
2926ec607a56SPaolo Bonzini 	host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
2927ec607a56SPaolo Bonzini 	return min(host_level, max_level);
29281b6d9d9eSSean Christopherson }
29291b6d9d9eSSean Christopherson 
2930bb18842eSBen Gardon int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
29313cf06612SSean Christopherson 			    int max_level, kvm_pfn_t *pfnp,
29323cf06612SSean Christopherson 			    bool huge_page_disallowed, int *req_level)
29330885904dSSean Christopherson {
2934293e306eSSean Christopherson 	struct kvm_memory_slot *slot;
29350885904dSSean Christopherson 	kvm_pfn_t pfn = *pfnp;
293617eff019SSean Christopherson 	kvm_pfn_t mask;
293783f06fa7SSean Christopherson 	int level;
29380885904dSSean Christopherson 
29393cf06612SSean Christopherson 	*req_level = PG_LEVEL_4K;
29403cf06612SSean Christopherson 
29413bae0459SSean Christopherson 	if (unlikely(max_level == PG_LEVEL_4K))
29423bae0459SSean Christopherson 		return PG_LEVEL_4K;
294317eff019SSean Christopherson 
2944e851265aSSean Christopherson 	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
29453bae0459SSean Christopherson 		return PG_LEVEL_4K;
294617eff019SSean Christopherson 
2947293e306eSSean Christopherson 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
2948293e306eSSean Christopherson 	if (!slot)
29493bae0459SSean Christopherson 		return PG_LEVEL_4K;
2950293e306eSSean Christopherson 
29513cf06612SSean Christopherson 	/*
29523cf06612SSean Christopherson 	 * Enforce the iTLB multihit workaround after capturing the requested
29533cf06612SSean Christopherson 	 * level, which will be used to do precise, accurate accounting.
29543cf06612SSean Christopherson 	 */
2955ec607a56SPaolo Bonzini 	*req_level = level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
2956ec607a56SPaolo Bonzini 	if (level == PG_LEVEL_4K || huge_page_disallowed)
29573cf06612SSean Christopherson 		return PG_LEVEL_4K;
29584cd071d1SSean Christopherson 
29590885904dSSean Christopherson 	/*
29604cd071d1SSean Christopherson 	 * mmu_notifier_retry() was successful and mmu_lock is held, so
29614cd071d1SSean Christopherson 	 * the pmd can't be split from under us.
29620885904dSSean Christopherson 	 */
29630885904dSSean Christopherson 	mask = KVM_PAGES_PER_HPAGE(level) - 1;
29640885904dSSean Christopherson 	VM_BUG_ON((gfn & mask) != (pfn & mask));
29654cd071d1SSean Christopherson 	*pfnp = pfn & ~mask;
296683f06fa7SSean Christopherson 
296783f06fa7SSean Christopherson 	return level;
29680885904dSSean Christopherson }
29690885904dSSean Christopherson 
2970bb18842eSBen Gardon void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
2971bb18842eSBen Gardon 				kvm_pfn_t *pfnp, int *goal_levelp)
2972c50d8ae3SPaolo Bonzini {
2973bb18842eSBen Gardon 	int level = *goal_levelp;
2974c50d8ae3SPaolo Bonzini 
29757d945312SBen Gardon 	if (cur_level == level && level > PG_LEVEL_4K &&
2976c50d8ae3SPaolo Bonzini 	    is_shadow_present_pte(spte) &&
2977c50d8ae3SPaolo Bonzini 	    !is_large_pte(spte)) {
2978c50d8ae3SPaolo Bonzini 		/*
2979c50d8ae3SPaolo Bonzini 		 * A small SPTE exists for this pfn, but FNAME(fetch)
2980c50d8ae3SPaolo Bonzini 		 * and __direct_map would like to create a large PTE
2981c50d8ae3SPaolo Bonzini 		 * instead: just force them to go down another level,
2982c50d8ae3SPaolo Bonzini 		 * patching back for them into pfn the next 9 bits of
2983c50d8ae3SPaolo Bonzini 		 * the address.
2984c50d8ae3SPaolo Bonzini 		 */
29857d945312SBen Gardon 		u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
29867d945312SBen Gardon 				KVM_PAGES_PER_HPAGE(level - 1);
2987c50d8ae3SPaolo Bonzini 		*pfnp |= gfn & page_mask;
2988bb18842eSBen Gardon 		(*goal_levelp)--;
2989c50d8ae3SPaolo Bonzini 	}
2990c50d8ae3SPaolo Bonzini }
2991c50d8ae3SPaolo Bonzini 
29926c2fd34fSSean Christopherson static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
299383f06fa7SSean Christopherson 			int map_writable, int max_level, kvm_pfn_t pfn,
29946c2fd34fSSean Christopherson 			bool prefault, bool is_tdp)
2995c50d8ae3SPaolo Bonzini {
29966c2fd34fSSean Christopherson 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
29976c2fd34fSSean Christopherson 	bool write = error_code & PFERR_WRITE_MASK;
29986c2fd34fSSean Christopherson 	bool exec = error_code & PFERR_FETCH_MASK;
29996c2fd34fSSean Christopherson 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
3000c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator it;
3001c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
30023cf06612SSean Christopherson 	int level, req_level, ret;
3003c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
3004c50d8ae3SPaolo Bonzini 	gfn_t base_gfn = gfn;
3005c50d8ae3SPaolo Bonzini 
30063cf06612SSean Christopherson 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
30073cf06612SSean Christopherson 					huge_page_disallowed, &req_level);
30084cd071d1SSean Christopherson 
3009c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
3010c50d8ae3SPaolo Bonzini 	for_each_shadow_entry(vcpu, gpa, it) {
3011c50d8ae3SPaolo Bonzini 		/*
3012c50d8ae3SPaolo Bonzini 		 * We cannot overwrite existing page tables with an NX
3013c50d8ae3SPaolo Bonzini 		 * large page, as the leaf could be executable.
3014c50d8ae3SPaolo Bonzini 		 */
3015dcc70651SSean Christopherson 		if (nx_huge_page_workaround_enabled)
30167d945312SBen Gardon 			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
30177d945312SBen Gardon 						   &pfn, &level);
3018c50d8ae3SPaolo Bonzini 
3019c50d8ae3SPaolo Bonzini 		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
3020c50d8ae3SPaolo Bonzini 		if (it.level == level)
3021c50d8ae3SPaolo Bonzini 			break;
3022c50d8ae3SPaolo Bonzini 
3023c50d8ae3SPaolo Bonzini 		drop_large_spte(vcpu, it.sptep);
302403fffc54SSean Christopherson 		if (is_shadow_present_pte(*it.sptep))
302503fffc54SSean Christopherson 			continue;
302603fffc54SSean Christopherson 
3027c50d8ae3SPaolo Bonzini 		sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
3028c50d8ae3SPaolo Bonzini 				      it.level - 1, true, ACC_ALL);
3029c50d8ae3SPaolo Bonzini 
3030c50d8ae3SPaolo Bonzini 		link_shadow_page(vcpu, it.sptep, sp);
30315bcaf3e1SSean Christopherson 		if (is_tdp && huge_page_disallowed &&
30325bcaf3e1SSean Christopherson 		    req_level >= it.level)
3033c50d8ae3SPaolo Bonzini 			account_huge_nx_page(vcpu->kvm, sp);
3034c50d8ae3SPaolo Bonzini 	}
3035c50d8ae3SPaolo Bonzini 
3036c50d8ae3SPaolo Bonzini 	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
3037c50d8ae3SPaolo Bonzini 			   write, level, base_gfn, pfn, prefault,
3038c50d8ae3SPaolo Bonzini 			   map_writable);
303912703759SSean Christopherson 	if (ret == RET_PF_SPURIOUS)
304012703759SSean Christopherson 		return ret;
304112703759SSean Christopherson 
3042c50d8ae3SPaolo Bonzini 	direct_pte_prefetch(vcpu, it.sptep);
3043c50d8ae3SPaolo Bonzini 	++vcpu->stat.pf_fixed;
3044c50d8ae3SPaolo Bonzini 	return ret;
3045c50d8ae3SPaolo Bonzini }
3046c50d8ae3SPaolo Bonzini 
3047c50d8ae3SPaolo Bonzini static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
3048c50d8ae3SPaolo Bonzini {
3049c50d8ae3SPaolo Bonzini 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
3050c50d8ae3SPaolo Bonzini }
3051c50d8ae3SPaolo Bonzini 
3052c50d8ae3SPaolo Bonzini static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
3053c50d8ae3SPaolo Bonzini {
3054c50d8ae3SPaolo Bonzini 	/*
3055c50d8ae3SPaolo Bonzini 	 * Do not cache the mmio info caused by writing the readonly gfn
3056c50d8ae3SPaolo Bonzini 	 * into the spte otherwise read access on readonly gfn also can
3057c50d8ae3SPaolo Bonzini 	 * caused mmio page fault and treat it as mmio access.
3058c50d8ae3SPaolo Bonzini 	 */
3059c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_RO_FAULT)
3060c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3061c50d8ae3SPaolo Bonzini 
3062c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_HWPOISON) {
3063c50d8ae3SPaolo Bonzini 		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
3064c50d8ae3SPaolo Bonzini 		return RET_PF_RETRY;
3065c50d8ae3SPaolo Bonzini 	}
3066c50d8ae3SPaolo Bonzini 
3067c50d8ae3SPaolo Bonzini 	return -EFAULT;
3068c50d8ae3SPaolo Bonzini }
3069c50d8ae3SPaolo Bonzini 
3070c50d8ae3SPaolo Bonzini static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
30710a2b64c5SBen Gardon 				kvm_pfn_t pfn, unsigned int access,
30720a2b64c5SBen Gardon 				int *ret_val)
3073c50d8ae3SPaolo Bonzini {
3074c50d8ae3SPaolo Bonzini 	/* The pfn is invalid, report the error! */
3075c50d8ae3SPaolo Bonzini 	if (unlikely(is_error_pfn(pfn))) {
3076c50d8ae3SPaolo Bonzini 		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
3077c50d8ae3SPaolo Bonzini 		return true;
3078c50d8ae3SPaolo Bonzini 	}
3079c50d8ae3SPaolo Bonzini 
308030ab5901SSean Christopherson 	if (unlikely(is_noslot_pfn(pfn))) {
3081c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, gva, gfn,
3082c50d8ae3SPaolo Bonzini 				     access & shadow_mmio_access_mask);
308330ab5901SSean Christopherson 		/*
308430ab5901SSean Christopherson 		 * If MMIO caching is disabled, emulate immediately without
308530ab5901SSean Christopherson 		 * touching the shadow page tables as attempting to install an
308630ab5901SSean Christopherson 		 * MMIO SPTE will just be an expensive nop.
308730ab5901SSean Christopherson 		 */
308830ab5901SSean Christopherson 		if (unlikely(!shadow_mmio_value)) {
308930ab5901SSean Christopherson 			*ret_val = RET_PF_EMULATE;
309030ab5901SSean Christopherson 			return true;
309130ab5901SSean Christopherson 		}
309230ab5901SSean Christopherson 	}
3093c50d8ae3SPaolo Bonzini 
3094c50d8ae3SPaolo Bonzini 	return false;
3095c50d8ae3SPaolo Bonzini }
3096c50d8ae3SPaolo Bonzini 
3097c50d8ae3SPaolo Bonzini static bool page_fault_can_be_fast(u32 error_code)
3098c50d8ae3SPaolo Bonzini {
3099c50d8ae3SPaolo Bonzini 	/*
3100c50d8ae3SPaolo Bonzini 	 * Do not fix the mmio spte with invalid generation number which
3101c50d8ae3SPaolo Bonzini 	 * need to be updated by slow page fault path.
3102c50d8ae3SPaolo Bonzini 	 */
3103c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK))
3104c50d8ae3SPaolo Bonzini 		return false;
3105c50d8ae3SPaolo Bonzini 
3106c50d8ae3SPaolo Bonzini 	/* See if the page fault is due to an NX violation */
3107c50d8ae3SPaolo Bonzini 	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
3108c50d8ae3SPaolo Bonzini 		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
3109c50d8ae3SPaolo Bonzini 		return false;
3110c50d8ae3SPaolo Bonzini 
3111c50d8ae3SPaolo Bonzini 	/*
3112c50d8ae3SPaolo Bonzini 	 * #PF can be fast if:
3113c50d8ae3SPaolo Bonzini 	 * 1. The shadow page table entry is not present, which could mean that
3114c50d8ae3SPaolo Bonzini 	 *    the fault is potentially caused by access tracking (if enabled).
3115c50d8ae3SPaolo Bonzini 	 * 2. The shadow page table entry is present and the fault
3116c50d8ae3SPaolo Bonzini 	 *    is caused by write-protect, that means we just need change the W
3117c50d8ae3SPaolo Bonzini 	 *    bit of the spte which can be done out of mmu-lock.
3118c50d8ae3SPaolo Bonzini 	 *
3119c50d8ae3SPaolo Bonzini 	 * However, if access tracking is disabled we know that a non-present
3120c50d8ae3SPaolo Bonzini 	 * page must be a genuine page fault where we have to create a new SPTE.
3121c50d8ae3SPaolo Bonzini 	 * So, if access tracking is disabled, we return true only for write
3122c50d8ae3SPaolo Bonzini 	 * accesses to a present page.
3123c50d8ae3SPaolo Bonzini 	 */
3124c50d8ae3SPaolo Bonzini 
3125c50d8ae3SPaolo Bonzini 	return shadow_acc_track_mask != 0 ||
3126c50d8ae3SPaolo Bonzini 	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
3127c50d8ae3SPaolo Bonzini 		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3128c50d8ae3SPaolo Bonzini }
3129c50d8ae3SPaolo Bonzini 
3130c50d8ae3SPaolo Bonzini /*
3131c50d8ae3SPaolo Bonzini  * Returns true if the SPTE was fixed successfully. Otherwise,
3132c50d8ae3SPaolo Bonzini  * someone else modified the SPTE from its original value.
3133c50d8ae3SPaolo Bonzini  */
3134c50d8ae3SPaolo Bonzini static bool
3135c50d8ae3SPaolo Bonzini fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3136c50d8ae3SPaolo Bonzini 			u64 *sptep, u64 old_spte, u64 new_spte)
3137c50d8ae3SPaolo Bonzini {
3138c50d8ae3SPaolo Bonzini 	gfn_t gfn;
3139c50d8ae3SPaolo Bonzini 
3140c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
3141c50d8ae3SPaolo Bonzini 
3142c50d8ae3SPaolo Bonzini 	/*
3143c50d8ae3SPaolo Bonzini 	 * Theoretically we could also set dirty bit (and flush TLB) here in
3144c50d8ae3SPaolo Bonzini 	 * order to eliminate unnecessary PML logging. See comments in
3145c50d8ae3SPaolo Bonzini 	 * set_spte. But fast_page_fault is very unlikely to happen with PML
3146c50d8ae3SPaolo Bonzini 	 * enabled, so we do not do this. This might result in the same GPA
3147c50d8ae3SPaolo Bonzini 	 * to be logged in PML buffer again when the write really happens, and
3148c50d8ae3SPaolo Bonzini 	 * eventually to be called by mark_page_dirty twice. But it's also no
3149c50d8ae3SPaolo Bonzini 	 * harm. This also avoids the TLB flush needed after setting dirty bit
3150c50d8ae3SPaolo Bonzini 	 * so non-PML cases won't be impacted.
3151c50d8ae3SPaolo Bonzini 	 *
3152c50d8ae3SPaolo Bonzini 	 * Compare with set_spte where instead shadow_dirty_mask is set.
3153c50d8ae3SPaolo Bonzini 	 */
3154c50d8ae3SPaolo Bonzini 	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3155c50d8ae3SPaolo Bonzini 		return false;
3156c50d8ae3SPaolo Bonzini 
3157c50d8ae3SPaolo Bonzini 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3158c50d8ae3SPaolo Bonzini 		/*
3159c50d8ae3SPaolo Bonzini 		 * The gfn of direct spte is stable since it is
3160c50d8ae3SPaolo Bonzini 		 * calculated by sp->gfn.
3161c50d8ae3SPaolo Bonzini 		 */
3162c50d8ae3SPaolo Bonzini 		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
3163c50d8ae3SPaolo Bonzini 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
3164c50d8ae3SPaolo Bonzini 	}
3165c50d8ae3SPaolo Bonzini 
3166c50d8ae3SPaolo Bonzini 	return true;
3167c50d8ae3SPaolo Bonzini }
3168c50d8ae3SPaolo Bonzini 
3169c50d8ae3SPaolo Bonzini static bool is_access_allowed(u32 fault_err_code, u64 spte)
3170c50d8ae3SPaolo Bonzini {
3171c50d8ae3SPaolo Bonzini 	if (fault_err_code & PFERR_FETCH_MASK)
3172c50d8ae3SPaolo Bonzini 		return is_executable_pte(spte);
3173c50d8ae3SPaolo Bonzini 
3174c50d8ae3SPaolo Bonzini 	if (fault_err_code & PFERR_WRITE_MASK)
3175c50d8ae3SPaolo Bonzini 		return is_writable_pte(spte);
3176c50d8ae3SPaolo Bonzini 
3177c50d8ae3SPaolo Bonzini 	/* Fault was on Read access */
3178c50d8ae3SPaolo Bonzini 	return spte & PT_PRESENT_MASK;
3179c50d8ae3SPaolo Bonzini }
3180c50d8ae3SPaolo Bonzini 
3181c50d8ae3SPaolo Bonzini /*
31826e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
31836e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-preset. If no
31846e8eb206SDavid Matlack  * walk could be performed, returns NULL and *spte does not contain valid data.
31856e8eb206SDavid Matlack  *
31866e8eb206SDavid Matlack  * Contract:
31876e8eb206SDavid Matlack  *  - Must be called between walk_shadow_page_lockless_{begin,end}.
31886e8eb206SDavid Matlack  *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
31896e8eb206SDavid Matlack  */
31906e8eb206SDavid Matlack static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
31916e8eb206SDavid Matlack {
31926e8eb206SDavid Matlack 	struct kvm_shadow_walk_iterator iterator;
31936e8eb206SDavid Matlack 	u64 old_spte;
31946e8eb206SDavid Matlack 	u64 *sptep = NULL;
31956e8eb206SDavid Matlack 
31966e8eb206SDavid Matlack 	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
31976e8eb206SDavid Matlack 		sptep = iterator.sptep;
31986e8eb206SDavid Matlack 		*spte = old_spte;
31996e8eb206SDavid Matlack 
32006e8eb206SDavid Matlack 		if (!is_shadow_present_pte(old_spte))
32016e8eb206SDavid Matlack 			break;
32026e8eb206SDavid Matlack 	}
32036e8eb206SDavid Matlack 
32046e8eb206SDavid Matlack 	return sptep;
32056e8eb206SDavid Matlack }
32066e8eb206SDavid Matlack 
32076e8eb206SDavid Matlack /*
3208c4371c2aSSean Christopherson  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3209c50d8ae3SPaolo Bonzini  */
321076cd325eSDavid Matlack static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
3211c50d8ae3SPaolo Bonzini {
3212c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3213c4371c2aSSean Christopherson 	int ret = RET_PF_INVALID;
3214c50d8ae3SPaolo Bonzini 	u64 spte = 0ull;
32156e8eb206SDavid Matlack 	u64 *sptep = NULL;
3216c50d8ae3SPaolo Bonzini 	uint retry_count = 0;
3217c50d8ae3SPaolo Bonzini 
3218c50d8ae3SPaolo Bonzini 	if (!page_fault_can_be_fast(error_code))
3219c4371c2aSSean Christopherson 		return ret;
3220c50d8ae3SPaolo Bonzini 
3221c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3222c50d8ae3SPaolo Bonzini 
3223c50d8ae3SPaolo Bonzini 	do {
3224c50d8ae3SPaolo Bonzini 		u64 new_spte;
3225c50d8ae3SPaolo Bonzini 
32266e8eb206SDavid Matlack 		if (is_tdp_mmu(vcpu->arch.mmu))
32276e8eb206SDavid Matlack 			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, gpa, &spte);
32286e8eb206SDavid Matlack 		else
32296e8eb206SDavid Matlack 			sptep = fast_pf_get_last_sptep(vcpu, gpa, &spte);
3230c50d8ae3SPaolo Bonzini 
3231ec89e643SSean Christopherson 		if (!is_shadow_present_pte(spte))
3232ec89e643SSean Christopherson 			break;
3233ec89e643SSean Christopherson 
32346e8eb206SDavid Matlack 		sp = sptep_to_sp(sptep);
3235c50d8ae3SPaolo Bonzini 		if (!is_last_spte(spte, sp->role.level))
3236c50d8ae3SPaolo Bonzini 			break;
3237c50d8ae3SPaolo Bonzini 
3238c50d8ae3SPaolo Bonzini 		/*
3239c50d8ae3SPaolo Bonzini 		 * Check whether the memory access that caused the fault would
3240c50d8ae3SPaolo Bonzini 		 * still cause it if it were to be performed right now. If not,
3241c50d8ae3SPaolo Bonzini 		 * then this is a spurious fault caused by TLB lazily flushed,
3242c50d8ae3SPaolo Bonzini 		 * or some other CPU has already fixed the PTE after the
3243c50d8ae3SPaolo Bonzini 		 * current CPU took the fault.
3244c50d8ae3SPaolo Bonzini 		 *
3245c50d8ae3SPaolo Bonzini 		 * Need not check the access of upper level table entries since
3246c50d8ae3SPaolo Bonzini 		 * they are always ACC_ALL.
3247c50d8ae3SPaolo Bonzini 		 */
3248c50d8ae3SPaolo Bonzini 		if (is_access_allowed(error_code, spte)) {
3249c4371c2aSSean Christopherson 			ret = RET_PF_SPURIOUS;
3250c50d8ae3SPaolo Bonzini 			break;
3251c50d8ae3SPaolo Bonzini 		}
3252c50d8ae3SPaolo Bonzini 
3253c50d8ae3SPaolo Bonzini 		new_spte = spte;
3254c50d8ae3SPaolo Bonzini 
3255c50d8ae3SPaolo Bonzini 		if (is_access_track_spte(spte))
3256c50d8ae3SPaolo Bonzini 			new_spte = restore_acc_track_spte(new_spte);
3257c50d8ae3SPaolo Bonzini 
3258c50d8ae3SPaolo Bonzini 		/*
3259c50d8ae3SPaolo Bonzini 		 * Currently, to simplify the code, write-protection can
3260c50d8ae3SPaolo Bonzini 		 * be removed in the fast path only if the SPTE was
3261c50d8ae3SPaolo Bonzini 		 * write-protected for dirty-logging or access tracking.
3262c50d8ae3SPaolo Bonzini 		 */
3263c50d8ae3SPaolo Bonzini 		if ((error_code & PFERR_WRITE_MASK) &&
3264e6302698SMiaohe Lin 		    spte_can_locklessly_be_made_writable(spte)) {
3265c50d8ae3SPaolo Bonzini 			new_spte |= PT_WRITABLE_MASK;
3266c50d8ae3SPaolo Bonzini 
3267c50d8ae3SPaolo Bonzini 			/*
3268c50d8ae3SPaolo Bonzini 			 * Do not fix write-permission on the large spte.  Since
3269c50d8ae3SPaolo Bonzini 			 * we only dirty the first page into the dirty-bitmap in
3270c50d8ae3SPaolo Bonzini 			 * fast_pf_fix_direct_spte(), other pages are missed
3271c50d8ae3SPaolo Bonzini 			 * if its slot has dirty logging enabled.
3272c50d8ae3SPaolo Bonzini 			 *
3273c50d8ae3SPaolo Bonzini 			 * Instead, we let the slow page fault path create a
3274c50d8ae3SPaolo Bonzini 			 * normal spte to fix the access.
3275c50d8ae3SPaolo Bonzini 			 *
3276c50d8ae3SPaolo Bonzini 			 * See the comments in kvm_arch_commit_memory_region().
3277c50d8ae3SPaolo Bonzini 			 */
32783bae0459SSean Christopherson 			if (sp->role.level > PG_LEVEL_4K)
3279c50d8ae3SPaolo Bonzini 				break;
3280c50d8ae3SPaolo Bonzini 		}
3281c50d8ae3SPaolo Bonzini 
3282c50d8ae3SPaolo Bonzini 		/* Verify that the fault can be handled in the fast path */
3283c50d8ae3SPaolo Bonzini 		if (new_spte == spte ||
3284c50d8ae3SPaolo Bonzini 		    !is_access_allowed(error_code, new_spte))
3285c50d8ae3SPaolo Bonzini 			break;
3286c50d8ae3SPaolo Bonzini 
3287c50d8ae3SPaolo Bonzini 		/*
3288c50d8ae3SPaolo Bonzini 		 * Currently, fast page fault only works for direct mapping
3289c50d8ae3SPaolo Bonzini 		 * since the gfn is not stable for indirect shadow page. See
32903ecad8c2SMauro Carvalho Chehab 		 * Documentation/virt/kvm/locking.rst to get more detail.
3291c50d8ae3SPaolo Bonzini 		 */
32926e8eb206SDavid Matlack 		if (fast_pf_fix_direct_spte(vcpu, sp, sptep, spte, new_spte)) {
3293c4371c2aSSean Christopherson 			ret = RET_PF_FIXED;
3294c50d8ae3SPaolo Bonzini 			break;
3295c4371c2aSSean Christopherson 		}
3296c50d8ae3SPaolo Bonzini 
3297c50d8ae3SPaolo Bonzini 		if (++retry_count > 4) {
3298c50d8ae3SPaolo Bonzini 			printk_once(KERN_WARNING
3299c50d8ae3SPaolo Bonzini 				"kvm: Fast #PF retrying more than 4 times.\n");
3300c50d8ae3SPaolo Bonzini 			break;
3301c50d8ae3SPaolo Bonzini 		}
3302c50d8ae3SPaolo Bonzini 
3303c50d8ae3SPaolo Bonzini 	} while (true);
3304c50d8ae3SPaolo Bonzini 
33056e8eb206SDavid Matlack 	trace_fast_page_fault(vcpu, gpa, error_code, sptep, spte, ret);
3306c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3307c50d8ae3SPaolo Bonzini 
3308c4371c2aSSean Christopherson 	return ret;
3309c50d8ae3SPaolo Bonzini }
3310c50d8ae3SPaolo Bonzini 
3311c50d8ae3SPaolo Bonzini static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3312c50d8ae3SPaolo Bonzini 			       struct list_head *invalid_list)
3313c50d8ae3SPaolo Bonzini {
3314c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3315c50d8ae3SPaolo Bonzini 
3316c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(*root_hpa))
3317c50d8ae3SPaolo Bonzini 		return;
3318c50d8ae3SPaolo Bonzini 
3319e47c4aeeSSean Christopherson 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
332002c00b3aSBen Gardon 
3321897218ffSPaolo Bonzini 	if (is_tdp_mmu_page(sp))
33226103bc07SBen Gardon 		kvm_tdp_mmu_put_root(kvm, sp, false);
332376eb54e7SBen Gardon 	else if (!--sp->root_count && sp->role.invalid)
3324c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3325c50d8ae3SPaolo Bonzini 
3326c50d8ae3SPaolo Bonzini 	*root_hpa = INVALID_PAGE;
3327c50d8ae3SPaolo Bonzini }
3328c50d8ae3SPaolo Bonzini 
3329c50d8ae3SPaolo Bonzini /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3330c50d8ae3SPaolo Bonzini void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3331c50d8ae3SPaolo Bonzini 			ulong roots_to_free)
3332c50d8ae3SPaolo Bonzini {
33334d710de9SSean Christopherson 	struct kvm *kvm = vcpu->kvm;
3334c50d8ae3SPaolo Bonzini 	int i;
3335c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
3336c50d8ae3SPaolo Bonzini 	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3337c50d8ae3SPaolo Bonzini 
3338c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3339c50d8ae3SPaolo Bonzini 
3340c50d8ae3SPaolo Bonzini 	/* Before acquiring the MMU lock, see if we need to do any real work. */
3341c50d8ae3SPaolo Bonzini 	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
3342c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3343c50d8ae3SPaolo Bonzini 			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3344c50d8ae3SPaolo Bonzini 			    VALID_PAGE(mmu->prev_roots[i].hpa))
3345c50d8ae3SPaolo Bonzini 				break;
3346c50d8ae3SPaolo Bonzini 
3347c50d8ae3SPaolo Bonzini 		if (i == KVM_MMU_NUM_PREV_ROOTS)
3348c50d8ae3SPaolo Bonzini 			return;
3349c50d8ae3SPaolo Bonzini 	}
3350c50d8ae3SPaolo Bonzini 
3351531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
3352c50d8ae3SPaolo Bonzini 
3353c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3354c50d8ae3SPaolo Bonzini 		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
33554d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3356c50d8ae3SPaolo Bonzini 					   &invalid_list);
3357c50d8ae3SPaolo Bonzini 
3358c50d8ae3SPaolo Bonzini 	if (free_active_root) {
3359c50d8ae3SPaolo Bonzini 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3360c50d8ae3SPaolo Bonzini 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
33614d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
336204d45551SSean Christopherson 		} else if (mmu->pae_root) {
3363c834e5e4SSean Christopherson 			for (i = 0; i < 4; ++i) {
3364c834e5e4SSean Christopherson 				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3365c834e5e4SSean Christopherson 					continue;
3366c834e5e4SSean Christopherson 
3367c834e5e4SSean Christopherson 				mmu_free_root_page(kvm, &mmu->pae_root[i],
3368c50d8ae3SPaolo Bonzini 						   &invalid_list);
3369c834e5e4SSean Christopherson 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3370c50d8ae3SPaolo Bonzini 			}
3371c50d8ae3SPaolo Bonzini 		}
337204d45551SSean Christopherson 		mmu->root_hpa = INVALID_PAGE;
3373be01e8e2SSean Christopherson 		mmu->root_pgd = 0;
3374c50d8ae3SPaolo Bonzini 	}
3375c50d8ae3SPaolo Bonzini 
33764d710de9SSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
3377531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
3378c50d8ae3SPaolo Bonzini }
3379c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3380c50d8ae3SPaolo Bonzini 
338125b62c62SSean Christopherson void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
338225b62c62SSean Christopherson {
338325b62c62SSean Christopherson 	unsigned long roots_to_free = 0;
338425b62c62SSean Christopherson 	hpa_t root_hpa;
338525b62c62SSean Christopherson 	int i;
338625b62c62SSean Christopherson 
338725b62c62SSean Christopherson 	/*
338825b62c62SSean Christopherson 	 * This should not be called while L2 is active, L2 can't invalidate
338925b62c62SSean Christopherson 	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
339025b62c62SSean Christopherson 	 */
339125b62c62SSean Christopherson 	WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
339225b62c62SSean Christopherson 
339325b62c62SSean Christopherson 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
339425b62c62SSean Christopherson 		root_hpa = mmu->prev_roots[i].hpa;
339525b62c62SSean Christopherson 		if (!VALID_PAGE(root_hpa))
339625b62c62SSean Christopherson 			continue;
339725b62c62SSean Christopherson 
339825b62c62SSean Christopherson 		if (!to_shadow_page(root_hpa) ||
339925b62c62SSean Christopherson 			to_shadow_page(root_hpa)->role.guest_mode)
340025b62c62SSean Christopherson 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
340125b62c62SSean Christopherson 	}
340225b62c62SSean Christopherson 
340325b62c62SSean Christopherson 	kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
340425b62c62SSean Christopherson }
340525b62c62SSean Christopherson EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
340625b62c62SSean Christopherson 
340725b62c62SSean Christopherson 
3408c50d8ae3SPaolo Bonzini static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3409c50d8ae3SPaolo Bonzini {
3410c50d8ae3SPaolo Bonzini 	int ret = 0;
3411c50d8ae3SPaolo Bonzini 
3412995decb6SVitaly Kuznetsov 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3413c50d8ae3SPaolo Bonzini 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3414c50d8ae3SPaolo Bonzini 		ret = 1;
3415c50d8ae3SPaolo Bonzini 	}
3416c50d8ae3SPaolo Bonzini 
3417c50d8ae3SPaolo Bonzini 	return ret;
3418c50d8ae3SPaolo Bonzini }
3419c50d8ae3SPaolo Bonzini 
34208123f265SSean Christopherson static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
34218123f265SSean Christopherson 			    u8 level, bool direct)
3422c50d8ae3SPaolo Bonzini {
3423c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
34248123f265SSean Christopherson 
34258123f265SSean Christopherson 	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
34268123f265SSean Christopherson 	++sp->root_count;
34278123f265SSean Christopherson 
34288123f265SSean Christopherson 	return __pa(sp->spt);
34298123f265SSean Christopherson }
34308123f265SSean Christopherson 
34318123f265SSean Christopherson static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
34328123f265SSean Christopherson {
3433b37233c9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3434b37233c9SSean Christopherson 	u8 shadow_root_level = mmu->shadow_root_level;
34358123f265SSean Christopherson 	hpa_t root;
3436c50d8ae3SPaolo Bonzini 	unsigned i;
34374a38162eSPaolo Bonzini 	int r;
34384a38162eSPaolo Bonzini 
34394a38162eSPaolo Bonzini 	write_lock(&vcpu->kvm->mmu_lock);
34404a38162eSPaolo Bonzini 	r = make_mmu_pages_available(vcpu);
34414a38162eSPaolo Bonzini 	if (r < 0)
34424a38162eSPaolo Bonzini 		goto out_unlock;
3443c50d8ae3SPaolo Bonzini 
3444897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(vcpu->kvm)) {
344502c00b3aSBen Gardon 		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3446b37233c9SSean Christopherson 		mmu->root_hpa = root;
344702c00b3aSBen Gardon 	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
34486e6ec584SSean Christopherson 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3449b37233c9SSean Christopherson 		mmu->root_hpa = root;
34508123f265SSean Christopherson 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
34514a38162eSPaolo Bonzini 		if (WARN_ON_ONCE(!mmu->pae_root)) {
34524a38162eSPaolo Bonzini 			r = -EIO;
34534a38162eSPaolo Bonzini 			goto out_unlock;
34544a38162eSPaolo Bonzini 		}
345573ad1606SSean Christopherson 
3456c50d8ae3SPaolo Bonzini 		for (i = 0; i < 4; ++i) {
3457c834e5e4SSean Christopherson 			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3458c50d8ae3SPaolo Bonzini 
34598123f265SSean Christopherson 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
34608123f265SSean Christopherson 					      i << 30, PT32_ROOT_LEVEL, true);
346117e368d9SSean Christopherson 			mmu->pae_root[i] = root | PT_PRESENT_MASK |
346217e368d9SSean Christopherson 					   shadow_me_mask;
3463c50d8ae3SPaolo Bonzini 		}
3464b37233c9SSean Christopherson 		mmu->root_hpa = __pa(mmu->pae_root);
346573ad1606SSean Christopherson 	} else {
346673ad1606SSean Christopherson 		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
34674a38162eSPaolo Bonzini 		r = -EIO;
34684a38162eSPaolo Bonzini 		goto out_unlock;
346973ad1606SSean Christopherson 	}
34703651c7fcSSean Christopherson 
3471be01e8e2SSean Christopherson 	/* root_pgd is ignored for direct MMUs. */
3472b37233c9SSean Christopherson 	mmu->root_pgd = 0;
34734a38162eSPaolo Bonzini out_unlock:
34744a38162eSPaolo Bonzini 	write_unlock(&vcpu->kvm->mmu_lock);
34754a38162eSPaolo Bonzini 	return r;
3476c50d8ae3SPaolo Bonzini }
3477c50d8ae3SPaolo Bonzini 
3478c50d8ae3SPaolo Bonzini static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3479c50d8ae3SPaolo Bonzini {
3480b37233c9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
34816e0918aeSSean Christopherson 	u64 pdptrs[4], pm_mask;
3482be01e8e2SSean Christopherson 	gfn_t root_gfn, root_pgd;
34838123f265SSean Christopherson 	hpa_t root;
34844a38162eSPaolo Bonzini 	unsigned i;
34854a38162eSPaolo Bonzini 	int r;
3486c50d8ae3SPaolo Bonzini 
3487b37233c9SSean Christopherson 	root_pgd = mmu->get_guest_pgd(vcpu);
3488be01e8e2SSean Christopherson 	root_gfn = root_pgd >> PAGE_SHIFT;
3489c50d8ae3SPaolo Bonzini 
3490c50d8ae3SPaolo Bonzini 	if (mmu_check_root(vcpu, root_gfn))
3491c50d8ae3SPaolo Bonzini 		return 1;
3492c50d8ae3SPaolo Bonzini 
3493c50d8ae3SPaolo Bonzini 	/*
34944a38162eSPaolo Bonzini 	 * On SVM, reading PDPTRs might access guest memory, which might fault
34954a38162eSPaolo Bonzini 	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
34964a38162eSPaolo Bonzini 	 */
34976e0918aeSSean Christopherson 	if (mmu->root_level == PT32E_ROOT_LEVEL) {
34986e0918aeSSean Christopherson 		for (i = 0; i < 4; ++i) {
34996e0918aeSSean Christopherson 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
35006e0918aeSSean Christopherson 			if (!(pdptrs[i] & PT_PRESENT_MASK))
35016e0918aeSSean Christopherson 				continue;
35026e0918aeSSean Christopherson 
35036e0918aeSSean Christopherson 			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
35046e0918aeSSean Christopherson 				return 1;
35056e0918aeSSean Christopherson 		}
35066e0918aeSSean Christopherson 	}
35076e0918aeSSean Christopherson 
3508d501f747SBen Gardon 	r = alloc_all_memslots_rmaps(vcpu->kvm);
3509d501f747SBen Gardon 	if (r)
3510d501f747SBen Gardon 		return r;
3511d501f747SBen Gardon 
35124a38162eSPaolo Bonzini 	write_lock(&vcpu->kvm->mmu_lock);
35134a38162eSPaolo Bonzini 	r = make_mmu_pages_available(vcpu);
35144a38162eSPaolo Bonzini 	if (r < 0)
35154a38162eSPaolo Bonzini 		goto out_unlock;
35164a38162eSPaolo Bonzini 
3517c50d8ae3SPaolo Bonzini 	/*
3518c50d8ae3SPaolo Bonzini 	 * Do we shadow a long mode page table? If so we need to
3519c50d8ae3SPaolo Bonzini 	 * write-protect the guests page table root.
3520c50d8ae3SPaolo Bonzini 	 */
3521b37233c9SSean Christopherson 	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
35228123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, 0,
3523b37233c9SSean Christopherson 				      mmu->shadow_root_level, false);
3524b37233c9SSean Christopherson 		mmu->root_hpa = root;
3525be01e8e2SSean Christopherson 		goto set_root_pgd;
3526c50d8ae3SPaolo Bonzini 	}
3527c50d8ae3SPaolo Bonzini 
35284a38162eSPaolo Bonzini 	if (WARN_ON_ONCE(!mmu->pae_root)) {
35294a38162eSPaolo Bonzini 		r = -EIO;
35304a38162eSPaolo Bonzini 		goto out_unlock;
35314a38162eSPaolo Bonzini 	}
353273ad1606SSean Christopherson 
3533c50d8ae3SPaolo Bonzini 	/*
3534c50d8ae3SPaolo Bonzini 	 * We shadow a 32 bit page table. This may be a legacy 2-level
3535c50d8ae3SPaolo Bonzini 	 * or a PAE 3-level page table. In either case we need to be aware that
3536c50d8ae3SPaolo Bonzini 	 * the shadow page table may be a PAE or a long mode page table.
3537c50d8ae3SPaolo Bonzini 	 */
353817e368d9SSean Christopherson 	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3539748e52b9SSean Christopherson 	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
3540c50d8ae3SPaolo Bonzini 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3541c50d8ae3SPaolo Bonzini 
354203ca4589SSean Christopherson 		if (WARN_ON_ONCE(!mmu->pml4_root)) {
35434a38162eSPaolo Bonzini 			r = -EIO;
35444a38162eSPaolo Bonzini 			goto out_unlock;
35454a38162eSPaolo Bonzini 		}
354673ad1606SSean Christopherson 
354703ca4589SSean Christopherson 		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
354804d45551SSean Christopherson 	}
354904d45551SSean Christopherson 
3550c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3551c834e5e4SSean Christopherson 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
35526e6ec584SSean Christopherson 
3553b37233c9SSean Christopherson 		if (mmu->root_level == PT32E_ROOT_LEVEL) {
35546e0918aeSSean Christopherson 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3555c834e5e4SSean Christopherson 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3556c50d8ae3SPaolo Bonzini 				continue;
3557c50d8ae3SPaolo Bonzini 			}
35586e0918aeSSean Christopherson 			root_gfn = pdptrs[i] >> PAGE_SHIFT;
3559c50d8ae3SPaolo Bonzini 		}
3560c50d8ae3SPaolo Bonzini 
35618123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
35628123f265SSean Christopherson 				      PT32_ROOT_LEVEL, false);
3563b37233c9SSean Christopherson 		mmu->pae_root[i] = root | pm_mask;
3564c50d8ae3SPaolo Bonzini 	}
3565c50d8ae3SPaolo Bonzini 
3566ba0a194fSSean Christopherson 	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
356703ca4589SSean Christopherson 		mmu->root_hpa = __pa(mmu->pml4_root);
3568ba0a194fSSean Christopherson 	else
3569ba0a194fSSean Christopherson 		mmu->root_hpa = __pa(mmu->pae_root);
3570c50d8ae3SPaolo Bonzini 
3571be01e8e2SSean Christopherson set_root_pgd:
3572b37233c9SSean Christopherson 	mmu->root_pgd = root_pgd;
35734a38162eSPaolo Bonzini out_unlock:
35744a38162eSPaolo Bonzini 	write_unlock(&vcpu->kvm->mmu_lock);
3575c50d8ae3SPaolo Bonzini 
3576c50d8ae3SPaolo Bonzini 	return 0;
3577c50d8ae3SPaolo Bonzini }
3578c50d8ae3SPaolo Bonzini 
3579748e52b9SSean Christopherson static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3580c50d8ae3SPaolo Bonzini {
3581748e52b9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
358203ca4589SSean Christopherson 	u64 *pml4_root, *pae_root;
3583748e52b9SSean Christopherson 
3584748e52b9SSean Christopherson 	/*
3585748e52b9SSean Christopherson 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3586748e52b9SSean Christopherson 	 * tables are allocated and initialized at root creation as there is no
3587748e52b9SSean Christopherson 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
3588748e52b9SSean Christopherson 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3589748e52b9SSean Christopherson 	 */
3590748e52b9SSean Christopherson 	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
3591748e52b9SSean Christopherson 	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
3592748e52b9SSean Christopherson 		return 0;
3593748e52b9SSean Christopherson 
3594748e52b9SSean Christopherson 	/*
3595748e52b9SSean Christopherson 	 * This mess only works with 4-level paging and needs to be updated to
3596748e52b9SSean Christopherson 	 * work with 5-level paging.
3597748e52b9SSean Christopherson 	 */
3598748e52b9SSean Christopherson 	if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
3599748e52b9SSean Christopherson 		return -EIO;
3600748e52b9SSean Christopherson 
360103ca4589SSean Christopherson 	if (mmu->pae_root && mmu->pml4_root)
3602748e52b9SSean Christopherson 		return 0;
3603748e52b9SSean Christopherson 
3604748e52b9SSean Christopherson 	/*
3605748e52b9SSean Christopherson 	 * The special roots should always be allocated in concert.  Yell and
3606748e52b9SSean Christopherson 	 * bail if KVM ends up in a state where only one of the roots is valid.
3607748e52b9SSean Christopherson 	 */
360803ca4589SSean Christopherson 	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root))
3609748e52b9SSean Christopherson 		return -EIO;
3610748e52b9SSean Christopherson 
36114a98623dSSean Christopherson 	/*
36124a98623dSSean Christopherson 	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
36134a98623dSSean Christopherson 	 * doesn't need to be decrypted.
36144a98623dSSean Christopherson 	 */
3615748e52b9SSean Christopherson 	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3616748e52b9SSean Christopherson 	if (!pae_root)
3617748e52b9SSean Christopherson 		return -ENOMEM;
3618748e52b9SSean Christopherson 
361903ca4589SSean Christopherson 	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
362003ca4589SSean Christopherson 	if (!pml4_root) {
3621748e52b9SSean Christopherson 		free_page((unsigned long)pae_root);
3622748e52b9SSean Christopherson 		return -ENOMEM;
3623748e52b9SSean Christopherson 	}
3624748e52b9SSean Christopherson 
3625748e52b9SSean Christopherson 	mmu->pae_root = pae_root;
362603ca4589SSean Christopherson 	mmu->pml4_root = pml4_root;
3627748e52b9SSean Christopherson 
3628748e52b9SSean Christopherson 	return 0;
3629c50d8ae3SPaolo Bonzini }
3630c50d8ae3SPaolo Bonzini 
3631c50d8ae3SPaolo Bonzini void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3632c50d8ae3SPaolo Bonzini {
3633c50d8ae3SPaolo Bonzini 	int i;
3634c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3635c50d8ae3SPaolo Bonzini 
3636c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
3637c50d8ae3SPaolo Bonzini 		return;
3638c50d8ae3SPaolo Bonzini 
3639c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3640c50d8ae3SPaolo Bonzini 		return;
3641c50d8ae3SPaolo Bonzini 
3642c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3643c50d8ae3SPaolo Bonzini 
3644c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3645c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->root_hpa;
3646e47c4aeeSSean Christopherson 		sp = to_shadow_page(root);
3647c50d8ae3SPaolo Bonzini 
3648c50d8ae3SPaolo Bonzini 		/*
3649c50d8ae3SPaolo Bonzini 		 * Even if another CPU was marking the SP as unsync-ed
3650c50d8ae3SPaolo Bonzini 		 * simultaneously, any guest page table changes are not
3651c50d8ae3SPaolo Bonzini 		 * guaranteed to be visible anyway until this VCPU issues a TLB
3652c50d8ae3SPaolo Bonzini 		 * flush strictly after those changes are made. We only need to
3653c50d8ae3SPaolo Bonzini 		 * ensure that the other CPU sets these flags before any actual
3654c50d8ae3SPaolo Bonzini 		 * changes to the page tables are made. The comments in
36550337f585SSean Christopherson 		 * mmu_try_to_unsync_pages() describe what could go wrong if
36560337f585SSean Christopherson 		 * this requirement isn't satisfied.
3657c50d8ae3SPaolo Bonzini 		 */
3658c50d8ae3SPaolo Bonzini 		if (!smp_load_acquire(&sp->unsync) &&
3659c50d8ae3SPaolo Bonzini 		    !smp_load_acquire(&sp->unsync_children))
3660c50d8ae3SPaolo Bonzini 			return;
3661c50d8ae3SPaolo Bonzini 
3662531810caSBen Gardon 		write_lock(&vcpu->kvm->mmu_lock);
3663c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3664c50d8ae3SPaolo Bonzini 
3665c50d8ae3SPaolo Bonzini 		mmu_sync_children(vcpu, sp);
3666c50d8ae3SPaolo Bonzini 
3667c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3668531810caSBen Gardon 		write_unlock(&vcpu->kvm->mmu_lock);
3669c50d8ae3SPaolo Bonzini 		return;
3670c50d8ae3SPaolo Bonzini 	}
3671c50d8ae3SPaolo Bonzini 
3672531810caSBen Gardon 	write_lock(&vcpu->kvm->mmu_lock);
3673c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3674c50d8ae3SPaolo Bonzini 
3675c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3676c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->pae_root[i];
3677c50d8ae3SPaolo Bonzini 
3678c834e5e4SSean Christopherson 		if (IS_VALID_PAE_ROOT(root)) {
3679c50d8ae3SPaolo Bonzini 			root &= PT64_BASE_ADDR_MASK;
3680e47c4aeeSSean Christopherson 			sp = to_shadow_page(root);
3681c50d8ae3SPaolo Bonzini 			mmu_sync_children(vcpu, sp);
3682c50d8ae3SPaolo Bonzini 		}
3683c50d8ae3SPaolo Bonzini 	}
3684c50d8ae3SPaolo Bonzini 
3685c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3686531810caSBen Gardon 	write_unlock(&vcpu->kvm->mmu_lock);
3687c50d8ae3SPaolo Bonzini }
3688c50d8ae3SPaolo Bonzini 
3689736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3690c50d8ae3SPaolo Bonzini 				  u32 access, struct x86_exception *exception)
3691c50d8ae3SPaolo Bonzini {
3692c50d8ae3SPaolo Bonzini 	if (exception)
3693c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3694c50d8ae3SPaolo Bonzini 	return vaddr;
3695c50d8ae3SPaolo Bonzini }
3696c50d8ae3SPaolo Bonzini 
3697736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3698c50d8ae3SPaolo Bonzini 					 u32 access,
3699c50d8ae3SPaolo Bonzini 					 struct x86_exception *exception)
3700c50d8ae3SPaolo Bonzini {
3701c50d8ae3SPaolo Bonzini 	if (exception)
3702c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3703c50d8ae3SPaolo Bonzini 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3704c50d8ae3SPaolo Bonzini }
3705c50d8ae3SPaolo Bonzini 
3706c50d8ae3SPaolo Bonzini static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3707c50d8ae3SPaolo Bonzini {
3708c50d8ae3SPaolo Bonzini 	/*
3709c50d8ae3SPaolo Bonzini 	 * A nested guest cannot use the MMIO cache if it is using nested
3710c50d8ae3SPaolo Bonzini 	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
3711c50d8ae3SPaolo Bonzini 	 */
3712c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
3713c50d8ae3SPaolo Bonzini 		return false;
3714c50d8ae3SPaolo Bonzini 
3715c50d8ae3SPaolo Bonzini 	if (direct)
3716c50d8ae3SPaolo Bonzini 		return vcpu_match_mmio_gpa(vcpu, addr);
3717c50d8ae3SPaolo Bonzini 
3718c50d8ae3SPaolo Bonzini 	return vcpu_match_mmio_gva(vcpu, addr);
3719c50d8ae3SPaolo Bonzini }
3720c50d8ae3SPaolo Bonzini 
372195fb5b02SBen Gardon /*
372295fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
372395fb5b02SBen Gardon  * That SPTE may be non-present.
3724c5c8c7c5SDavid Matlack  *
3725c5c8c7c5SDavid Matlack  * Must be called between walk_shadow_page_lockless_{begin,end}.
372695fb5b02SBen Gardon  */
372739b4d43eSSean Christopherson static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3728c50d8ae3SPaolo Bonzini {
3729c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
37302aa07893SSean Christopherson 	int leaf = -1;
373195fb5b02SBen Gardon 	u64 spte;
3732c50d8ae3SPaolo Bonzini 
373339b4d43eSSean Christopherson 	for (shadow_walk_init(&iterator, vcpu, addr),
373439b4d43eSSean Christopherson 	     *root_level = iterator.level;
3735c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&iterator);
3736c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&iterator, spte)) {
373795fb5b02SBen Gardon 		leaf = iterator.level;
3738c50d8ae3SPaolo Bonzini 		spte = mmu_spte_get_lockless(iterator.sptep);
3739c50d8ae3SPaolo Bonzini 
3740dde81f94SSean Christopherson 		sptes[leaf] = spte;
3741c50d8ae3SPaolo Bonzini 
3742c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(spte))
3743c50d8ae3SPaolo Bonzini 			break;
374495fb5b02SBen Gardon 	}
374595fb5b02SBen Gardon 
374695fb5b02SBen Gardon 	return leaf;
374795fb5b02SBen Gardon }
374895fb5b02SBen Gardon 
37499aa41879SSean Christopherson /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
375095fb5b02SBen Gardon static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
375195fb5b02SBen Gardon {
3752dde81f94SSean Christopherson 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
375395fb5b02SBen Gardon 	struct rsvd_bits_validate *rsvd_check;
375439b4d43eSSean Christopherson 	int root, leaf, level;
375595fb5b02SBen Gardon 	bool reserved = false;
375695fb5b02SBen Gardon 
3757c5c8c7c5SDavid Matlack 	walk_shadow_page_lockless_begin(vcpu);
3758c5c8c7c5SDavid Matlack 
375963c0cac9SDavid Matlack 	if (is_tdp_mmu(vcpu->arch.mmu))
376039b4d43eSSean Christopherson 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
376195fb5b02SBen Gardon 	else
376239b4d43eSSean Christopherson 		leaf = get_walk(vcpu, addr, sptes, &root);
376395fb5b02SBen Gardon 
3764c5c8c7c5SDavid Matlack 	walk_shadow_page_lockless_end(vcpu);
3765c5c8c7c5SDavid Matlack 
37662aa07893SSean Christopherson 	if (unlikely(leaf < 0)) {
37672aa07893SSean Christopherson 		*sptep = 0ull;
37682aa07893SSean Christopherson 		return reserved;
37692aa07893SSean Christopherson 	}
37702aa07893SSean Christopherson 
37719aa41879SSean Christopherson 	*sptep = sptes[leaf];
37729aa41879SSean Christopherson 
37739aa41879SSean Christopherson 	/*
37749aa41879SSean Christopherson 	 * Skip reserved bits checks on the terminal leaf if it's not a valid
37759aa41879SSean Christopherson 	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
37769aa41879SSean Christopherson 	 * design, always have reserved bits set.  The purpose of the checks is
37779aa41879SSean Christopherson 	 * to detect reserved bits on non-MMIO SPTEs. i.e. buggy SPTEs.
37789aa41879SSean Christopherson 	 */
37799aa41879SSean Christopherson 	if (!is_shadow_present_pte(sptes[leaf]))
37809aa41879SSean Christopherson 		leaf++;
378195fb5b02SBen Gardon 
378295fb5b02SBen Gardon 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
378395fb5b02SBen Gardon 
37849aa41879SSean Christopherson 	for (level = root; level >= leaf; level--)
3785961f8445SSean Christopherson 		reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
3786c50d8ae3SPaolo Bonzini 
3787c50d8ae3SPaolo Bonzini 	if (reserved) {
3788bb4cdf3aSSean Christopherson 		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3789c50d8ae3SPaolo Bonzini 		       __func__, addr);
379095fb5b02SBen Gardon 		for (level = root; level >= leaf; level--)
3791bb4cdf3aSSean Christopherson 			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
3792bb4cdf3aSSean Christopherson 			       sptes[level], level,
3793961f8445SSean Christopherson 			       get_rsvd_bits(rsvd_check, sptes[level], level));
3794c50d8ae3SPaolo Bonzini 	}
3795ddce6208SSean Christopherson 
3796c50d8ae3SPaolo Bonzini 	return reserved;
3797c50d8ae3SPaolo Bonzini }
3798c50d8ae3SPaolo Bonzini 
3799c50d8ae3SPaolo Bonzini static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3800c50d8ae3SPaolo Bonzini {
3801c50d8ae3SPaolo Bonzini 	u64 spte;
3802c50d8ae3SPaolo Bonzini 	bool reserved;
3803c50d8ae3SPaolo Bonzini 
3804c50d8ae3SPaolo Bonzini 	if (mmio_info_in_cache(vcpu, addr, direct))
3805c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3806c50d8ae3SPaolo Bonzini 
380795fb5b02SBen Gardon 	reserved = get_mmio_spte(vcpu, addr, &spte);
3808c50d8ae3SPaolo Bonzini 	if (WARN_ON(reserved))
3809c50d8ae3SPaolo Bonzini 		return -EINVAL;
3810c50d8ae3SPaolo Bonzini 
3811c50d8ae3SPaolo Bonzini 	if (is_mmio_spte(spte)) {
3812c50d8ae3SPaolo Bonzini 		gfn_t gfn = get_mmio_spte_gfn(spte);
38130a2b64c5SBen Gardon 		unsigned int access = get_mmio_spte_access(spte);
3814c50d8ae3SPaolo Bonzini 
3815c50d8ae3SPaolo Bonzini 		if (!check_mmio_spte(vcpu, spte))
3816c50d8ae3SPaolo Bonzini 			return RET_PF_INVALID;
3817c50d8ae3SPaolo Bonzini 
3818c50d8ae3SPaolo Bonzini 		if (direct)
3819c50d8ae3SPaolo Bonzini 			addr = 0;
3820c50d8ae3SPaolo Bonzini 
3821c50d8ae3SPaolo Bonzini 		trace_handle_mmio_page_fault(addr, gfn, access);
3822c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3823c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3824c50d8ae3SPaolo Bonzini 	}
3825c50d8ae3SPaolo Bonzini 
3826c50d8ae3SPaolo Bonzini 	/*
3827c50d8ae3SPaolo Bonzini 	 * If the page table is zapped by other cpus, let CPU fault again on
3828c50d8ae3SPaolo Bonzini 	 * the address.
3829c50d8ae3SPaolo Bonzini 	 */
3830c50d8ae3SPaolo Bonzini 	return RET_PF_RETRY;
3831c50d8ae3SPaolo Bonzini }
3832c50d8ae3SPaolo Bonzini 
3833c50d8ae3SPaolo Bonzini static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3834c50d8ae3SPaolo Bonzini 					 u32 error_code, gfn_t gfn)
3835c50d8ae3SPaolo Bonzini {
3836c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK))
3837c50d8ae3SPaolo Bonzini 		return false;
3838c50d8ae3SPaolo Bonzini 
3839c50d8ae3SPaolo Bonzini 	if (!(error_code & PFERR_PRESENT_MASK) ||
3840c50d8ae3SPaolo Bonzini 	      !(error_code & PFERR_WRITE_MASK))
3841c50d8ae3SPaolo Bonzini 		return false;
3842c50d8ae3SPaolo Bonzini 
3843c50d8ae3SPaolo Bonzini 	/*
3844c50d8ae3SPaolo Bonzini 	 * guest is writing the page which is write tracked which can
3845c50d8ae3SPaolo Bonzini 	 * not be fixed by page fault handler.
3846c50d8ae3SPaolo Bonzini 	 */
3847c50d8ae3SPaolo Bonzini 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
3848c50d8ae3SPaolo Bonzini 		return true;
3849c50d8ae3SPaolo Bonzini 
3850c50d8ae3SPaolo Bonzini 	return false;
3851c50d8ae3SPaolo Bonzini }
3852c50d8ae3SPaolo Bonzini 
3853c50d8ae3SPaolo Bonzini static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3854c50d8ae3SPaolo Bonzini {
3855c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
3856c50d8ae3SPaolo Bonzini 	u64 spte;
3857c50d8ae3SPaolo Bonzini 
3858c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3859c50d8ae3SPaolo Bonzini 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3860c50d8ae3SPaolo Bonzini 		clear_sp_write_flooding_count(iterator.sptep);
3861c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(spte))
3862c50d8ae3SPaolo Bonzini 			break;
3863c50d8ae3SPaolo Bonzini 	}
3864c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3865c50d8ae3SPaolo Bonzini }
3866c50d8ae3SPaolo Bonzini 
3867e8c22266SVitaly Kuznetsov static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
38689f1a8526SSean Christopherson 				    gfn_t gfn)
3869c50d8ae3SPaolo Bonzini {
3870c50d8ae3SPaolo Bonzini 	struct kvm_arch_async_pf arch;
3871c50d8ae3SPaolo Bonzini 
3872c50d8ae3SPaolo Bonzini 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3873c50d8ae3SPaolo Bonzini 	arch.gfn = gfn;
3874c50d8ae3SPaolo Bonzini 	arch.direct_map = vcpu->arch.mmu->direct_map;
3875d8dd54e0SSean Christopherson 	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3876c50d8ae3SPaolo Bonzini 
38779f1a8526SSean Christopherson 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
38789f1a8526SSean Christopherson 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3879c50d8ae3SPaolo Bonzini }
3880c50d8ae3SPaolo Bonzini 
388133a5c000SMaxim Levitsky static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
38824a42d848SDavid Stevens 			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
38838f32d5e5SMaxim Levitsky 			 bool write, bool *writable, int *r)
3884c50d8ae3SPaolo Bonzini {
3885c36b7150SPaolo Bonzini 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3886c50d8ae3SPaolo Bonzini 	bool async;
3887c50d8ae3SPaolo Bonzini 
3888e0c37868SSean Christopherson 	/*
3889e0c37868SSean Christopherson 	 * Retry the page fault if the gfn hit a memslot that is being deleted
3890e0c37868SSean Christopherson 	 * or moved.  This ensures any existing SPTEs for the old memslot will
3891e0c37868SSean Christopherson 	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3892e0c37868SSean Christopherson 	 */
3893e0c37868SSean Christopherson 	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
38948f32d5e5SMaxim Levitsky 		goto out_retry;
3895e0c37868SSean Christopherson 
38969cc13d60SMaxim Levitsky 	if (!kvm_is_visible_memslot(slot)) {
3897c36b7150SPaolo Bonzini 		/* Don't expose private memslots to L2. */
38989cc13d60SMaxim Levitsky 		if (is_guest_mode(vcpu)) {
3899c50d8ae3SPaolo Bonzini 			*pfn = KVM_PFN_NOSLOT;
3900c583eed6SSean Christopherson 			*writable = false;
3901c50d8ae3SPaolo Bonzini 			return false;
3902c50d8ae3SPaolo Bonzini 		}
39039cc13d60SMaxim Levitsky 		/*
39049cc13d60SMaxim Levitsky 		 * If the APIC access page exists but is disabled, go directly
39059cc13d60SMaxim Levitsky 		 * to emulation without caching the MMIO access or creating a
39069cc13d60SMaxim Levitsky 		 * MMIO SPTE.  That way the cache doesn't need to be purged
39079cc13d60SMaxim Levitsky 		 * when the AVIC is re-enabled.
39089cc13d60SMaxim Levitsky 		 */
39099cc13d60SMaxim Levitsky 		if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
39109cc13d60SMaxim Levitsky 		    !kvm_apicv_activated(vcpu->kvm)) {
39119cc13d60SMaxim Levitsky 			*r = RET_PF_EMULATE;
39129cc13d60SMaxim Levitsky 			return true;
39139cc13d60SMaxim Levitsky 		}
39149cc13d60SMaxim Levitsky 	}
3915c50d8ae3SPaolo Bonzini 
3916c50d8ae3SPaolo Bonzini 	async = false;
39174a42d848SDavid Stevens 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async,
39184a42d848SDavid Stevens 				    write, writable, hva);
3919c50d8ae3SPaolo Bonzini 	if (!async)
3920c50d8ae3SPaolo Bonzini 		return false; /* *pfn has correct page already */
3921c50d8ae3SPaolo Bonzini 
3922c50d8ae3SPaolo Bonzini 	if (!prefault && kvm_can_do_async_pf(vcpu)) {
39239f1a8526SSean Christopherson 		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
3924c50d8ae3SPaolo Bonzini 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
39259f1a8526SSean Christopherson 			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
3926c50d8ae3SPaolo Bonzini 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
39278f32d5e5SMaxim Levitsky 			goto out_retry;
39289f1a8526SSean Christopherson 		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
39298f32d5e5SMaxim Levitsky 			goto out_retry;
3930c50d8ae3SPaolo Bonzini 	}
3931c50d8ae3SPaolo Bonzini 
39324a42d848SDavid Stevens 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
39334a42d848SDavid Stevens 				    write, writable, hva);
39348f32d5e5SMaxim Levitsky 
39358f32d5e5SMaxim Levitsky out_retry:
39368f32d5e5SMaxim Levitsky 	*r = RET_PF_RETRY;
39378f32d5e5SMaxim Levitsky 	return true;
3938c50d8ae3SPaolo Bonzini }
3939c50d8ae3SPaolo Bonzini 
39400f90e1c1SSean Christopherson static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
39410f90e1c1SSean Christopherson 			     bool prefault, int max_level, bool is_tdp)
3942c50d8ae3SPaolo Bonzini {
394363c0cac9SDavid Matlack 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
3944367fd790SSean Christopherson 	bool write = error_code & PFERR_WRITE_MASK;
39450f90e1c1SSean Christopherson 	bool map_writable;
3946c50d8ae3SPaolo Bonzini 
39470f90e1c1SSean Christopherson 	gfn_t gfn = gpa >> PAGE_SHIFT;
39480f90e1c1SSean Christopherson 	unsigned long mmu_seq;
39490f90e1c1SSean Christopherson 	kvm_pfn_t pfn;
39504a42d848SDavid Stevens 	hva_t hva;
395183f06fa7SSean Christopherson 	int r;
3952c50d8ae3SPaolo Bonzini 
3953c50d8ae3SPaolo Bonzini 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
3954c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3955c50d8ae3SPaolo Bonzini 
3956c4371c2aSSean Christopherson 	r = fast_page_fault(vcpu, gpa, error_code);
3957c4371c2aSSean Christopherson 	if (r != RET_PF_INVALID)
3958c4371c2aSSean Christopherson 		return r;
395983291445SSean Christopherson 
3960378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, false);
3961c50d8ae3SPaolo Bonzini 	if (r)
3962c50d8ae3SPaolo Bonzini 		return r;
3963c50d8ae3SPaolo Bonzini 
3964367fd790SSean Christopherson 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
3965367fd790SSean Christopherson 	smp_rmb();
3966367fd790SSean Christopherson 
396733a5c000SMaxim Levitsky 	if (kvm_faultin_pfn(vcpu, prefault, gfn, gpa, &pfn, &hva,
39688f32d5e5SMaxim Levitsky 			 write, &map_writable, &r))
39698f32d5e5SMaxim Levitsky 		return r;
3970367fd790SSean Christopherson 
39710f90e1c1SSean Christopherson 	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
3972367fd790SSean Christopherson 		return r;
3973367fd790SSean Christopherson 
3974367fd790SSean Christopherson 	r = RET_PF_RETRY;
3975a2855afcSBen Gardon 
39760b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
3977a2855afcSBen Gardon 		read_lock(&vcpu->kvm->mmu_lock);
3978a2855afcSBen Gardon 	else
3979531810caSBen Gardon 		write_lock(&vcpu->kvm->mmu_lock);
3980a2855afcSBen Gardon 
39814a42d848SDavid Stevens 	if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
3982367fd790SSean Christopherson 		goto out_unlock;
39837bd7ded6SSean Christopherson 	r = make_mmu_pages_available(vcpu);
39847bd7ded6SSean Christopherson 	if (r)
3985367fd790SSean Christopherson 		goto out_unlock;
3986bb18842eSBen Gardon 
39870b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
3988bb18842eSBen Gardon 		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
3989bb18842eSBen Gardon 				    pfn, prefault);
3990bb18842eSBen Gardon 	else
39916c2fd34fSSean Christopherson 		r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
39926c2fd34fSSean Christopherson 				 prefault, is_tdp);
39930f90e1c1SSean Christopherson 
3994367fd790SSean Christopherson out_unlock:
39950b873fd7SDavid Matlack 	if (is_tdp_mmu_fault)
3996a2855afcSBen Gardon 		read_unlock(&vcpu->kvm->mmu_lock);
3997a2855afcSBen Gardon 	else
3998531810caSBen Gardon 		write_unlock(&vcpu->kvm->mmu_lock);
3999367fd790SSean Christopherson 	kvm_release_pfn_clean(pfn);
4000367fd790SSean Christopherson 	return r;
4001c50d8ae3SPaolo Bonzini }
4002c50d8ae3SPaolo Bonzini 
40030f90e1c1SSean Christopherson static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
40040f90e1c1SSean Christopherson 				u32 error_code, bool prefault)
40050f90e1c1SSean Christopherson {
40060f90e1c1SSean Christopherson 	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
40070f90e1c1SSean Christopherson 
40080f90e1c1SSean Christopherson 	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
40090f90e1c1SSean Christopherson 	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
40103bae0459SSean Christopherson 				 PG_LEVEL_2M, false);
40110f90e1c1SSean Christopherson }
40120f90e1c1SSean Christopherson 
4013c50d8ae3SPaolo Bonzini int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4014c50d8ae3SPaolo Bonzini 				u64 fault_address, char *insn, int insn_len)
4015c50d8ae3SPaolo Bonzini {
4016c50d8ae3SPaolo Bonzini 	int r = 1;
40179ce372b3SVitaly Kuznetsov 	u32 flags = vcpu->arch.apf.host_apf_flags;
4018c50d8ae3SPaolo Bonzini 
4019736c291cSSean Christopherson #ifndef CONFIG_X86_64
4020736c291cSSean Christopherson 	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
4021736c291cSSean Christopherson 	if (WARN_ON_ONCE(fault_address >> 32))
4022736c291cSSean Christopherson 		return -EFAULT;
4023736c291cSSean Christopherson #endif
4024736c291cSSean Christopherson 
4025c50d8ae3SPaolo Bonzini 	vcpu->arch.l1tf_flush_l1d = true;
40269ce372b3SVitaly Kuznetsov 	if (!flags) {
4027c50d8ae3SPaolo Bonzini 		trace_kvm_page_fault(fault_address, error_code);
4028c50d8ae3SPaolo Bonzini 
4029c50d8ae3SPaolo Bonzini 		if (kvm_event_needs_reinjection(vcpu))
4030c50d8ae3SPaolo Bonzini 			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
4031c50d8ae3SPaolo Bonzini 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4032c50d8ae3SPaolo Bonzini 				insn_len);
40339ce372b3SVitaly Kuznetsov 	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
403468fd66f1SVitaly Kuznetsov 		vcpu->arch.apf.host_apf_flags = 0;
4035c50d8ae3SPaolo Bonzini 		local_irq_disable();
40366bca69adSThomas Gleixner 		kvm_async_pf_task_wait_schedule(fault_address);
4037c50d8ae3SPaolo Bonzini 		local_irq_enable();
40389ce372b3SVitaly Kuznetsov 	} else {
40399ce372b3SVitaly Kuznetsov 		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4040c50d8ae3SPaolo Bonzini 	}
40419ce372b3SVitaly Kuznetsov 
4042c50d8ae3SPaolo Bonzini 	return r;
4043c50d8ae3SPaolo Bonzini }
4044c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
4045c50d8ae3SPaolo Bonzini 
40467a02674dSSean Christopherson int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
4047c50d8ae3SPaolo Bonzini 		       bool prefault)
4048c50d8ae3SPaolo Bonzini {
4049cb9b88c6SSean Christopherson 	int max_level;
4050c50d8ae3SPaolo Bonzini 
4051e662ec3eSSean Christopherson 	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
40523bae0459SSean Christopherson 	     max_level > PG_LEVEL_4K;
4053cb9b88c6SSean Christopherson 	     max_level--) {
4054cb9b88c6SSean Christopherson 		int page_num = KVM_PAGES_PER_HPAGE(max_level);
40550f90e1c1SSean Christopherson 		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
4056c50d8ae3SPaolo Bonzini 
4057cb9b88c6SSean Christopherson 		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
4058cb9b88c6SSean Christopherson 			break;
4059c50d8ae3SPaolo Bonzini 	}
4060c50d8ae3SPaolo Bonzini 
40610f90e1c1SSean Christopherson 	return direct_page_fault(vcpu, gpa, error_code, prefault,
40620f90e1c1SSean Christopherson 				 max_level, true);
4063c50d8ae3SPaolo Bonzini }
4064c50d8ae3SPaolo Bonzini 
406584a16226SSean Christopherson static void nonpaging_init_context(struct kvm_mmu *context)
4066c50d8ae3SPaolo Bonzini {
4067c50d8ae3SPaolo Bonzini 	context->page_fault = nonpaging_page_fault;
4068c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = nonpaging_gva_to_gpa;
4069c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
40705efac074SPaolo Bonzini 	context->invlpg = NULL;
4071c50d8ae3SPaolo Bonzini 	context->direct_map = true;
4072c50d8ae3SPaolo Bonzini }
4073c50d8ae3SPaolo Bonzini 
4074be01e8e2SSean Christopherson static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
40750be44352SSean Christopherson 				  union kvm_mmu_page_role role)
40760be44352SSean Christopherson {
4077be01e8e2SSean Christopherson 	return (role.direct || pgd == root->pgd) &&
4078e47c4aeeSSean Christopherson 	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
4079e47c4aeeSSean Christopherson 	       role.word == to_shadow_page(root->hpa)->role.word;
40800be44352SSean Christopherson }
40810be44352SSean Christopherson 
4082c50d8ae3SPaolo Bonzini /*
4083be01e8e2SSean Christopherson  * Find out if a previously cached root matching the new pgd/role is available.
4084c50d8ae3SPaolo Bonzini  * The current root is also inserted into the cache.
4085c50d8ae3SPaolo Bonzini  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
4086c50d8ae3SPaolo Bonzini  * returned.
4087c50d8ae3SPaolo Bonzini  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
4088c50d8ae3SPaolo Bonzini  * false is returned. This root should now be freed by the caller.
4089c50d8ae3SPaolo Bonzini  */
4090be01e8e2SSean Christopherson static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4091c50d8ae3SPaolo Bonzini 				  union kvm_mmu_page_role new_role)
4092c50d8ae3SPaolo Bonzini {
4093c50d8ae3SPaolo Bonzini 	uint i;
4094c50d8ae3SPaolo Bonzini 	struct kvm_mmu_root_info root;
4095c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4096c50d8ae3SPaolo Bonzini 
4097be01e8e2SSean Christopherson 	root.pgd = mmu->root_pgd;
4098c50d8ae3SPaolo Bonzini 	root.hpa = mmu->root_hpa;
4099c50d8ae3SPaolo Bonzini 
4100be01e8e2SSean Christopherson 	if (is_root_usable(&root, new_pgd, new_role))
41010be44352SSean Christopherson 		return true;
41020be44352SSean Christopherson 
4103c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4104c50d8ae3SPaolo Bonzini 		swap(root, mmu->prev_roots[i]);
4105c50d8ae3SPaolo Bonzini 
4106be01e8e2SSean Christopherson 		if (is_root_usable(&root, new_pgd, new_role))
4107c50d8ae3SPaolo Bonzini 			break;
4108c50d8ae3SPaolo Bonzini 	}
4109c50d8ae3SPaolo Bonzini 
4110c50d8ae3SPaolo Bonzini 	mmu->root_hpa = root.hpa;
4111be01e8e2SSean Christopherson 	mmu->root_pgd = root.pgd;
4112c50d8ae3SPaolo Bonzini 
4113c50d8ae3SPaolo Bonzini 	return i < KVM_MMU_NUM_PREV_ROOTS;
4114c50d8ae3SPaolo Bonzini }
4115c50d8ae3SPaolo Bonzini 
4116be01e8e2SSean Christopherson static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4117b869855bSSean Christopherson 			    union kvm_mmu_page_role new_role)
4118c50d8ae3SPaolo Bonzini {
4119c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4120c50d8ae3SPaolo Bonzini 
4121c50d8ae3SPaolo Bonzini 	/*
4122c50d8ae3SPaolo Bonzini 	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
4123c50d8ae3SPaolo Bonzini 	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
4124c50d8ae3SPaolo Bonzini 	 * later if necessary.
4125c50d8ae3SPaolo Bonzini 	 */
4126c50d8ae3SPaolo Bonzini 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
4127b869855bSSean Christopherson 	    mmu->root_level >= PT64_ROOT_4LEVEL)
4128fe9304d3SVitaly Kuznetsov 		return cached_root_available(vcpu, new_pgd, new_role);
4129c50d8ae3SPaolo Bonzini 
4130c50d8ae3SPaolo Bonzini 	return false;
4131c50d8ae3SPaolo Bonzini }
4132c50d8ae3SPaolo Bonzini 
4133be01e8e2SSean Christopherson static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4134b5129100SSean Christopherson 			      union kvm_mmu_page_role new_role)
4135c50d8ae3SPaolo Bonzini {
4136be01e8e2SSean Christopherson 	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
4137b869855bSSean Christopherson 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
4138b869855bSSean Christopherson 		return;
4139c50d8ae3SPaolo Bonzini 	}
4140c50d8ae3SPaolo Bonzini 
4141c50d8ae3SPaolo Bonzini 	/*
4142b869855bSSean Christopherson 	 * It's possible that the cached previous root page is obsolete because
4143b869855bSSean Christopherson 	 * of a change in the MMU generation number. However, changing the
4144b869855bSSean Christopherson 	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
4145b869855bSSean Christopherson 	 * free the root set here and allocate a new one.
4146b869855bSSean Christopherson 	 */
4147b869855bSSean Christopherson 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4148b869855bSSean Christopherson 
4149b5129100SSean Christopherson 	if (force_flush_and_sync_on_reuse) {
4150b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4151b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4152b5129100SSean Christopherson 	}
4153b869855bSSean Christopherson 
4154b869855bSSean Christopherson 	/*
4155b869855bSSean Christopherson 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4156b869855bSSean Christopherson 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
4157b869855bSSean Christopherson 	 * valid. So clear any cached MMIO info even when we don't need to sync
4158b869855bSSean Christopherson 	 * the shadow page tables.
4159c50d8ae3SPaolo Bonzini 	 */
4160c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4161c50d8ae3SPaolo Bonzini 
4162daa5b6c1SBen Gardon 	/*
4163daa5b6c1SBen Gardon 	 * If this is a direct root page, it doesn't have a write flooding
4164daa5b6c1SBen Gardon 	 * count. Otherwise, clear the write flooding count.
4165daa5b6c1SBen Gardon 	 */
4166daa5b6c1SBen Gardon 	if (!new_role.direct)
4167daa5b6c1SBen Gardon 		__clear_sp_write_flooding_count(
4168daa5b6c1SBen Gardon 				to_shadow_page(vcpu->arch.mmu->root_hpa));
4169c50d8ae3SPaolo Bonzini }
4170c50d8ae3SPaolo Bonzini 
4171b5129100SSean Christopherson void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4172c50d8ae3SPaolo Bonzini {
4173b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
4174c50d8ae3SPaolo Bonzini }
4175be01e8e2SSean Christopherson EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4176c50d8ae3SPaolo Bonzini 
4177c50d8ae3SPaolo Bonzini static unsigned long get_cr3(struct kvm_vcpu *vcpu)
4178c50d8ae3SPaolo Bonzini {
4179c50d8ae3SPaolo Bonzini 	return kvm_read_cr3(vcpu);
4180c50d8ae3SPaolo Bonzini }
4181c50d8ae3SPaolo Bonzini 
4182c50d8ae3SPaolo Bonzini static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
41830a2b64c5SBen Gardon 			   unsigned int access, int *nr_present)
4184c50d8ae3SPaolo Bonzini {
4185c50d8ae3SPaolo Bonzini 	if (unlikely(is_mmio_spte(*sptep))) {
4186c50d8ae3SPaolo Bonzini 		if (gfn != get_mmio_spte_gfn(*sptep)) {
4187c50d8ae3SPaolo Bonzini 			mmu_spte_clear_no_track(sptep);
4188c50d8ae3SPaolo Bonzini 			return true;
4189c50d8ae3SPaolo Bonzini 		}
4190c50d8ae3SPaolo Bonzini 
4191c50d8ae3SPaolo Bonzini 		(*nr_present)++;
4192c50d8ae3SPaolo Bonzini 		mark_mmio_spte(vcpu, sptep, gfn, access);
4193c50d8ae3SPaolo Bonzini 		return true;
4194c50d8ae3SPaolo Bonzini 	}
4195c50d8ae3SPaolo Bonzini 
4196c50d8ae3SPaolo Bonzini 	return false;
4197c50d8ae3SPaolo Bonzini }
4198c50d8ae3SPaolo Bonzini 
4199c50d8ae3SPaolo Bonzini #define PTTYPE_EPT 18 /* arbitrary */
4200c50d8ae3SPaolo Bonzini #define PTTYPE PTTYPE_EPT
4201c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4202c50d8ae3SPaolo Bonzini #undef PTTYPE
4203c50d8ae3SPaolo Bonzini 
4204c50d8ae3SPaolo Bonzini #define PTTYPE 64
4205c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4206c50d8ae3SPaolo Bonzini #undef PTTYPE
4207c50d8ae3SPaolo Bonzini 
4208c50d8ae3SPaolo Bonzini #define PTTYPE 32
4209c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4210c50d8ae3SPaolo Bonzini #undef PTTYPE
4211c50d8ae3SPaolo Bonzini 
4212c50d8ae3SPaolo Bonzini static void
4213b705a277SSean Christopherson __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
42145b7f575cSSean Christopherson 			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4215c50d8ae3SPaolo Bonzini 			bool pse, bool amd)
4216c50d8ae3SPaolo Bonzini {
4217c50d8ae3SPaolo Bonzini 	u64 gbpages_bit_rsvd = 0;
4218c50d8ae3SPaolo Bonzini 	u64 nonleaf_bit8_rsvd = 0;
42195b7f575cSSean Christopherson 	u64 high_bits_rsvd;
4220c50d8ae3SPaolo Bonzini 
4221c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = 0;
4222c50d8ae3SPaolo Bonzini 
4223c50d8ae3SPaolo Bonzini 	if (!gbpages)
4224c50d8ae3SPaolo Bonzini 		gbpages_bit_rsvd = rsvd_bits(7, 7);
4225c50d8ae3SPaolo Bonzini 
42265b7f575cSSean Christopherson 	if (level == PT32E_ROOT_LEVEL)
42275b7f575cSSean Christopherson 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
42285b7f575cSSean Christopherson 	else
42295b7f575cSSean Christopherson 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
42305b7f575cSSean Christopherson 
42315b7f575cSSean Christopherson 	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
42325b7f575cSSean Christopherson 	if (!nx)
42335b7f575cSSean Christopherson 		high_bits_rsvd |= rsvd_bits(63, 63);
42345b7f575cSSean Christopherson 
4235c50d8ae3SPaolo Bonzini 	/*
4236c50d8ae3SPaolo Bonzini 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4237c50d8ae3SPaolo Bonzini 	 * leaf entries) on AMD CPUs only.
4238c50d8ae3SPaolo Bonzini 	 */
4239c50d8ae3SPaolo Bonzini 	if (amd)
4240c50d8ae3SPaolo Bonzini 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4241c50d8ae3SPaolo Bonzini 
4242c50d8ae3SPaolo Bonzini 	switch (level) {
4243c50d8ae3SPaolo Bonzini 	case PT32_ROOT_LEVEL:
4244c50d8ae3SPaolo Bonzini 		/* no rsvd bits for 2 level 4K page table entries */
4245c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][1] = 0;
4246c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][0] = 0;
4247c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4248c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4249c50d8ae3SPaolo Bonzini 
4250c50d8ae3SPaolo Bonzini 		if (!pse) {
4251c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = 0;
4252c50d8ae3SPaolo Bonzini 			break;
4253c50d8ae3SPaolo Bonzini 		}
4254c50d8ae3SPaolo Bonzini 
4255c50d8ae3SPaolo Bonzini 		if (is_cpuid_PSE36())
4256c50d8ae3SPaolo Bonzini 			/* 36bits PSE 4MB page */
4257c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4258c50d8ae3SPaolo Bonzini 		else
4259c50d8ae3SPaolo Bonzini 			/* 32 bits PSE 4MB page */
4260c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4261c50d8ae3SPaolo Bonzini 		break;
4262c50d8ae3SPaolo Bonzini 	case PT32E_ROOT_LEVEL:
42635b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
42645b7f575cSSean Christopherson 						   high_bits_rsvd |
42655b7f575cSSean Christopherson 						   rsvd_bits(5, 8) |
42665b7f575cSSean Christopherson 						   rsvd_bits(1, 2);	/* PDPTE */
42675b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
42685b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
42695b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4270c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 20);	/* large page */
4271c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4272c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4273c50d8ae3SPaolo Bonzini 		break;
4274c50d8ae3SPaolo Bonzini 	case PT64_ROOT_5LEVEL:
42755b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
42765b7f575cSSean Christopherson 						   nonleaf_bit8_rsvd |
42775b7f575cSSean Christopherson 						   rsvd_bits(7, 7);
4278c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][4] =
4279c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][4];
4280df561f66SGustavo A. R. Silva 		fallthrough;
4281c50d8ae3SPaolo Bonzini 	case PT64_ROOT_4LEVEL:
42825b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
42835b7f575cSSean Christopherson 						   nonleaf_bit8_rsvd |
42845b7f575cSSean Christopherson 						   rsvd_bits(7, 7);
42855b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
42865b7f575cSSean Christopherson 						   gbpages_bit_rsvd;
42875b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
42885b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4289c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][3] =
4290c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][3];
42915b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
42925b7f575cSSean Christopherson 						   gbpages_bit_rsvd |
4293c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 29);
42945b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4295c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 20); /* large page */
4296c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4297c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4298c50d8ae3SPaolo Bonzini 		break;
4299c50d8ae3SPaolo Bonzini 	}
4300c50d8ae3SPaolo Bonzini }
4301c50d8ae3SPaolo Bonzini 
430227de9250SSean Christopherson static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
430327de9250SSean Christopherson {
430427de9250SSean Christopherson 	/*
430527de9250SSean Christopherson 	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
430627de9250SSean Christopherson 	 * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
430727de9250SSean Christopherson 	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
430827de9250SSean Christopherson 	 * walk for performance and complexity reasons.  Not to mention KVM
430927de9250SSean Christopherson 	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
431027de9250SSean Christopherson 	 * KVM once a TDP translation is installed.  Mimic hardware behavior so
431127de9250SSean Christopherson 	 * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
431227de9250SSean Christopherson 	 */
431327de9250SSean Christopherson 	return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
431427de9250SSean Christopherson 			     guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
431527de9250SSean Christopherson }
431627de9250SSean Christopherson 
4317c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4318c50d8ae3SPaolo Bonzini 				  struct kvm_mmu *context)
4319c50d8ae3SPaolo Bonzini {
4320b705a277SSean Christopherson 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
43215b7f575cSSean Christopherson 				vcpu->arch.reserved_gpa_bits,
432290599c28SSean Christopherson 				context->root_level, is_efer_nx(context),
432327de9250SSean Christopherson 				guest_can_use_gbpages(vcpu),
43244e9c0d80SSean Christopherson 				is_cr4_pse(context),
432523493d0aSSean Christopherson 				guest_cpuid_is_amd_or_hygon(vcpu));
4326c50d8ae3SPaolo Bonzini }
4327c50d8ae3SPaolo Bonzini 
4328c50d8ae3SPaolo Bonzini static void
4329c50d8ae3SPaolo Bonzini __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
43305b7f575cSSean Christopherson 			    u64 pa_bits_rsvd, bool execonly)
4331c50d8ae3SPaolo Bonzini {
43325b7f575cSSean Christopherson 	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4333c50d8ae3SPaolo Bonzini 	u64 bad_mt_xwr;
4334c50d8ae3SPaolo Bonzini 
43355b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
43365b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
43375b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
43385b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
43395b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4340c50d8ae3SPaolo Bonzini 
4341c50d8ae3SPaolo Bonzini 	/* large page */
4342c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4343c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
43445b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
43455b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
4346c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4347c50d8ae3SPaolo Bonzini 
4348c50d8ae3SPaolo Bonzini 	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
4349c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
4350c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
4351c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
4352c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
4353c50d8ae3SPaolo Bonzini 	if (!execonly) {
4354c50d8ae3SPaolo Bonzini 		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
4355c50d8ae3SPaolo Bonzini 		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4356c50d8ae3SPaolo Bonzini 	}
4357c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4358c50d8ae3SPaolo Bonzini }
4359c50d8ae3SPaolo Bonzini 
4360c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4361c50d8ae3SPaolo Bonzini 		struct kvm_mmu *context, bool execonly)
4362c50d8ae3SPaolo Bonzini {
4363c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
43645b7f575cSSean Christopherson 				    vcpu->arch.reserved_gpa_bits, execonly);
4365c50d8ae3SPaolo Bonzini }
4366c50d8ae3SPaolo Bonzini 
43676f8e65a6SSean Christopherson static inline u64 reserved_hpa_bits(void)
43686f8e65a6SSean Christopherson {
43696f8e65a6SSean Christopherson 	return rsvd_bits(shadow_phys_bits, 63);
43706f8e65a6SSean Christopherson }
43716f8e65a6SSean Christopherson 
4372c50d8ae3SPaolo Bonzini /*
4373c50d8ae3SPaolo Bonzini  * the page table on host is the shadow page table for the page
4374c50d8ae3SPaolo Bonzini  * table in guest or amd nested guest, its mmu features completely
4375c50d8ae3SPaolo Bonzini  * follow the features in guest.
4376c50d8ae3SPaolo Bonzini  */
437716be1d12SSean Christopherson static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
437816be1d12SSean Christopherson 					struct kvm_mmu *context)
4379c50d8ae3SPaolo Bonzini {
4380112022bdSSean Christopherson 	/*
4381112022bdSSean Christopherson 	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
4382112022bdSSean Christopherson 	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
4383112022bdSSean Christopherson 	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
4384112022bdSSean Christopherson 	 * The iTLB multi-hit workaround can be toggled at any time, so assume
4385112022bdSSean Christopherson 	 * NX can be used by any non-nested shadow MMU to avoid having to reset
4386112022bdSSean Christopherson 	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
4387112022bdSSean Christopherson 	 */
438890599c28SSean Christopherson 	bool uses_nx = is_efer_nx(context) || !tdp_enabled;
43898c985b2dSSean Christopherson 
43908c985b2dSSean Christopherson 	/* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */
43918c985b2dSSean Christopherson 	bool is_amd = true;
43928c985b2dSSean Christopherson 	/* KVM doesn't use 2-level page tables for the shadow MMU. */
43938c985b2dSSean Christopherson 	bool is_pse = false;
4394c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4395c50d8ae3SPaolo Bonzini 	int i;
4396c50d8ae3SPaolo Bonzini 
43978c985b2dSSean Christopherson 	WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
43988c985b2dSSean Christopherson 
4399c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4400b705a277SSean Christopherson 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4401c50d8ae3SPaolo Bonzini 				context->shadow_root_level, uses_nx,
440227de9250SSean Christopherson 				guest_can_use_gbpages(vcpu), is_pse, is_amd);
4403c50d8ae3SPaolo Bonzini 
4404c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4405c50d8ae3SPaolo Bonzini 		return;
4406c50d8ae3SPaolo Bonzini 
4407c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4408c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4409c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4410c50d8ae3SPaolo Bonzini 	}
4411c50d8ae3SPaolo Bonzini 
4412c50d8ae3SPaolo Bonzini }
4413c50d8ae3SPaolo Bonzini 
4414c50d8ae3SPaolo Bonzini static inline bool boot_cpu_is_amd(void)
4415c50d8ae3SPaolo Bonzini {
4416c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!tdp_enabled);
4417c50d8ae3SPaolo Bonzini 	return shadow_x_mask == 0;
4418c50d8ae3SPaolo Bonzini }
4419c50d8ae3SPaolo Bonzini 
4420c50d8ae3SPaolo Bonzini /*
4421c50d8ae3SPaolo Bonzini  * the direct page table on host, use as much mmu features as
4422c50d8ae3SPaolo Bonzini  * possible, however, kvm currently does not do execution-protection.
4423c50d8ae3SPaolo Bonzini  */
4424c50d8ae3SPaolo Bonzini static void
4425c50d8ae3SPaolo Bonzini reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4426c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context)
4427c50d8ae3SPaolo Bonzini {
4428c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4429c50d8ae3SPaolo Bonzini 	int i;
4430c50d8ae3SPaolo Bonzini 
4431c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4432c50d8ae3SPaolo Bonzini 
4433c50d8ae3SPaolo Bonzini 	if (boot_cpu_is_amd())
4434b705a277SSean Christopherson 		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4435c50d8ae3SPaolo Bonzini 					context->shadow_root_level, false,
4436c50d8ae3SPaolo Bonzini 					boot_cpu_has(X86_FEATURE_GBPAGES),
44378c985b2dSSean Christopherson 					false, true);
4438c50d8ae3SPaolo Bonzini 	else
4439c50d8ae3SPaolo Bonzini 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
44406f8e65a6SSean Christopherson 					    reserved_hpa_bits(), false);
4441c50d8ae3SPaolo Bonzini 
4442c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4443c50d8ae3SPaolo Bonzini 		return;
4444c50d8ae3SPaolo Bonzini 
4445c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4446c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4447c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4448c50d8ae3SPaolo Bonzini 	}
4449c50d8ae3SPaolo Bonzini }
4450c50d8ae3SPaolo Bonzini 
4451c50d8ae3SPaolo Bonzini /*
4452c50d8ae3SPaolo Bonzini  * as the comments in reset_shadow_zero_bits_mask() except it
4453c50d8ae3SPaolo Bonzini  * is the shadow page table for intel nested guest.
4454c50d8ae3SPaolo Bonzini  */
4455c50d8ae3SPaolo Bonzini static void
4456c50d8ae3SPaolo Bonzini reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4457c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context, bool execonly)
4458c50d8ae3SPaolo Bonzini {
4459c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
44606f8e65a6SSean Christopherson 				    reserved_hpa_bits(), execonly);
4461c50d8ae3SPaolo Bonzini }
4462c50d8ae3SPaolo Bonzini 
4463c50d8ae3SPaolo Bonzini #define BYTE_MASK(access) \
4464c50d8ae3SPaolo Bonzini 	((1 & (access) ? 2 : 0) | \
4465c50d8ae3SPaolo Bonzini 	 (2 & (access) ? 4 : 0) | \
4466c50d8ae3SPaolo Bonzini 	 (3 & (access) ? 8 : 0) | \
4467c50d8ae3SPaolo Bonzini 	 (4 & (access) ? 16 : 0) | \
4468c50d8ae3SPaolo Bonzini 	 (5 & (access) ? 32 : 0) | \
4469c50d8ae3SPaolo Bonzini 	 (6 & (access) ? 64 : 0) | \
4470c50d8ae3SPaolo Bonzini 	 (7 & (access) ? 128 : 0))
4471c50d8ae3SPaolo Bonzini 
4472c50d8ae3SPaolo Bonzini 
4473c596f147SSean Christopherson static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
4474c50d8ae3SPaolo Bonzini {
4475c50d8ae3SPaolo Bonzini 	unsigned byte;
4476c50d8ae3SPaolo Bonzini 
4477c50d8ae3SPaolo Bonzini 	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4478c50d8ae3SPaolo Bonzini 	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4479c50d8ae3SPaolo Bonzini 	const u8 u = BYTE_MASK(ACC_USER_MASK);
4480c50d8ae3SPaolo Bonzini 
4481c596f147SSean Christopherson 	bool cr4_smep = is_cr4_smep(mmu);
4482c596f147SSean Christopherson 	bool cr4_smap = is_cr4_smap(mmu);
4483c596f147SSean Christopherson 	bool cr0_wp = is_cr0_wp(mmu);
448490599c28SSean Christopherson 	bool efer_nx = is_efer_nx(mmu);
4485c50d8ae3SPaolo Bonzini 
4486c50d8ae3SPaolo Bonzini 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4487c50d8ae3SPaolo Bonzini 		unsigned pfec = byte << 1;
4488c50d8ae3SPaolo Bonzini 
4489c50d8ae3SPaolo Bonzini 		/*
4490c50d8ae3SPaolo Bonzini 		 * Each "*f" variable has a 1 bit for each UWX value
4491c50d8ae3SPaolo Bonzini 		 * that causes a fault with the given PFEC.
4492c50d8ae3SPaolo Bonzini 		 */
4493c50d8ae3SPaolo Bonzini 
4494c50d8ae3SPaolo Bonzini 		/* Faults from writes to non-writable pages */
4495c50d8ae3SPaolo Bonzini 		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4496c50d8ae3SPaolo Bonzini 		/* Faults from user mode accesses to supervisor pages */
4497c50d8ae3SPaolo Bonzini 		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4498c50d8ae3SPaolo Bonzini 		/* Faults from fetches of non-executable pages*/
4499c50d8ae3SPaolo Bonzini 		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4500c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode fetches of user pages */
4501c50d8ae3SPaolo Bonzini 		u8 smepf = 0;
4502c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode accesses of user pages */
4503c50d8ae3SPaolo Bonzini 		u8 smapf = 0;
4504c50d8ae3SPaolo Bonzini 
4505c50d8ae3SPaolo Bonzini 		if (!ept) {
4506c50d8ae3SPaolo Bonzini 			/* Faults from kernel mode accesses to user pages */
4507c50d8ae3SPaolo Bonzini 			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4508c50d8ae3SPaolo Bonzini 
4509c50d8ae3SPaolo Bonzini 			/* Not really needed: !nx will cause pte.nx to fault */
451090599c28SSean Christopherson 			if (!efer_nx)
4511c50d8ae3SPaolo Bonzini 				ff = 0;
4512c50d8ae3SPaolo Bonzini 
4513c50d8ae3SPaolo Bonzini 			/* Allow supervisor writes if !cr0.wp */
4514c50d8ae3SPaolo Bonzini 			if (!cr0_wp)
4515c50d8ae3SPaolo Bonzini 				wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4516c50d8ae3SPaolo Bonzini 
4517c50d8ae3SPaolo Bonzini 			/* Disallow supervisor fetches of user code if cr4.smep */
4518c50d8ae3SPaolo Bonzini 			if (cr4_smep)
4519c50d8ae3SPaolo Bonzini 				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4520c50d8ae3SPaolo Bonzini 
4521c50d8ae3SPaolo Bonzini 			/*
4522c50d8ae3SPaolo Bonzini 			 * SMAP:kernel-mode data accesses from user-mode
4523c50d8ae3SPaolo Bonzini 			 * mappings should fault. A fault is considered
4524c50d8ae3SPaolo Bonzini 			 * as a SMAP violation if all of the following
4525c50d8ae3SPaolo Bonzini 			 * conditions are true:
4526c50d8ae3SPaolo Bonzini 			 *   - X86_CR4_SMAP is set in CR4
4527c50d8ae3SPaolo Bonzini 			 *   - A user page is accessed
4528c50d8ae3SPaolo Bonzini 			 *   - The access is not a fetch
4529c50d8ae3SPaolo Bonzini 			 *   - Page fault in kernel mode
4530c50d8ae3SPaolo Bonzini 			 *   - if CPL = 3 or X86_EFLAGS_AC is clear
4531c50d8ae3SPaolo Bonzini 			 *
4532c50d8ae3SPaolo Bonzini 			 * Here, we cover the first three conditions.
4533c50d8ae3SPaolo Bonzini 			 * The fourth is computed dynamically in permission_fault();
4534c50d8ae3SPaolo Bonzini 			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4535c50d8ae3SPaolo Bonzini 			 * *not* subject to SMAP restrictions.
4536c50d8ae3SPaolo Bonzini 			 */
4537c50d8ae3SPaolo Bonzini 			if (cr4_smap)
4538c50d8ae3SPaolo Bonzini 				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4539c50d8ae3SPaolo Bonzini 		}
4540c50d8ae3SPaolo Bonzini 
4541c50d8ae3SPaolo Bonzini 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4542c50d8ae3SPaolo Bonzini 	}
4543c50d8ae3SPaolo Bonzini }
4544c50d8ae3SPaolo Bonzini 
4545c50d8ae3SPaolo Bonzini /*
4546c50d8ae3SPaolo Bonzini * PKU is an additional mechanism by which the paging controls access to
4547c50d8ae3SPaolo Bonzini * user-mode addresses based on the value in the PKRU register.  Protection
4548c50d8ae3SPaolo Bonzini * key violations are reported through a bit in the page fault error code.
4549c50d8ae3SPaolo Bonzini * Unlike other bits of the error code, the PK bit is not known at the
4550c50d8ae3SPaolo Bonzini * call site of e.g. gva_to_gpa; it must be computed directly in
4551c50d8ae3SPaolo Bonzini * permission_fault based on two bits of PKRU, on some machine state (CR4,
4552c50d8ae3SPaolo Bonzini * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4553c50d8ae3SPaolo Bonzini *
4554c50d8ae3SPaolo Bonzini * In particular the following conditions come from the error code, the
4555c50d8ae3SPaolo Bonzini * page tables and the machine state:
4556c50d8ae3SPaolo Bonzini * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4557c50d8ae3SPaolo Bonzini * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4558c50d8ae3SPaolo Bonzini * - PK is always zero if U=0 in the page tables
4559c50d8ae3SPaolo Bonzini * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4560c50d8ae3SPaolo Bonzini *
4561c50d8ae3SPaolo Bonzini * The PKRU bitmask caches the result of these four conditions.  The error
4562c50d8ae3SPaolo Bonzini * code (minus the P bit) and the page table's U bit form an index into the
4563c50d8ae3SPaolo Bonzini * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4564c50d8ae3SPaolo Bonzini * with the two bits of the PKRU register corresponding to the protection key.
4565c50d8ae3SPaolo Bonzini * For the first three conditions above the bits will be 00, thus masking
4566c50d8ae3SPaolo Bonzini * away both AD and WD.  For all reads or if the last condition holds, WD
4567c50d8ae3SPaolo Bonzini * only will be masked away.
4568c50d8ae3SPaolo Bonzini */
45692e4c0661SSean Christopherson static void update_pkru_bitmask(struct kvm_mmu *mmu)
4570c50d8ae3SPaolo Bonzini {
4571c50d8ae3SPaolo Bonzini 	unsigned bit;
4572c50d8ae3SPaolo Bonzini 	bool wp;
4573c50d8ae3SPaolo Bonzini 
45742e4c0661SSean Christopherson 	if (!is_cr4_pke(mmu)) {
4575c50d8ae3SPaolo Bonzini 		mmu->pkru_mask = 0;
4576c50d8ae3SPaolo Bonzini 		return;
4577c50d8ae3SPaolo Bonzini 	}
4578c50d8ae3SPaolo Bonzini 
45792e4c0661SSean Christopherson 	wp = is_cr0_wp(mmu);
4580c50d8ae3SPaolo Bonzini 
4581c50d8ae3SPaolo Bonzini 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4582c50d8ae3SPaolo Bonzini 		unsigned pfec, pkey_bits;
4583c50d8ae3SPaolo Bonzini 		bool check_pkey, check_write, ff, uf, wf, pte_user;
4584c50d8ae3SPaolo Bonzini 
4585c50d8ae3SPaolo Bonzini 		pfec = bit << 1;
4586c50d8ae3SPaolo Bonzini 		ff = pfec & PFERR_FETCH_MASK;
4587c50d8ae3SPaolo Bonzini 		uf = pfec & PFERR_USER_MASK;
4588c50d8ae3SPaolo Bonzini 		wf = pfec & PFERR_WRITE_MASK;
4589c50d8ae3SPaolo Bonzini 
4590c50d8ae3SPaolo Bonzini 		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
4591c50d8ae3SPaolo Bonzini 		pte_user = pfec & PFERR_RSVD_MASK;
4592c50d8ae3SPaolo Bonzini 
4593c50d8ae3SPaolo Bonzini 		/*
4594c50d8ae3SPaolo Bonzini 		 * Only need to check the access which is not an
4595c50d8ae3SPaolo Bonzini 		 * instruction fetch and is to a user page.
4596c50d8ae3SPaolo Bonzini 		 */
4597c50d8ae3SPaolo Bonzini 		check_pkey = (!ff && pte_user);
4598c50d8ae3SPaolo Bonzini 		/*
4599c50d8ae3SPaolo Bonzini 		 * write access is controlled by PKRU if it is a
4600c50d8ae3SPaolo Bonzini 		 * user access or CR0.WP = 1.
4601c50d8ae3SPaolo Bonzini 		 */
4602c50d8ae3SPaolo Bonzini 		check_write = check_pkey && wf && (uf || wp);
4603c50d8ae3SPaolo Bonzini 
4604c50d8ae3SPaolo Bonzini 		/* PKRU.AD stops both read and write access. */
4605c50d8ae3SPaolo Bonzini 		pkey_bits = !!check_pkey;
4606c50d8ae3SPaolo Bonzini 		/* PKRU.WD stops write access. */
4607c50d8ae3SPaolo Bonzini 		pkey_bits |= (!!check_write) << 1;
4608c50d8ae3SPaolo Bonzini 
4609c50d8ae3SPaolo Bonzini 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4610c50d8ae3SPaolo Bonzini 	}
4611c50d8ae3SPaolo Bonzini }
4612c50d8ae3SPaolo Bonzini 
4613533f9a4bSSean Christopherson static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
4614533f9a4bSSean Christopherson 					struct kvm_mmu *mmu)
4615c50d8ae3SPaolo Bonzini {
4616533f9a4bSSean Christopherson 	if (!is_cr0_pg(mmu))
4617533f9a4bSSean Christopherson 		return;
4618c50d8ae3SPaolo Bonzini 
4619533f9a4bSSean Christopherson 	reset_rsvds_bits_mask(vcpu, mmu);
4620533f9a4bSSean Christopherson 	update_permission_bitmask(mmu, false);
4621533f9a4bSSean Christopherson 	update_pkru_bitmask(mmu);
4622c50d8ae3SPaolo Bonzini }
4623c50d8ae3SPaolo Bonzini 
4624fe660f72SSean Christopherson static void paging64_init_context(struct kvm_mmu *context)
4625c50d8ae3SPaolo Bonzini {
4626c50d8ae3SPaolo Bonzini 	context->page_fault = paging64_page_fault;
4627c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging64_gva_to_gpa;
4628c50d8ae3SPaolo Bonzini 	context->sync_page = paging64_sync_page;
4629c50d8ae3SPaolo Bonzini 	context->invlpg = paging64_invlpg;
4630c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4631c50d8ae3SPaolo Bonzini }
4632c50d8ae3SPaolo Bonzini 
463384a16226SSean Christopherson static void paging32_init_context(struct kvm_mmu *context)
4634c50d8ae3SPaolo Bonzini {
4635c50d8ae3SPaolo Bonzini 	context->page_fault = paging32_page_fault;
4636c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging32_gva_to_gpa;
4637c50d8ae3SPaolo Bonzini 	context->sync_page = paging32_sync_page;
4638c50d8ae3SPaolo Bonzini 	context->invlpg = paging32_invlpg;
4639c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4640c50d8ae3SPaolo Bonzini }
4641c50d8ae3SPaolo Bonzini 
46428626c120SSean Christopherson static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
46438626c120SSean Christopherson 							 struct kvm_mmu_role_regs *regs)
4644c50d8ae3SPaolo Bonzini {
4645c50d8ae3SPaolo Bonzini 	union kvm_mmu_extended_role ext = {0};
4646c50d8ae3SPaolo Bonzini 
4647ca8d664fSSean Christopherson 	if (____is_cr0_pg(regs)) {
4648ca8d664fSSean Christopherson 		ext.cr0_pg = 1;
46498626c120SSean Christopherson 		ext.cr4_pae = ____is_cr4_pae(regs);
46508626c120SSean Christopherson 		ext.cr4_smep = ____is_cr4_smep(regs);
46518626c120SSean Christopherson 		ext.cr4_smap = ____is_cr4_smap(regs);
46528626c120SSean Christopherson 		ext.cr4_pse = ____is_cr4_pse(regs);
465384c679f5SSean Christopherson 
465484c679f5SSean Christopherson 		/* PKEY and LA57 are active iff long mode is active. */
465584c679f5SSean Christopherson 		ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
465684c679f5SSean Christopherson 		ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
4657ca8d664fSSean Christopherson 	}
4658c50d8ae3SPaolo Bonzini 
4659c50d8ae3SPaolo Bonzini 	ext.valid = 1;
4660c50d8ae3SPaolo Bonzini 
4661c50d8ae3SPaolo Bonzini 	return ext;
4662c50d8ae3SPaolo Bonzini }
4663c50d8ae3SPaolo Bonzini 
4664c50d8ae3SPaolo Bonzini static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
46658626c120SSean Christopherson 						   struct kvm_mmu_role_regs *regs,
4666c50d8ae3SPaolo Bonzini 						   bool base_only)
4667c50d8ae3SPaolo Bonzini {
4668c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4669c50d8ae3SPaolo Bonzini 
4670c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4671ca8d664fSSean Christopherson 	if (____is_cr0_pg(regs)) {
4672167f8a5cSSean Christopherson 		role.base.efer_nx = ____is_efer_nx(regs);
46738626c120SSean Christopherson 		role.base.cr0_wp = ____is_cr0_wp(regs);
4674ca8d664fSSean Christopherson 	}
4675c50d8ae3SPaolo Bonzini 	role.base.smm = is_smm(vcpu);
4676c50d8ae3SPaolo Bonzini 	role.base.guest_mode = is_guest_mode(vcpu);
4677c50d8ae3SPaolo Bonzini 
4678c50d8ae3SPaolo Bonzini 	if (base_only)
4679c50d8ae3SPaolo Bonzini 		return role;
4680c50d8ae3SPaolo Bonzini 
46818626c120SSean Christopherson 	role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
4682c50d8ae3SPaolo Bonzini 
4683c50d8ae3SPaolo Bonzini 	return role;
4684c50d8ae3SPaolo Bonzini }
4685c50d8ae3SPaolo Bonzini 
4686d468d94bSSean Christopherson static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4687d468d94bSSean Christopherson {
4688*746700d2SWei Huang 	/* tdp_root_level is architecture forced level, use it if nonzero */
4689*746700d2SWei Huang 	if (tdp_root_level)
4690*746700d2SWei Huang 		return tdp_root_level;
4691*746700d2SWei Huang 
4692d468d94bSSean Christopherson 	/* Use 5-level TDP if and only if it's useful/necessary. */
469383013059SSean Christopherson 	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4694d468d94bSSean Christopherson 		return 4;
4695d468d94bSSean Christopherson 
469683013059SSean Christopherson 	return max_tdp_level;
4697d468d94bSSean Christopherson }
4698d468d94bSSean Christopherson 
4699c50d8ae3SPaolo Bonzini static union kvm_mmu_role
47008626c120SSean Christopherson kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
47018626c120SSean Christopherson 				struct kvm_mmu_role_regs *regs, bool base_only)
4702c50d8ae3SPaolo Bonzini {
47038626c120SSean Christopherson 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4704c50d8ae3SPaolo Bonzini 
4705c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = (shadow_accessed_mask == 0);
4706d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4707c50d8ae3SPaolo Bonzini 	role.base.direct = true;
4708c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4709c50d8ae3SPaolo Bonzini 
4710c50d8ae3SPaolo Bonzini 	return role;
4711c50d8ae3SPaolo Bonzini }
4712c50d8ae3SPaolo Bonzini 
4713c50d8ae3SPaolo Bonzini static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4714c50d8ae3SPaolo Bonzini {
47158c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
47168626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4717c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
47188626c120SSean Christopherson 		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
4719c50d8ae3SPaolo Bonzini 
4720c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4721c50d8ae3SPaolo Bonzini 		return;
4722c50d8ae3SPaolo Bonzini 
4723c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
47247a02674dSSean Christopherson 	context->page_fault = kvm_tdp_page_fault;
4725c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
47265efac074SPaolo Bonzini 	context->invlpg = NULL;
4727d468d94bSSean Christopherson 	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4728c50d8ae3SPaolo Bonzini 	context->direct_map = true;
4729d8dd54e0SSean Christopherson 	context->get_guest_pgd = get_cr3;
4730c50d8ae3SPaolo Bonzini 	context->get_pdptr = kvm_pdptr_read;
4731c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4732f4bd6f73SSean Christopherson 	context->root_level = role_regs_to_root_level(&regs);
4733c50d8ae3SPaolo Bonzini 
473436f26787SSean Christopherson 	if (!is_cr0_pg(context))
4735c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = nonpaging_gva_to_gpa;
473636f26787SSean Christopherson 	else if (is_cr4_pae(context))
4737c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging64_gva_to_gpa;
4738f4bd6f73SSean Christopherson 	else
4739c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging32_gva_to_gpa;
4740c50d8ae3SPaolo Bonzini 
4741533f9a4bSSean Christopherson 	reset_guest_paging_metadata(vcpu, context);
4742c50d8ae3SPaolo Bonzini 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
4743c50d8ae3SPaolo Bonzini }
4744c50d8ae3SPaolo Bonzini 
4745c50d8ae3SPaolo Bonzini static union kvm_mmu_role
47468626c120SSean Christopherson kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
47478626c120SSean Christopherson 				      struct kvm_mmu_role_regs *regs, bool base_only)
4748c50d8ae3SPaolo Bonzini {
47498626c120SSean Christopherson 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4750c50d8ae3SPaolo Bonzini 
47518626c120SSean Christopherson 	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
47528626c120SSean Christopherson 	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
4753ca8d664fSSean Christopherson 	role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
4754c50d8ae3SPaolo Bonzini 
475559505b55SSean Christopherson 	return role;
475659505b55SSean Christopherson }
475759505b55SSean Christopherson 
475859505b55SSean Christopherson static union kvm_mmu_role
47598626c120SSean Christopherson kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
47608626c120SSean Christopherson 				   struct kvm_mmu_role_regs *regs, bool base_only)
476159505b55SSean Christopherson {
476259505b55SSean Christopherson 	union kvm_mmu_role role =
47638626c120SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
476459505b55SSean Christopherson 
47658626c120SSean Christopherson 	role.base.direct = !____is_cr0_pg(regs);
476659505b55SSean Christopherson 
47678626c120SSean Christopherson 	if (!____is_efer_lma(regs))
4768c50d8ae3SPaolo Bonzini 		role.base.level = PT32E_ROOT_LEVEL;
47698626c120SSean Christopherson 	else if (____is_cr4_la57(regs))
4770c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_5LEVEL;
4771c50d8ae3SPaolo Bonzini 	else
4772c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_4LEVEL;
4773c50d8ae3SPaolo Bonzini 
4774c50d8ae3SPaolo Bonzini 	return role;
4775c50d8ae3SPaolo Bonzini }
4776c50d8ae3SPaolo Bonzini 
47778c008659SPaolo Bonzini static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4778594e91a1SSean Christopherson 				    struct kvm_mmu_role_regs *regs,
47798c008659SPaolo Bonzini 				    union kvm_mmu_role new_role)
4780c50d8ae3SPaolo Bonzini {
478118db1b17SSean Christopherson 	if (new_role.as_u64 == context->mmu_role.as_u64)
478218db1b17SSean Christopherson 		return;
4783c50d8ae3SPaolo Bonzini 
4784c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
478518db1b17SSean Christopherson 
478636f26787SSean Christopherson 	if (!is_cr0_pg(context))
478784a16226SSean Christopherson 		nonpaging_init_context(context);
478836f26787SSean Christopherson 	else if (is_cr4_pae(context))
4789fe660f72SSean Christopherson 		paging64_init_context(context);
4790c50d8ae3SPaolo Bonzini 	else
479184a16226SSean Christopherson 		paging32_init_context(context);
4792f4bd6f73SSean Christopherson 	context->root_level = role_regs_to_root_level(regs);
4793c50d8ae3SPaolo Bonzini 
4794533f9a4bSSean Christopherson 	reset_guest_paging_metadata(vcpu, context);
4795d555f705SSean Christopherson 	context->shadow_root_level = new_role.base.level;
4796d555f705SSean Christopherson 
4797c50d8ae3SPaolo Bonzini 	reset_shadow_zero_bits_mask(vcpu, context);
4798c50d8ae3SPaolo Bonzini }
47990f04a2acSVitaly Kuznetsov 
4800594e91a1SSean Christopherson static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
4801594e91a1SSean Christopherson 				struct kvm_mmu_role_regs *regs)
48020f04a2acSVitaly Kuznetsov {
48038c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
48040f04a2acSVitaly Kuznetsov 	union kvm_mmu_role new_role =
48058626c120SSean Christopherson 		kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
48060f04a2acSVitaly Kuznetsov 
4807594e91a1SSean Christopherson 	shadow_mmu_init_context(vcpu, context, regs, new_role);
48080f04a2acSVitaly Kuznetsov }
48090f04a2acSVitaly Kuznetsov 
481059505b55SSean Christopherson static union kvm_mmu_role
48118626c120SSean Christopherson kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
48128626c120SSean Christopherson 				   struct kvm_mmu_role_regs *regs)
481359505b55SSean Christopherson {
481459505b55SSean Christopherson 	union kvm_mmu_role role =
48158626c120SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
481659505b55SSean Christopherson 
481759505b55SSean Christopherson 	role.base.direct = false;
4818d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
481959505b55SSean Christopherson 
482059505b55SSean Christopherson 	return role;
482159505b55SSean Christopherson }
482259505b55SSean Christopherson 
4823dbc4739bSSean Christopherson void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
4824dbc4739bSSean Christopherson 			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
48250f04a2acSVitaly Kuznetsov {
48268c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4827594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = {
4828594e91a1SSean Christopherson 		.cr0 = cr0,
4829594e91a1SSean Christopherson 		.cr4 = cr4,
4830594e91a1SSean Christopherson 		.efer = efer,
4831594e91a1SSean Christopherson 	};
48328626c120SSean Christopherson 	union kvm_mmu_role new_role;
48330f04a2acSVitaly Kuznetsov 
48348626c120SSean Christopherson 	new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
4835a506fdd2SVitaly Kuznetsov 
4836b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
4837a3322d5cSSean Christopherson 
4838594e91a1SSean Christopherson 	shadow_mmu_init_context(vcpu, context, &regs, new_role);
48390f04a2acSVitaly Kuznetsov }
48400f04a2acSVitaly Kuznetsov EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4841c50d8ae3SPaolo Bonzini 
4842c50d8ae3SPaolo Bonzini static union kvm_mmu_role
4843c50d8ae3SPaolo Bonzini kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4844bb1fcc70SSean Christopherson 				   bool execonly, u8 level)
4845c50d8ae3SPaolo Bonzini {
4846c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4847c50d8ae3SPaolo Bonzini 
4848c50d8ae3SPaolo Bonzini 	/* SMM flag is inherited from root_mmu */
4849c50d8ae3SPaolo Bonzini 	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4850c50d8ae3SPaolo Bonzini 
4851bb1fcc70SSean Christopherson 	role.base.level = level;
4852c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4853c50d8ae3SPaolo Bonzini 	role.base.direct = false;
4854c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = !accessed_dirty;
4855c50d8ae3SPaolo Bonzini 	role.base.guest_mode = true;
4856c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4857c50d8ae3SPaolo Bonzini 
4858cd6767c3SSean Christopherson 	/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
4859cd6767c3SSean Christopherson 	role.ext.word = 0;
4860c50d8ae3SPaolo Bonzini 	role.ext.execonly = execonly;
4861cd6767c3SSean Christopherson 	role.ext.valid = 1;
4862c50d8ae3SPaolo Bonzini 
4863c50d8ae3SPaolo Bonzini 	return role;
4864c50d8ae3SPaolo Bonzini }
4865c50d8ae3SPaolo Bonzini 
4866c50d8ae3SPaolo Bonzini void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4867c50d8ae3SPaolo Bonzini 			     bool accessed_dirty, gpa_t new_eptp)
4868c50d8ae3SPaolo Bonzini {
48698c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4870bb1fcc70SSean Christopherson 	u8 level = vmx_eptp_page_walk_level(new_eptp);
4871c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
4872c50d8ae3SPaolo Bonzini 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4873bb1fcc70SSean Christopherson 						   execonly, level);
4874c50d8ae3SPaolo Bonzini 
4875b5129100SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
4876c50d8ae3SPaolo Bonzini 
4877c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4878c50d8ae3SPaolo Bonzini 		return;
4879c50d8ae3SPaolo Bonzini 
488018db1b17SSean Christopherson 	context->mmu_role.as_u64 = new_role.as_u64;
488118db1b17SSean Christopherson 
4882bb1fcc70SSean Christopherson 	context->shadow_root_level = level;
4883c50d8ae3SPaolo Bonzini 
4884c50d8ae3SPaolo Bonzini 	context->ept_ad = accessed_dirty;
4885c50d8ae3SPaolo Bonzini 	context->page_fault = ept_page_fault;
4886c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = ept_gva_to_gpa;
4887c50d8ae3SPaolo Bonzini 	context->sync_page = ept_sync_page;
4888c50d8ae3SPaolo Bonzini 	context->invlpg = ept_invlpg;
4889bb1fcc70SSean Christopherson 	context->root_level = level;
4890c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4891c50d8ae3SPaolo Bonzini 
4892c596f147SSean Christopherson 	update_permission_bitmask(context, true);
48932e4c0661SSean Christopherson 	update_pkru_bitmask(context);
4894c50d8ae3SPaolo Bonzini 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4895c50d8ae3SPaolo Bonzini 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4896c50d8ae3SPaolo Bonzini }
4897c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4898c50d8ae3SPaolo Bonzini 
4899c50d8ae3SPaolo Bonzini static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4900c50d8ae3SPaolo Bonzini {
49018c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4902594e91a1SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4903c50d8ae3SPaolo Bonzini 
4904594e91a1SSean Christopherson 	kvm_init_shadow_mmu(vcpu, &regs);
4905929d1cfaSPaolo Bonzini 
4906d8dd54e0SSean Christopherson 	context->get_guest_pgd     = get_cr3;
4907c50d8ae3SPaolo Bonzini 	context->get_pdptr         = kvm_pdptr_read;
4908c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4909c50d8ae3SPaolo Bonzini }
4910c50d8ae3SPaolo Bonzini 
49118626c120SSean Christopherson static union kvm_mmu_role
49128626c120SSean Christopherson kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
4913654430efSSean Christopherson {
49148626c120SSean Christopherson 	union kvm_mmu_role role;
49158626c120SSean Christopherson 
49168626c120SSean Christopherson 	role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4917654430efSSean Christopherson 
4918654430efSSean Christopherson 	/*
4919654430efSSean Christopherson 	 * Nested MMUs are used only for walking L2's gva->gpa, they never have
4920654430efSSean Christopherson 	 * shadow pages of their own and so "direct" has no meaning.   Set it
4921654430efSSean Christopherson 	 * to "true" to try to detect bogus usage of the nested MMU.
4922654430efSSean Christopherson 	 */
4923654430efSSean Christopherson 	role.base.direct = true;
4924f4bd6f73SSean Christopherson 	role.base.level = role_regs_to_root_level(regs);
4925654430efSSean Christopherson 	return role;
4926654430efSSean Christopherson }
4927654430efSSean Christopherson 
4928c50d8ae3SPaolo Bonzini static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4929c50d8ae3SPaolo Bonzini {
49308626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
49318626c120SSean Christopherson 	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
4932c50d8ae3SPaolo Bonzini 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4933c50d8ae3SPaolo Bonzini 
4934c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
4935c50d8ae3SPaolo Bonzini 		return;
4936c50d8ae3SPaolo Bonzini 
4937c50d8ae3SPaolo Bonzini 	g_context->mmu_role.as_u64 = new_role.as_u64;
4938d8dd54e0SSean Christopherson 	g_context->get_guest_pgd     = get_cr3;
4939c50d8ae3SPaolo Bonzini 	g_context->get_pdptr         = kvm_pdptr_read;
4940c50d8ae3SPaolo Bonzini 	g_context->inject_page_fault = kvm_inject_page_fault;
49415472fcd4SSean Christopherson 	g_context->root_level        = new_role.base.level;
4942c50d8ae3SPaolo Bonzini 
4943c50d8ae3SPaolo Bonzini 	/*
49445efac074SPaolo Bonzini 	 * L2 page tables are never shadowed, so there is no need to sync
49455efac074SPaolo Bonzini 	 * SPTEs.
49465efac074SPaolo Bonzini 	 */
49475efac074SPaolo Bonzini 	g_context->invlpg            = NULL;
49485efac074SPaolo Bonzini 
49495efac074SPaolo Bonzini 	/*
4950c50d8ae3SPaolo Bonzini 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4951c50d8ae3SPaolo Bonzini 	 * L1's nested page tables (e.g. EPT12). The nested translation
4952c50d8ae3SPaolo Bonzini 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4953c50d8ae3SPaolo Bonzini 	 * L2's page tables as the first level of translation and L1's
4954c50d8ae3SPaolo Bonzini 	 * nested page tables as the second level of translation. Basically
4955c50d8ae3SPaolo Bonzini 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4956c50d8ae3SPaolo Bonzini 	 */
4957fa4b5588SSean Christopherson 	if (!is_paging(vcpu))
4958c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4959fa4b5588SSean Christopherson 	else if (is_long_mode(vcpu))
4960c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4961fa4b5588SSean Christopherson 	else if (is_pae(vcpu))
4962c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4963fa4b5588SSean Christopherson 	else
4964c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4965fa4b5588SSean Christopherson 
4966533f9a4bSSean Christopherson 	reset_guest_paging_metadata(vcpu, g_context);
4967c50d8ae3SPaolo Bonzini }
4968c50d8ae3SPaolo Bonzini 
4969c9060662SSean Christopherson void kvm_init_mmu(struct kvm_vcpu *vcpu)
4970c50d8ae3SPaolo Bonzini {
4971c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
4972c50d8ae3SPaolo Bonzini 		init_kvm_nested_mmu(vcpu);
4973c50d8ae3SPaolo Bonzini 	else if (tdp_enabled)
4974c50d8ae3SPaolo Bonzini 		init_kvm_tdp_mmu(vcpu);
4975c50d8ae3SPaolo Bonzini 	else
4976c50d8ae3SPaolo Bonzini 		init_kvm_softmmu(vcpu);
4977c50d8ae3SPaolo Bonzini }
4978c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_mmu);
4979c50d8ae3SPaolo Bonzini 
4980c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
4981c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
4982c50d8ae3SPaolo Bonzini {
49838626c120SSean Christopherson 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4984c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role;
4985c50d8ae3SPaolo Bonzini 
4986c50d8ae3SPaolo Bonzini 	if (tdp_enabled)
49878626c120SSean Christopherson 		role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
4988c50d8ae3SPaolo Bonzini 	else
49898626c120SSean Christopherson 		role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
4990c50d8ae3SPaolo Bonzini 
4991c50d8ae3SPaolo Bonzini 	return role.base;
4992c50d8ae3SPaolo Bonzini }
4993c50d8ae3SPaolo Bonzini 
499449c6f875SSean Christopherson void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
499549c6f875SSean Christopherson {
499649c6f875SSean Christopherson 	/*
499749c6f875SSean Christopherson 	 * Invalidate all MMU roles to force them to reinitialize as CPUID
499849c6f875SSean Christopherson 	 * information is factored into reserved bit calculations.
499949c6f875SSean Christopherson 	 */
500049c6f875SSean Christopherson 	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
500149c6f875SSean Christopherson 	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
500249c6f875SSean Christopherson 	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
500349c6f875SSean Christopherson 	kvm_mmu_reset_context(vcpu);
500463f5a190SSean Christopherson 
500563f5a190SSean Christopherson 	/*
500663f5a190SSean Christopherson 	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
500763f5a190SSean Christopherson 	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
500863f5a190SSean Christopherson 	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
500963f5a190SSean Christopherson 	 * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
501063f5a190SSean Christopherson 	 * sweep the problem under the rug.
501163f5a190SSean Christopherson 	 *
501263f5a190SSean Christopherson 	 * KVM's horrific CPUID ABI makes the problem all but impossible to
501363f5a190SSean Christopherson 	 * solve, as correctly handling multiple vCPU models (with respect to
501463f5a190SSean Christopherson 	 * paging and physical address properties) in a single VM would require
501563f5a190SSean Christopherson 	 * tracking all relevant CPUID information in kvm_mmu_page_role.  That
501663f5a190SSean Christopherson 	 * is very undesirable as it would double the memory requirements for
501763f5a190SSean Christopherson 	 * gfn_track (see struct kvm_mmu_page_role comments), and in practice
501863f5a190SSean Christopherson 	 * no sane VMM mucks with the core vCPU model on the fly.
501963f5a190SSean Christopherson 	 */
502063f5a190SSean Christopherson 	if (vcpu->arch.last_vmentry_cpu != -1) {
502163f5a190SSean Christopherson 		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
502263f5a190SSean Christopherson 		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
502363f5a190SSean Christopherson 	}
502449c6f875SSean Christopherson }
502549c6f875SSean Christopherson 
5026c50d8ae3SPaolo Bonzini void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
5027c50d8ae3SPaolo Bonzini {
5028c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
5029c9060662SSean Christopherson 	kvm_init_mmu(vcpu);
5030c50d8ae3SPaolo Bonzini }
5031c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
5032c50d8ae3SPaolo Bonzini 
5033c50d8ae3SPaolo Bonzini int kvm_mmu_load(struct kvm_vcpu *vcpu)
5034c50d8ae3SPaolo Bonzini {
5035c50d8ae3SPaolo Bonzini 	int r;
5036c50d8ae3SPaolo Bonzini 
5037378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
5038c50d8ae3SPaolo Bonzini 	if (r)
5039c50d8ae3SPaolo Bonzini 		goto out;
5040748e52b9SSean Christopherson 	r = mmu_alloc_special_roots(vcpu);
5041c50d8ae3SPaolo Bonzini 	if (r)
5042c50d8ae3SPaolo Bonzini 		goto out;
50434a38162eSPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
50446e6ec584SSean Christopherson 		r = mmu_alloc_direct_roots(vcpu);
50456e6ec584SSean Christopherson 	else
50466e6ec584SSean Christopherson 		r = mmu_alloc_shadow_roots(vcpu);
5047c50d8ae3SPaolo Bonzini 	if (r)
5048c50d8ae3SPaolo Bonzini 		goto out;
5049a91f387bSSean Christopherson 
5050a91f387bSSean Christopherson 	kvm_mmu_sync_roots(vcpu);
5051a91f387bSSean Christopherson 
5052727a7e27SPaolo Bonzini 	kvm_mmu_load_pgd(vcpu);
5053b3646477SJason Baron 	static_call(kvm_x86_tlb_flush_current)(vcpu);
5054c50d8ae3SPaolo Bonzini out:
5055c50d8ae3SPaolo Bonzini 	return r;
5056c50d8ae3SPaolo Bonzini }
5057c50d8ae3SPaolo Bonzini 
5058c50d8ae3SPaolo Bonzini void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5059c50d8ae3SPaolo Bonzini {
5060c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5061c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
5062c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5063c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
5064c50d8ae3SPaolo Bonzini }
5065c50d8ae3SPaolo Bonzini 
5066c50d8ae3SPaolo Bonzini static bool need_remote_flush(u64 old, u64 new)
5067c50d8ae3SPaolo Bonzini {
5068c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old))
5069c50d8ae3SPaolo Bonzini 		return false;
5070c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(new))
5071c50d8ae3SPaolo Bonzini 		return true;
5072c50d8ae3SPaolo Bonzini 	if ((old ^ new) & PT64_BASE_ADDR_MASK)
5073c50d8ae3SPaolo Bonzini 		return true;
5074c50d8ae3SPaolo Bonzini 	old ^= shadow_nx_mask;
5075c50d8ae3SPaolo Bonzini 	new ^= shadow_nx_mask;
5076c50d8ae3SPaolo Bonzini 	return (old & ~new & PT64_PERM_MASK) != 0;
5077c50d8ae3SPaolo Bonzini }
5078c50d8ae3SPaolo Bonzini 
5079c50d8ae3SPaolo Bonzini static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5080c50d8ae3SPaolo Bonzini 				    int *bytes)
5081c50d8ae3SPaolo Bonzini {
5082c50d8ae3SPaolo Bonzini 	u64 gentry = 0;
5083c50d8ae3SPaolo Bonzini 	int r;
5084c50d8ae3SPaolo Bonzini 
5085c50d8ae3SPaolo Bonzini 	/*
5086c50d8ae3SPaolo Bonzini 	 * Assume that the pte write on a page table of the same type
5087c50d8ae3SPaolo Bonzini 	 * as the current vcpu paging mode since we update the sptes only
5088c50d8ae3SPaolo Bonzini 	 * when they have the same mode.
5089c50d8ae3SPaolo Bonzini 	 */
5090c50d8ae3SPaolo Bonzini 	if (is_pae(vcpu) && *bytes == 4) {
5091c50d8ae3SPaolo Bonzini 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5092c50d8ae3SPaolo Bonzini 		*gpa &= ~(gpa_t)7;
5093c50d8ae3SPaolo Bonzini 		*bytes = 8;
5094c50d8ae3SPaolo Bonzini 	}
5095c50d8ae3SPaolo Bonzini 
5096c50d8ae3SPaolo Bonzini 	if (*bytes == 4 || *bytes == 8) {
5097c50d8ae3SPaolo Bonzini 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5098c50d8ae3SPaolo Bonzini 		if (r)
5099c50d8ae3SPaolo Bonzini 			gentry = 0;
5100c50d8ae3SPaolo Bonzini 	}
5101c50d8ae3SPaolo Bonzini 
5102c50d8ae3SPaolo Bonzini 	return gentry;
5103c50d8ae3SPaolo Bonzini }
5104c50d8ae3SPaolo Bonzini 
5105c50d8ae3SPaolo Bonzini /*
5106c50d8ae3SPaolo Bonzini  * If we're seeing too many writes to a page, it may no longer be a page table,
5107c50d8ae3SPaolo Bonzini  * or we may be forking, in which case it is better to unmap the page.
5108c50d8ae3SPaolo Bonzini  */
5109c50d8ae3SPaolo Bonzini static bool detect_write_flooding(struct kvm_mmu_page *sp)
5110c50d8ae3SPaolo Bonzini {
5111c50d8ae3SPaolo Bonzini 	/*
5112c50d8ae3SPaolo Bonzini 	 * Skip write-flooding detected for the sp whose level is 1, because
5113c50d8ae3SPaolo Bonzini 	 * it can become unsync, then the guest page is not write-protected.
5114c50d8ae3SPaolo Bonzini 	 */
51153bae0459SSean Christopherson 	if (sp->role.level == PG_LEVEL_4K)
5116c50d8ae3SPaolo Bonzini 		return false;
5117c50d8ae3SPaolo Bonzini 
5118c50d8ae3SPaolo Bonzini 	atomic_inc(&sp->write_flooding_count);
5119c50d8ae3SPaolo Bonzini 	return atomic_read(&sp->write_flooding_count) >= 3;
5120c50d8ae3SPaolo Bonzini }
5121c50d8ae3SPaolo Bonzini 
5122c50d8ae3SPaolo Bonzini /*
5123c50d8ae3SPaolo Bonzini  * Misaligned accesses are too much trouble to fix up; also, they usually
5124c50d8ae3SPaolo Bonzini  * indicate a page is not used as a page table.
5125c50d8ae3SPaolo Bonzini  */
5126c50d8ae3SPaolo Bonzini static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5127c50d8ae3SPaolo Bonzini 				    int bytes)
5128c50d8ae3SPaolo Bonzini {
5129c50d8ae3SPaolo Bonzini 	unsigned offset, pte_size, misaligned;
5130c50d8ae3SPaolo Bonzini 
5131c50d8ae3SPaolo Bonzini 	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
5132c50d8ae3SPaolo Bonzini 		 gpa, bytes, sp->role.word);
5133c50d8ae3SPaolo Bonzini 
5134c50d8ae3SPaolo Bonzini 	offset = offset_in_page(gpa);
5135c50d8ae3SPaolo Bonzini 	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5136c50d8ae3SPaolo Bonzini 
5137c50d8ae3SPaolo Bonzini 	/*
5138c50d8ae3SPaolo Bonzini 	 * Sometimes, the OS only writes the last one bytes to update status
5139c50d8ae3SPaolo Bonzini 	 * bits, for example, in linux, andb instruction is used in clear_bit().
5140c50d8ae3SPaolo Bonzini 	 */
5141c50d8ae3SPaolo Bonzini 	if (!(offset & (pte_size - 1)) && bytes == 1)
5142c50d8ae3SPaolo Bonzini 		return false;
5143c50d8ae3SPaolo Bonzini 
5144c50d8ae3SPaolo Bonzini 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5145c50d8ae3SPaolo Bonzini 	misaligned |= bytes < 4;
5146c50d8ae3SPaolo Bonzini 
5147c50d8ae3SPaolo Bonzini 	return misaligned;
5148c50d8ae3SPaolo Bonzini }
5149c50d8ae3SPaolo Bonzini 
5150c50d8ae3SPaolo Bonzini static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5151c50d8ae3SPaolo Bonzini {
5152c50d8ae3SPaolo Bonzini 	unsigned page_offset, quadrant;
5153c50d8ae3SPaolo Bonzini 	u64 *spte;
5154c50d8ae3SPaolo Bonzini 	int level;
5155c50d8ae3SPaolo Bonzini 
5156c50d8ae3SPaolo Bonzini 	page_offset = offset_in_page(gpa);
5157c50d8ae3SPaolo Bonzini 	level = sp->role.level;
5158c50d8ae3SPaolo Bonzini 	*nspte = 1;
5159c50d8ae3SPaolo Bonzini 	if (!sp->role.gpte_is_8_bytes) {
5160c50d8ae3SPaolo Bonzini 		page_offset <<= 1;	/* 32->64 */
5161c50d8ae3SPaolo Bonzini 		/*
5162c50d8ae3SPaolo Bonzini 		 * A 32-bit pde maps 4MB while the shadow pdes map
5163c50d8ae3SPaolo Bonzini 		 * only 2MB.  So we need to double the offset again
5164c50d8ae3SPaolo Bonzini 		 * and zap two pdes instead of one.
5165c50d8ae3SPaolo Bonzini 		 */
5166c50d8ae3SPaolo Bonzini 		if (level == PT32_ROOT_LEVEL) {
5167c50d8ae3SPaolo Bonzini 			page_offset &= ~7; /* kill rounding error */
5168c50d8ae3SPaolo Bonzini 			page_offset <<= 1;
5169c50d8ae3SPaolo Bonzini 			*nspte = 2;
5170c50d8ae3SPaolo Bonzini 		}
5171c50d8ae3SPaolo Bonzini 		quadrant = page_offset >> PAGE_SHIFT;
5172c50d8ae3SPaolo Bonzini 		page_offset &= ~PAGE_MASK;
5173c50d8ae3SPaolo Bonzini 		if (quadrant != sp->role.quadrant)
5174c50d8ae3SPaolo Bonzini 			return NULL;
5175c50d8ae3SPaolo Bonzini 	}
5176c50d8ae3SPaolo Bonzini 
5177c50d8ae3SPaolo Bonzini 	spte = &sp->spt[page_offset / sizeof(*spte)];
5178c50d8ae3SPaolo Bonzini 	return spte;
5179c50d8ae3SPaolo Bonzini }
5180c50d8ae3SPaolo Bonzini 
5181c50d8ae3SPaolo Bonzini static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5182c50d8ae3SPaolo Bonzini 			      const u8 *new, int bytes,
5183c50d8ae3SPaolo Bonzini 			      struct kvm_page_track_notifier_node *node)
5184c50d8ae3SPaolo Bonzini {
5185c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
5186c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5187c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5188c50d8ae3SPaolo Bonzini 	u64 entry, gentry, *spte;
5189c50d8ae3SPaolo Bonzini 	int npte;
5190c50d8ae3SPaolo Bonzini 	bool remote_flush, local_flush;
5191c50d8ae3SPaolo Bonzini 
5192c50d8ae3SPaolo Bonzini 	/*
5193c50d8ae3SPaolo Bonzini 	 * If we don't have indirect shadow pages, it means no page is
5194c50d8ae3SPaolo Bonzini 	 * write-protected, so we can exit simply.
5195c50d8ae3SPaolo Bonzini 	 */
5196c50d8ae3SPaolo Bonzini 	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5197c50d8ae3SPaolo Bonzini 		return;
5198c50d8ae3SPaolo Bonzini 
5199c50d8ae3SPaolo Bonzini 	remote_flush = local_flush = false;
5200c50d8ae3SPaolo Bonzini 
5201c50d8ae3SPaolo Bonzini 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5202c50d8ae3SPaolo Bonzini 
5203c50d8ae3SPaolo Bonzini 	/*
5204c50d8ae3SPaolo Bonzini 	 * No need to care whether allocation memory is successful
5205d9f6e12fSIngo Molnar 	 * or not since pte prefetch is skipped if it does not have
5206c50d8ae3SPaolo Bonzini 	 * enough objects in the cache.
5207c50d8ae3SPaolo Bonzini 	 */
5208378f5cd6SSean Christopherson 	mmu_topup_memory_caches(vcpu, true);
5209c50d8ae3SPaolo Bonzini 
5210531810caSBen Gardon 	write_lock(&vcpu->kvm->mmu_lock);
5211c50d8ae3SPaolo Bonzini 
5212c50d8ae3SPaolo Bonzini 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5213c50d8ae3SPaolo Bonzini 
5214c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_pte_write;
5215c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5216c50d8ae3SPaolo Bonzini 
5217c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5218c50d8ae3SPaolo Bonzini 		if (detect_write_misaligned(sp, gpa, bytes) ||
5219c50d8ae3SPaolo Bonzini 		      detect_write_flooding(sp)) {
5220c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5221c50d8ae3SPaolo Bonzini 			++vcpu->kvm->stat.mmu_flooded;
5222c50d8ae3SPaolo Bonzini 			continue;
5223c50d8ae3SPaolo Bonzini 		}
5224c50d8ae3SPaolo Bonzini 
5225c50d8ae3SPaolo Bonzini 		spte = get_written_sptes(sp, gpa, &npte);
5226c50d8ae3SPaolo Bonzini 		if (!spte)
5227c50d8ae3SPaolo Bonzini 			continue;
5228c50d8ae3SPaolo Bonzini 
5229c50d8ae3SPaolo Bonzini 		local_flush = true;
5230c50d8ae3SPaolo Bonzini 		while (npte--) {
5231c50d8ae3SPaolo Bonzini 			entry = *spte;
52322de4085cSBen Gardon 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5233c5e2184dSSean Christopherson 			if (gentry && sp->role.level != PG_LEVEL_4K)
5234c5e2184dSSean Christopherson 				++vcpu->kvm->stat.mmu_pde_zapped;
5235c50d8ae3SPaolo Bonzini 			if (need_remote_flush(entry, *spte))
5236c50d8ae3SPaolo Bonzini 				remote_flush = true;
5237c50d8ae3SPaolo Bonzini 			++spte;
5238c50d8ae3SPaolo Bonzini 		}
5239c50d8ae3SPaolo Bonzini 	}
5240c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5241c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5242531810caSBen Gardon 	write_unlock(&vcpu->kvm->mmu_lock);
5243c50d8ae3SPaolo Bonzini }
5244c50d8ae3SPaolo Bonzini 
5245736c291cSSean Christopherson int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5246c50d8ae3SPaolo Bonzini 		       void *insn, int insn_len)
5247c50d8ae3SPaolo Bonzini {
524892daa48bSSean Christopherson 	int r, emulation_type = EMULTYPE_PF;
5249c50d8ae3SPaolo Bonzini 	bool direct = vcpu->arch.mmu->direct_map;
5250c50d8ae3SPaolo Bonzini 
52516948199aSSean Christopherson 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5252ddce6208SSean Christopherson 		return RET_PF_RETRY;
5253ddce6208SSean Christopherson 
5254c50d8ae3SPaolo Bonzini 	r = RET_PF_INVALID;
5255c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
5256736c291cSSean Christopherson 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5257c50d8ae3SPaolo Bonzini 		if (r == RET_PF_EMULATE)
5258c50d8ae3SPaolo Bonzini 			goto emulate;
5259c50d8ae3SPaolo Bonzini 	}
5260c50d8ae3SPaolo Bonzini 
5261c50d8ae3SPaolo Bonzini 	if (r == RET_PF_INVALID) {
52627a02674dSSean Christopherson 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
52637a02674dSSean Christopherson 					  lower_32_bits(error_code), false);
526419025e7bSSean Christopherson 		if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
52657b367bc9SSean Christopherson 			return -EIO;
5266c50d8ae3SPaolo Bonzini 	}
5267c50d8ae3SPaolo Bonzini 
5268c50d8ae3SPaolo Bonzini 	if (r < 0)
5269c50d8ae3SPaolo Bonzini 		return r;
527083a2ba4cSSean Christopherson 	if (r != RET_PF_EMULATE)
527183a2ba4cSSean Christopherson 		return 1;
5272c50d8ae3SPaolo Bonzini 
5273c50d8ae3SPaolo Bonzini 	/*
5274c50d8ae3SPaolo Bonzini 	 * Before emulating the instruction, check if the error code
5275c50d8ae3SPaolo Bonzini 	 * was due to a RO violation while translating the guest page.
5276c50d8ae3SPaolo Bonzini 	 * This can occur when using nested virtualization with nested
5277c50d8ae3SPaolo Bonzini 	 * paging in both guests. If true, we simply unprotect the page
5278c50d8ae3SPaolo Bonzini 	 * and resume the guest.
5279c50d8ae3SPaolo Bonzini 	 */
5280c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map &&
5281c50d8ae3SPaolo Bonzini 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5282736c291cSSean Christopherson 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5283c50d8ae3SPaolo Bonzini 		return 1;
5284c50d8ae3SPaolo Bonzini 	}
5285c50d8ae3SPaolo Bonzini 
5286c50d8ae3SPaolo Bonzini 	/*
5287c50d8ae3SPaolo Bonzini 	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5288c50d8ae3SPaolo Bonzini 	 * optimistically try to just unprotect the page and let the processor
5289c50d8ae3SPaolo Bonzini 	 * re-execute the instruction that caused the page fault.  Do not allow
5290c50d8ae3SPaolo Bonzini 	 * retrying MMIO emulation, as it's not only pointless but could also
5291c50d8ae3SPaolo Bonzini 	 * cause us to enter an infinite loop because the processor will keep
5292c50d8ae3SPaolo Bonzini 	 * faulting on the non-existent MMIO address.  Retrying an instruction
5293c50d8ae3SPaolo Bonzini 	 * from a nested guest is also pointless and dangerous as we are only
5294c50d8ae3SPaolo Bonzini 	 * explicitly shadowing L1's page tables, i.e. unprotecting something
5295c50d8ae3SPaolo Bonzini 	 * for L1 isn't going to magically fix whatever issue cause L2 to fail.
5296c50d8ae3SPaolo Bonzini 	 */
5297736c291cSSean Christopherson 	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
529892daa48bSSean Christopherson 		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5299c50d8ae3SPaolo Bonzini emulate:
5300736c291cSSean Christopherson 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5301c50d8ae3SPaolo Bonzini 				       insn_len);
5302c50d8ae3SPaolo Bonzini }
5303c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5304c50d8ae3SPaolo Bonzini 
53055efac074SPaolo Bonzini void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
53065efac074SPaolo Bonzini 			    gva_t gva, hpa_t root_hpa)
5307c50d8ae3SPaolo Bonzini {
5308c50d8ae3SPaolo Bonzini 	int i;
5309c50d8ae3SPaolo Bonzini 
53105efac074SPaolo Bonzini 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
53115efac074SPaolo Bonzini 	if (mmu != &vcpu->arch.guest_mmu) {
53125efac074SPaolo Bonzini 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5313c50d8ae3SPaolo Bonzini 		if (is_noncanonical_address(gva, vcpu))
5314c50d8ae3SPaolo Bonzini 			return;
5315c50d8ae3SPaolo Bonzini 
5316b3646477SJason Baron 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
53175efac074SPaolo Bonzini 	}
53185efac074SPaolo Bonzini 
53195efac074SPaolo Bonzini 	if (!mmu->invlpg)
53205efac074SPaolo Bonzini 		return;
53215efac074SPaolo Bonzini 
53225efac074SPaolo Bonzini 	if (root_hpa == INVALID_PAGE) {
5323c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5324c50d8ae3SPaolo Bonzini 
5325c50d8ae3SPaolo Bonzini 		/*
5326c50d8ae3SPaolo Bonzini 		 * INVLPG is required to invalidate any global mappings for the VA,
5327c50d8ae3SPaolo Bonzini 		 * irrespective of PCID. Since it would take us roughly similar amount
5328c50d8ae3SPaolo Bonzini 		 * of work to determine whether any of the prev_root mappings of the VA
5329c50d8ae3SPaolo Bonzini 		 * is marked global, or to just sync it blindly, so we might as well
5330c50d8ae3SPaolo Bonzini 		 * just always sync it.
5331c50d8ae3SPaolo Bonzini 		 *
5332c50d8ae3SPaolo Bonzini 		 * Mappings not reachable via the current cr3 or the prev_roots will be
5333c50d8ae3SPaolo Bonzini 		 * synced when switching to that cr3, so nothing needs to be done here
5334c50d8ae3SPaolo Bonzini 		 * for them.
5335c50d8ae3SPaolo Bonzini 		 */
5336c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5337c50d8ae3SPaolo Bonzini 			if (VALID_PAGE(mmu->prev_roots[i].hpa))
5338c50d8ae3SPaolo Bonzini 				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
53395efac074SPaolo Bonzini 	} else {
53405efac074SPaolo Bonzini 		mmu->invlpg(vcpu, gva, root_hpa);
53415efac074SPaolo Bonzini 	}
53425efac074SPaolo Bonzini }
5343c50d8ae3SPaolo Bonzini 
53445efac074SPaolo Bonzini void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
53455efac074SPaolo Bonzini {
53465efac074SPaolo Bonzini 	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
5347c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5348c50d8ae3SPaolo Bonzini }
5349c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5350c50d8ae3SPaolo Bonzini 
53515efac074SPaolo Bonzini 
5352c50d8ae3SPaolo Bonzini void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5353c50d8ae3SPaolo Bonzini {
5354c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5355c50d8ae3SPaolo Bonzini 	bool tlb_flush = false;
5356c50d8ae3SPaolo Bonzini 	uint i;
5357c50d8ae3SPaolo Bonzini 
5358c50d8ae3SPaolo Bonzini 	if (pcid == kvm_get_active_pcid(vcpu)) {
5359c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5360c50d8ae3SPaolo Bonzini 		tlb_flush = true;
5361c50d8ae3SPaolo Bonzini 	}
5362c50d8ae3SPaolo Bonzini 
5363c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5364c50d8ae3SPaolo Bonzini 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5365be01e8e2SSean Christopherson 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5366c50d8ae3SPaolo Bonzini 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5367c50d8ae3SPaolo Bonzini 			tlb_flush = true;
5368c50d8ae3SPaolo Bonzini 		}
5369c50d8ae3SPaolo Bonzini 	}
5370c50d8ae3SPaolo Bonzini 
5371c50d8ae3SPaolo Bonzini 	if (tlb_flush)
5372b3646477SJason Baron 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5373c50d8ae3SPaolo Bonzini 
5374c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5375c50d8ae3SPaolo Bonzini 
5376c50d8ae3SPaolo Bonzini 	/*
5377c50d8ae3SPaolo Bonzini 	 * Mappings not reachable via the current cr3 or the prev_roots will be
5378c50d8ae3SPaolo Bonzini 	 * synced when switching to that cr3, so nothing needs to be done here
5379c50d8ae3SPaolo Bonzini 	 * for them.
5380c50d8ae3SPaolo Bonzini 	 */
5381c50d8ae3SPaolo Bonzini }
5382c50d8ae3SPaolo Bonzini 
5383*746700d2SWei Huang void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
5384*746700d2SWei Huang 		       int tdp_max_root_level, int tdp_huge_page_level)
5385c50d8ae3SPaolo Bonzini {
5386bde77235SSean Christopherson 	tdp_enabled = enable_tdp;
5387*746700d2SWei Huang 	tdp_root_level = tdp_forced_root_level;
538883013059SSean Christopherson 	max_tdp_level = tdp_max_root_level;
5389703c335dSSean Christopherson 
5390703c335dSSean Christopherson 	/*
53911d92d2e8SSean Christopherson 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
5392703c335dSSean Christopherson 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
5393703c335dSSean Christopherson 	 * the kernel is not.  But, KVM never creates a page size greater than
5394703c335dSSean Christopherson 	 * what is used by the kernel for any given HVA, i.e. the kernel's
5395703c335dSSean Christopherson 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5396703c335dSSean Christopherson 	 */
5397703c335dSSean Christopherson 	if (tdp_enabled)
53981d92d2e8SSean Christopherson 		max_huge_page_level = tdp_huge_page_level;
5399703c335dSSean Christopherson 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
54001d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_1G;
5401703c335dSSean Christopherson 	else
54021d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_2M;
5403c50d8ae3SPaolo Bonzini }
5404bde77235SSean Christopherson EXPORT_SYMBOL_GPL(kvm_configure_mmu);
5405c50d8ae3SPaolo Bonzini 
5406c50d8ae3SPaolo Bonzini /* The return value indicates if tlb flush on all vcpus is needed. */
5407269e9552SHamza Mahfooz typedef bool (*slot_level_handler) (struct kvm *kvm,
5408269e9552SHamza Mahfooz 				    struct kvm_rmap_head *rmap_head,
5409269e9552SHamza Mahfooz 				    const struct kvm_memory_slot *slot);
5410c50d8ae3SPaolo Bonzini 
5411c50d8ae3SPaolo Bonzini /* The caller should hold mmu-lock before calling this function. */
5412c50d8ae3SPaolo Bonzini static __always_inline bool
5413269e9552SHamza Mahfooz slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5414c50d8ae3SPaolo Bonzini 			slot_level_handler fn, int start_level, int end_level,
54151a61b7dbSSean Christopherson 			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
54161a61b7dbSSean Christopherson 			bool flush)
5417c50d8ae3SPaolo Bonzini {
5418c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
5419c50d8ae3SPaolo Bonzini 
5420c50d8ae3SPaolo Bonzini 	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5421c50d8ae3SPaolo Bonzini 			end_gfn, &iterator) {
5422c50d8ae3SPaolo Bonzini 		if (iterator.rmap)
54230a234f5dSSean Christopherson 			flush |= fn(kvm, iterator.rmap, memslot);
5424c50d8ae3SPaolo Bonzini 
5425531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5426302695a5SSean Christopherson 			if (flush && flush_on_yield) {
5427c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm,
5428c50d8ae3SPaolo Bonzini 						start_gfn,
5429c50d8ae3SPaolo Bonzini 						iterator.gfn - start_gfn + 1);
5430c50d8ae3SPaolo Bonzini 				flush = false;
5431c50d8ae3SPaolo Bonzini 			}
5432531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
5433c50d8ae3SPaolo Bonzini 		}
5434c50d8ae3SPaolo Bonzini 	}
5435c50d8ae3SPaolo Bonzini 
5436c50d8ae3SPaolo Bonzini 	return flush;
5437c50d8ae3SPaolo Bonzini }
5438c50d8ae3SPaolo Bonzini 
5439c50d8ae3SPaolo Bonzini static __always_inline bool
5440269e9552SHamza Mahfooz slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5441c50d8ae3SPaolo Bonzini 		  slot_level_handler fn, int start_level, int end_level,
5442302695a5SSean Christopherson 		  bool flush_on_yield)
5443c50d8ae3SPaolo Bonzini {
5444c50d8ae3SPaolo Bonzini 	return slot_handle_level_range(kvm, memslot, fn, start_level,
5445c50d8ae3SPaolo Bonzini 			end_level, memslot->base_gfn,
5446c50d8ae3SPaolo Bonzini 			memslot->base_gfn + memslot->npages - 1,
54471a61b7dbSSean Christopherson 			flush_on_yield, false);
5448c50d8ae3SPaolo Bonzini }
5449c50d8ae3SPaolo Bonzini 
5450c50d8ae3SPaolo Bonzini static __always_inline bool
5451269e9552SHamza Mahfooz slot_handle_leaf(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5452302695a5SSean Christopherson 		 slot_level_handler fn, bool flush_on_yield)
5453c50d8ae3SPaolo Bonzini {
54543bae0459SSean Christopherson 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5455302695a5SSean Christopherson 				 PG_LEVEL_4K, flush_on_yield);
5456c50d8ae3SPaolo Bonzini }
5457c50d8ae3SPaolo Bonzini 
5458c50d8ae3SPaolo Bonzini static void free_mmu_pages(struct kvm_mmu *mmu)
5459c50d8ae3SPaolo Bonzini {
54604a98623dSSean Christopherson 	if (!tdp_enabled && mmu->pae_root)
54614a98623dSSean Christopherson 		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
5462c50d8ae3SPaolo Bonzini 	free_page((unsigned long)mmu->pae_root);
546303ca4589SSean Christopherson 	free_page((unsigned long)mmu->pml4_root);
5464c50d8ae3SPaolo Bonzini }
5465c50d8ae3SPaolo Bonzini 
546604d28e37SSean Christopherson static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5467c50d8ae3SPaolo Bonzini {
5468c50d8ae3SPaolo Bonzini 	struct page *page;
5469c50d8ae3SPaolo Bonzini 	int i;
5470c50d8ae3SPaolo Bonzini 
547104d28e37SSean Christopherson 	mmu->root_hpa = INVALID_PAGE;
547204d28e37SSean Christopherson 	mmu->root_pgd = 0;
547304d28e37SSean Christopherson 	mmu->translate_gpa = translate_gpa;
547404d28e37SSean Christopherson 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
547504d28e37SSean Christopherson 		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
547604d28e37SSean Christopherson 
5477c50d8ae3SPaolo Bonzini 	/*
5478c50d8ae3SPaolo Bonzini 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5479c50d8ae3SPaolo Bonzini 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
5480c50d8ae3SPaolo Bonzini 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5481c50d8ae3SPaolo Bonzini 	 * x86_64.  Therefore we need to allocate the PDP table in the first
548204d45551SSean Christopherson 	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
548304d45551SSean Christopherson 	 * generally doesn't use PAE paging and can skip allocating the PDP
548404d45551SSean Christopherson 	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
548504d45551SSean Christopherson 	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
548604d45551SSean Christopherson 	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
5487c50d8ae3SPaolo Bonzini 	 */
5488d468d94bSSean Christopherson 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5489c50d8ae3SPaolo Bonzini 		return 0;
5490c50d8ae3SPaolo Bonzini 
5491c50d8ae3SPaolo Bonzini 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5492c50d8ae3SPaolo Bonzini 	if (!page)
5493c50d8ae3SPaolo Bonzini 		return -ENOMEM;
5494c50d8ae3SPaolo Bonzini 
5495c50d8ae3SPaolo Bonzini 	mmu->pae_root = page_address(page);
54964a98623dSSean Christopherson 
54974a98623dSSean Christopherson 	/*
54984a98623dSSean Christopherson 	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
54994a98623dSSean Christopherson 	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
55004a98623dSSean Christopherson 	 * that KVM's writes and the CPU's reads get along.  Note, this is
55014a98623dSSean Christopherson 	 * only necessary when using shadow paging, as 64-bit NPT can get at
55024a98623dSSean Christopherson 	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
55034a98623dSSean Christopherson 	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
55044a98623dSSean Christopherson 	 */
55054a98623dSSean Christopherson 	if (!tdp_enabled)
55064a98623dSSean Christopherson 		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
55074a98623dSSean Christopherson 	else
55084a98623dSSean Christopherson 		WARN_ON_ONCE(shadow_me_mask);
55094a98623dSSean Christopherson 
5510c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i)
5511c834e5e4SSean Christopherson 		mmu->pae_root[i] = INVALID_PAE_ROOT;
5512c50d8ae3SPaolo Bonzini 
5513c50d8ae3SPaolo Bonzini 	return 0;
5514c50d8ae3SPaolo Bonzini }
5515c50d8ae3SPaolo Bonzini 
5516c50d8ae3SPaolo Bonzini int kvm_mmu_create(struct kvm_vcpu *vcpu)
5517c50d8ae3SPaolo Bonzini {
5518c50d8ae3SPaolo Bonzini 	int ret;
5519c50d8ae3SPaolo Bonzini 
55205962bfb7SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
55215f6078f9SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
55225f6078f9SSean Christopherson 
55235962bfb7SSean Christopherson 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
55245f6078f9SSean Christopherson 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
55255962bfb7SSean Christopherson 
552696880883SSean Christopherson 	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
552796880883SSean Christopherson 
5528c50d8ae3SPaolo Bonzini 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
5529c50d8ae3SPaolo Bonzini 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5530c50d8ae3SPaolo Bonzini 
5531c50d8ae3SPaolo Bonzini 	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5532c50d8ae3SPaolo Bonzini 
553304d28e37SSean Christopherson 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5534c50d8ae3SPaolo Bonzini 	if (ret)
5535c50d8ae3SPaolo Bonzini 		return ret;
5536c50d8ae3SPaolo Bonzini 
553704d28e37SSean Christopherson 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5538c50d8ae3SPaolo Bonzini 	if (ret)
5539c50d8ae3SPaolo Bonzini 		goto fail_allocate_root;
5540c50d8ae3SPaolo Bonzini 
5541c50d8ae3SPaolo Bonzini 	return ret;
5542c50d8ae3SPaolo Bonzini  fail_allocate_root:
5543c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
5544c50d8ae3SPaolo Bonzini 	return ret;
5545c50d8ae3SPaolo Bonzini }
5546c50d8ae3SPaolo Bonzini 
5547c50d8ae3SPaolo Bonzini #define BATCH_ZAP_PAGES	10
5548c50d8ae3SPaolo Bonzini static void kvm_zap_obsolete_pages(struct kvm *kvm)
5549c50d8ae3SPaolo Bonzini {
5550c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5551c50d8ae3SPaolo Bonzini 	int nr_zapped, batch = 0;
5552c50d8ae3SPaolo Bonzini 
5553c50d8ae3SPaolo Bonzini restart:
5554c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe_reverse(sp, node,
5555c50d8ae3SPaolo Bonzini 	      &kvm->arch.active_mmu_pages, link) {
5556c50d8ae3SPaolo Bonzini 		/*
5557c50d8ae3SPaolo Bonzini 		 * No obsolete valid page exists before a newly created page
5558c50d8ae3SPaolo Bonzini 		 * since active_mmu_pages is a FIFO list.
5559c50d8ae3SPaolo Bonzini 		 */
5560c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
5561c50d8ae3SPaolo Bonzini 			break;
5562c50d8ae3SPaolo Bonzini 
5563c50d8ae3SPaolo Bonzini 		/*
5564f95eec9bSSean Christopherson 		 * Invalid pages should never land back on the list of active
5565f95eec9bSSean Christopherson 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
5566f95eec9bSSean Christopherson 		 * infinite loop if the page gets put back on the list (again).
5567c50d8ae3SPaolo Bonzini 		 */
5568f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5569c50d8ae3SPaolo Bonzini 			continue;
5570c50d8ae3SPaolo Bonzini 
5571c50d8ae3SPaolo Bonzini 		/*
5572c50d8ae3SPaolo Bonzini 		 * No need to flush the TLB since we're only zapping shadow
5573c50d8ae3SPaolo Bonzini 		 * pages with an obsolete generation number and all vCPUS have
5574c50d8ae3SPaolo Bonzini 		 * loaded a new root, i.e. the shadow pages being zapped cannot
5575c50d8ae3SPaolo Bonzini 		 * be in active use by the guest.
5576c50d8ae3SPaolo Bonzini 		 */
5577c50d8ae3SPaolo Bonzini 		if (batch >= BATCH_ZAP_PAGES &&
5578531810caSBen Gardon 		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
5579c50d8ae3SPaolo Bonzini 			batch = 0;
5580c50d8ae3SPaolo Bonzini 			goto restart;
5581c50d8ae3SPaolo Bonzini 		}
5582c50d8ae3SPaolo Bonzini 
5583c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp,
5584c50d8ae3SPaolo Bonzini 				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5585c50d8ae3SPaolo Bonzini 			batch += nr_zapped;
5586c50d8ae3SPaolo Bonzini 			goto restart;
5587c50d8ae3SPaolo Bonzini 		}
5588c50d8ae3SPaolo Bonzini 	}
5589c50d8ae3SPaolo Bonzini 
5590c50d8ae3SPaolo Bonzini 	/*
5591c50d8ae3SPaolo Bonzini 	 * Trigger a remote TLB flush before freeing the page tables to ensure
5592c50d8ae3SPaolo Bonzini 	 * KVM is not in the middle of a lockless shadow page table walk, which
5593c50d8ae3SPaolo Bonzini 	 * may reference the pages.
5594c50d8ae3SPaolo Bonzini 	 */
5595c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5596c50d8ae3SPaolo Bonzini }
5597c50d8ae3SPaolo Bonzini 
5598c50d8ae3SPaolo Bonzini /*
5599c50d8ae3SPaolo Bonzini  * Fast invalidate all shadow pages and use lock-break technique
5600c50d8ae3SPaolo Bonzini  * to zap obsolete pages.
5601c50d8ae3SPaolo Bonzini  *
5602c50d8ae3SPaolo Bonzini  * It's required when memslot is being deleted or VM is being
5603c50d8ae3SPaolo Bonzini  * destroyed, in these cases, we should ensure that KVM MMU does
5604c50d8ae3SPaolo Bonzini  * not use any resource of the being-deleted slot or all slots
5605c50d8ae3SPaolo Bonzini  * after calling the function.
5606c50d8ae3SPaolo Bonzini  */
5607c50d8ae3SPaolo Bonzini static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5608c50d8ae3SPaolo Bonzini {
5609c50d8ae3SPaolo Bonzini 	lockdep_assert_held(&kvm->slots_lock);
5610c50d8ae3SPaolo Bonzini 
5611531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5612c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_zap_all_fast(kvm);
5613c50d8ae3SPaolo Bonzini 
5614c50d8ae3SPaolo Bonzini 	/*
5615c50d8ae3SPaolo Bonzini 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5616c50d8ae3SPaolo Bonzini 	 * held for the entire duration of zapping obsolete pages, it's
5617c50d8ae3SPaolo Bonzini 	 * impossible for there to be multiple invalid generations associated
5618c50d8ae3SPaolo Bonzini 	 * with *valid* shadow pages at any given time, i.e. there is exactly
5619c50d8ae3SPaolo Bonzini 	 * one valid generation and (at most) one invalid generation.
5620c50d8ae3SPaolo Bonzini 	 */
5621c50d8ae3SPaolo Bonzini 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5622c50d8ae3SPaolo Bonzini 
5623b7cccd39SBen Gardon 	/* In order to ensure all threads see this change when
5624b7cccd39SBen Gardon 	 * handling the MMU reload signal, this must happen in the
5625b7cccd39SBen Gardon 	 * same critical section as kvm_reload_remote_mmus, and
5626b7cccd39SBen Gardon 	 * before kvm_zap_obsolete_pages as kvm_zap_obsolete_pages
5627b7cccd39SBen Gardon 	 * could drop the MMU lock and yield.
5628b7cccd39SBen Gardon 	 */
5629b7cccd39SBen Gardon 	if (is_tdp_mmu_enabled(kvm))
5630b7cccd39SBen Gardon 		kvm_tdp_mmu_invalidate_all_roots(kvm);
5631b7cccd39SBen Gardon 
5632c50d8ae3SPaolo Bonzini 	/*
5633c50d8ae3SPaolo Bonzini 	 * Notify all vcpus to reload its shadow page table and flush TLB.
5634c50d8ae3SPaolo Bonzini 	 * Then all vcpus will switch to new shadow page table with the new
5635c50d8ae3SPaolo Bonzini 	 * mmu_valid_gen.
5636c50d8ae3SPaolo Bonzini 	 *
5637c50d8ae3SPaolo Bonzini 	 * Note: we need to do this under the protection of mmu_lock,
5638c50d8ae3SPaolo Bonzini 	 * otherwise, vcpu would purge shadow page but miss tlb flush.
5639c50d8ae3SPaolo Bonzini 	 */
5640c50d8ae3SPaolo Bonzini 	kvm_reload_remote_mmus(kvm);
5641c50d8ae3SPaolo Bonzini 
5642c50d8ae3SPaolo Bonzini 	kvm_zap_obsolete_pages(kvm);
5643faaf05b0SBen Gardon 
5644531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
56454c6654bdSBen Gardon 
56464c6654bdSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
56474c6654bdSBen Gardon 		read_lock(&kvm->mmu_lock);
56484c6654bdSBen Gardon 		kvm_tdp_mmu_zap_invalidated_roots(kvm);
56494c6654bdSBen Gardon 		read_unlock(&kvm->mmu_lock);
56504c6654bdSBen Gardon 	}
5651c50d8ae3SPaolo Bonzini }
5652c50d8ae3SPaolo Bonzini 
5653c50d8ae3SPaolo Bonzini static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5654c50d8ae3SPaolo Bonzini {
5655c50d8ae3SPaolo Bonzini 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5656c50d8ae3SPaolo Bonzini }
5657c50d8ae3SPaolo Bonzini 
5658c50d8ae3SPaolo Bonzini static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5659c50d8ae3SPaolo Bonzini 			struct kvm_memory_slot *slot,
5660c50d8ae3SPaolo Bonzini 			struct kvm_page_track_notifier_node *node)
5661c50d8ae3SPaolo Bonzini {
5662c50d8ae3SPaolo Bonzini 	kvm_mmu_zap_all_fast(kvm);
5663c50d8ae3SPaolo Bonzini }
5664c50d8ae3SPaolo Bonzini 
5665c50d8ae3SPaolo Bonzini void kvm_mmu_init_vm(struct kvm *kvm)
5666c50d8ae3SPaolo Bonzini {
5667c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5668c50d8ae3SPaolo Bonzini 
5669ce25681dSSean Christopherson 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
5670ce25681dSSean Christopherson 
5671d501f747SBen Gardon 	if (!kvm_mmu_init_tdp_mmu(kvm))
5672d501f747SBen Gardon 		/*
5673d501f747SBen Gardon 		 * No smp_load/store wrappers needed here as we are in
5674d501f747SBen Gardon 		 * VM init and there cannot be any memslots / other threads
5675d501f747SBen Gardon 		 * accessing this struct kvm yet.
5676d501f747SBen Gardon 		 */
5677a2557408SBen Gardon 		kvm->arch.memslots_have_rmaps = true;
5678fe5db27dSBen Gardon 
5679c50d8ae3SPaolo Bonzini 	node->track_write = kvm_mmu_pte_write;
5680c50d8ae3SPaolo Bonzini 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5681c50d8ae3SPaolo Bonzini 	kvm_page_track_register_notifier(kvm, node);
5682c50d8ae3SPaolo Bonzini }
5683c50d8ae3SPaolo Bonzini 
5684c50d8ae3SPaolo Bonzini void kvm_mmu_uninit_vm(struct kvm *kvm)
5685c50d8ae3SPaolo Bonzini {
5686c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5687c50d8ae3SPaolo Bonzini 
5688c50d8ae3SPaolo Bonzini 	kvm_page_track_unregister_notifier(kvm, node);
5689fe5db27dSBen Gardon 
5690fe5db27dSBen Gardon 	kvm_mmu_uninit_tdp_mmu(kvm);
5691c50d8ae3SPaolo Bonzini }
5692c50d8ae3SPaolo Bonzini 
569388f58535SMaxim Levitsky /*
569488f58535SMaxim Levitsky  * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end
569588f58535SMaxim Levitsky  * (not including it)
569688f58535SMaxim Levitsky  */
5697c50d8ae3SPaolo Bonzini void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5698c50d8ae3SPaolo Bonzini {
5699c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
5700c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
5701c50d8ae3SPaolo Bonzini 	int i;
57021a61b7dbSSean Christopherson 	bool flush = false;
5703c50d8ae3SPaolo Bonzini 
5704531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
57055a324c24SSean Christopherson 
5706edb298c6SMaxim Levitsky 	kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
5707edb298c6SMaxim Levitsky 
57085a324c24SSean Christopherson 	if (kvm_memslots_have_rmaps(kvm)) {
5709c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5710c50d8ae3SPaolo Bonzini 			slots = __kvm_memslots(kvm, i);
5711c50d8ae3SPaolo Bonzini 			kvm_for_each_memslot(memslot, slots) {
5712c50d8ae3SPaolo Bonzini 				gfn_t start, end;
5713c50d8ae3SPaolo Bonzini 
5714c50d8ae3SPaolo Bonzini 				start = max(gfn_start, memslot->base_gfn);
5715c50d8ae3SPaolo Bonzini 				end = min(gfn_end, memslot->base_gfn + memslot->npages);
5716c50d8ae3SPaolo Bonzini 				if (start >= end)
5717c50d8ae3SPaolo Bonzini 					continue;
5718c50d8ae3SPaolo Bonzini 
5719269e9552SHamza Mahfooz 				flush = slot_handle_level_range(kvm,
5720269e9552SHamza Mahfooz 						(const struct kvm_memory_slot *) memslot,
5721e2209710SBen Gardon 						kvm_zap_rmapp, PG_LEVEL_4K,
5722e2209710SBen Gardon 						KVM_MAX_HUGEPAGE_LEVEL, start,
5723e2209710SBen Gardon 						end - 1, true, flush);
5724c50d8ae3SPaolo Bonzini 			}
5725c50d8ae3SPaolo Bonzini 		}
5726faaf05b0SBen Gardon 		if (flush)
57272822da44SMaxim Levitsky 			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
57282822da44SMaxim Levitsky 							   gfn_end - gfn_start);
5729e2209710SBen Gardon 	}
57306103bc07SBen Gardon 
57316103bc07SBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
57326103bc07SBen Gardon 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
57336103bc07SBen Gardon 			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
57345a324c24SSean Christopherson 							  gfn_end, flush);
57352822da44SMaxim Levitsky 		if (flush)
57362822da44SMaxim Levitsky 			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
57372822da44SMaxim Levitsky 							   gfn_end - gfn_start);
57386103bc07SBen Gardon 	}
57395a324c24SSean Christopherson 
57405a324c24SSean Christopherson 	if (flush)
57415a324c24SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
57425a324c24SSean Christopherson 
5743edb298c6SMaxim Levitsky 	kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
5744edb298c6SMaxim Levitsky 
57455a324c24SSean Christopherson 	write_unlock(&kvm->mmu_lock);
5746c50d8ae3SPaolo Bonzini }
5747c50d8ae3SPaolo Bonzini 
5748c50d8ae3SPaolo Bonzini static bool slot_rmap_write_protect(struct kvm *kvm,
57490a234f5dSSean Christopherson 				    struct kvm_rmap_head *rmap_head,
5750269e9552SHamza Mahfooz 				    const struct kvm_memory_slot *slot)
5751c50d8ae3SPaolo Bonzini {
5752c50d8ae3SPaolo Bonzini 	return __rmap_write_protect(kvm, rmap_head, false);
5753c50d8ae3SPaolo Bonzini }
5754c50d8ae3SPaolo Bonzini 
5755c50d8ae3SPaolo Bonzini void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
5756269e9552SHamza Mahfooz 				      const struct kvm_memory_slot *memslot,
57573c9bd400SJay Zhou 				      int start_level)
5758c50d8ae3SPaolo Bonzini {
5759e2209710SBen Gardon 	bool flush = false;
5760c50d8ae3SPaolo Bonzini 
5761e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5762531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
57633c9bd400SJay Zhou 		flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5764e2209710SBen Gardon 					  start_level, KVM_MAX_HUGEPAGE_LEVEL,
5765e2209710SBen Gardon 					  false);
5766531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5767e2209710SBen Gardon 	}
5768c50d8ae3SPaolo Bonzini 
576924ae4cfaSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
577024ae4cfaSBen Gardon 		read_lock(&kvm->mmu_lock);
577124ae4cfaSBen Gardon 		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
577224ae4cfaSBen Gardon 		read_unlock(&kvm->mmu_lock);
577324ae4cfaSBen Gardon 	}
577424ae4cfaSBen Gardon 
5775c50d8ae3SPaolo Bonzini 	/*
5776c50d8ae3SPaolo Bonzini 	 * We can flush all the TLBs out of the mmu lock without TLB
5777c50d8ae3SPaolo Bonzini 	 * corruption since we just change the spte from writable to
5778c50d8ae3SPaolo Bonzini 	 * readonly so that we only need to care the case of changing
5779c50d8ae3SPaolo Bonzini 	 * spte from present to present (changing the spte from present
5780c50d8ae3SPaolo Bonzini 	 * to nonpresent will flush all the TLBs immediately), in other
5781c50d8ae3SPaolo Bonzini 	 * words, the only case we care is mmu_spte_update() where we
57825fc3424fSSean Christopherson 	 * have checked Host-writable | MMU-writable instead of
57835fc3424fSSean Christopherson 	 * PT_WRITABLE_MASK, that means it does not depend on PT_WRITABLE_MASK
57845fc3424fSSean Christopherson 	 * anymore.
5785c50d8ae3SPaolo Bonzini 	 */
5786c50d8ae3SPaolo Bonzini 	if (flush)
57877f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5788c50d8ae3SPaolo Bonzini }
5789c50d8ae3SPaolo Bonzini 
5790c50d8ae3SPaolo Bonzini static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
57910a234f5dSSean Christopherson 					 struct kvm_rmap_head *rmap_head,
5792269e9552SHamza Mahfooz 					 const struct kvm_memory_slot *slot)
5793c50d8ae3SPaolo Bonzini {
5794c50d8ae3SPaolo Bonzini 	u64 *sptep;
5795c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
5796c50d8ae3SPaolo Bonzini 	int need_tlb_flush = 0;
5797c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
5798c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5799c50d8ae3SPaolo Bonzini 
5800c50d8ae3SPaolo Bonzini restart:
5801c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
580257354682SSean Christopherson 		sp = sptep_to_sp(sptep);
5803c50d8ae3SPaolo Bonzini 		pfn = spte_to_pfn(*sptep);
5804c50d8ae3SPaolo Bonzini 
5805c50d8ae3SPaolo Bonzini 		/*
5806c50d8ae3SPaolo Bonzini 		 * We cannot do huge page mapping for indirect shadow pages,
5807c50d8ae3SPaolo Bonzini 		 * which are found on the last rmap (level = 1) when not using
5808c50d8ae3SPaolo Bonzini 		 * tdp; such shadow pages are synced with the page table in
5809c50d8ae3SPaolo Bonzini 		 * the guest, and the guest page table is using 4K page size
5810c50d8ae3SPaolo Bonzini 		 * mapping if the indirect sp has level = 1.
5811c50d8ae3SPaolo Bonzini 		 */
5812c50d8ae3SPaolo Bonzini 		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
58139eba50f8SSean Christopherson 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
58149eba50f8SSean Christopherson 							       pfn, PG_LEVEL_NUM)) {
581571f51d2cSMingwei Zhang 			pte_list_remove(kvm, rmap_head, sptep);
5816c50d8ae3SPaolo Bonzini 
5817c50d8ae3SPaolo Bonzini 			if (kvm_available_flush_tlb_with_range())
5818c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
5819c50d8ae3SPaolo Bonzini 					KVM_PAGES_PER_HPAGE(sp->role.level));
5820c50d8ae3SPaolo Bonzini 			else
5821c50d8ae3SPaolo Bonzini 				need_tlb_flush = 1;
5822c50d8ae3SPaolo Bonzini 
5823c50d8ae3SPaolo Bonzini 			goto restart;
5824c50d8ae3SPaolo Bonzini 		}
5825c50d8ae3SPaolo Bonzini 	}
5826c50d8ae3SPaolo Bonzini 
5827c50d8ae3SPaolo Bonzini 	return need_tlb_flush;
5828c50d8ae3SPaolo Bonzini }
5829c50d8ae3SPaolo Bonzini 
5830c50d8ae3SPaolo Bonzini void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5831269e9552SHamza Mahfooz 				   const struct kvm_memory_slot *slot)
5832c50d8ae3SPaolo Bonzini {
583331c65657SColin Ian King 	bool flush = false;
58349eba50f8SSean Christopherson 
5835e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5836531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5837302695a5SSean Christopherson 		flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
5838302695a5SSean Christopherson 		if (flush)
5839302695a5SSean Christopherson 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
5840531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5841e2209710SBen Gardon 	}
58422db6f772SBen Gardon 
58432db6f772SBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
58442db6f772SBen Gardon 		read_lock(&kvm->mmu_lock);
58452db6f772SBen Gardon 		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
58462db6f772SBen Gardon 		if (flush)
58472db6f772SBen Gardon 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
58482db6f772SBen Gardon 		read_unlock(&kvm->mmu_lock);
58492db6f772SBen Gardon 	}
5850c50d8ae3SPaolo Bonzini }
5851c50d8ae3SPaolo Bonzini 
5852b3594ffbSSean Christopherson void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
58536c9dd6d2SPaolo Bonzini 					const struct kvm_memory_slot *memslot)
5854b3594ffbSSean Christopherson {
5855b3594ffbSSean Christopherson 	/*
58567f42aa76SSean Christopherson 	 * All current use cases for flushing the TLBs for a specific memslot
5857302695a5SSean Christopherson 	 * related to dirty logging, and many do the TLB flush out of mmu_lock.
58587f42aa76SSean Christopherson 	 * The interaction between the various operations on memslot must be
58597f42aa76SSean Christopherson 	 * serialized by slots_locks to ensure the TLB flush from one operation
58607f42aa76SSean Christopherson 	 * is observed by any other operation on the same memslot.
5861b3594ffbSSean Christopherson 	 */
5862b3594ffbSSean Christopherson 	lockdep_assert_held(&kvm->slots_lock);
5863cec37648SSean Christopherson 	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5864cec37648SSean Christopherson 					   memslot->npages);
5865b3594ffbSSean Christopherson }
5866b3594ffbSSean Christopherson 
5867c50d8ae3SPaolo Bonzini void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5868269e9552SHamza Mahfooz 				   const struct kvm_memory_slot *memslot)
5869c50d8ae3SPaolo Bonzini {
5870e2209710SBen Gardon 	bool flush = false;
5871c50d8ae3SPaolo Bonzini 
5872e2209710SBen Gardon 	if (kvm_memslots_have_rmaps(kvm)) {
5873531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5874e2209710SBen Gardon 		flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
5875e2209710SBen Gardon 					 false);
5876531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5877e2209710SBen Gardon 	}
5878c50d8ae3SPaolo Bonzini 
587924ae4cfaSBen Gardon 	if (is_tdp_mmu_enabled(kvm)) {
588024ae4cfaSBen Gardon 		read_lock(&kvm->mmu_lock);
588124ae4cfaSBen Gardon 		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
588224ae4cfaSBen Gardon 		read_unlock(&kvm->mmu_lock);
588324ae4cfaSBen Gardon 	}
588424ae4cfaSBen Gardon 
5885c50d8ae3SPaolo Bonzini 	/*
5886c50d8ae3SPaolo Bonzini 	 * It's also safe to flush TLBs out of mmu lock here as currently this
5887c50d8ae3SPaolo Bonzini 	 * function is only used for dirty logging, in which case flushing TLB
5888c50d8ae3SPaolo Bonzini 	 * out of mmu lock also guarantees no dirty pages will be lost in
5889c50d8ae3SPaolo Bonzini 	 * dirty_bitmap.
5890c50d8ae3SPaolo Bonzini 	 */
5891c50d8ae3SPaolo Bonzini 	if (flush)
58927f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5893c50d8ae3SPaolo Bonzini }
5894c50d8ae3SPaolo Bonzini 
5895c50d8ae3SPaolo Bonzini void kvm_mmu_zap_all(struct kvm *kvm)
5896c50d8ae3SPaolo Bonzini {
5897c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5898c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5899c50d8ae3SPaolo Bonzini 	int ign;
5900c50d8ae3SPaolo Bonzini 
5901531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5902c50d8ae3SPaolo Bonzini restart:
5903c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
5904f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5905c50d8ae3SPaolo Bonzini 			continue;
5906c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
5907c50d8ae3SPaolo Bonzini 			goto restart;
5908531810caSBen Gardon 		if (cond_resched_rwlock_write(&kvm->mmu_lock))
5909c50d8ae3SPaolo Bonzini 			goto restart;
5910c50d8ae3SPaolo Bonzini 	}
5911c50d8ae3SPaolo Bonzini 
5912c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
5913faaf05b0SBen Gardon 
5914897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
5915faaf05b0SBen Gardon 		kvm_tdp_mmu_zap_all(kvm);
5916faaf05b0SBen Gardon 
5917531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5918c50d8ae3SPaolo Bonzini }
5919c50d8ae3SPaolo Bonzini 
5920c50d8ae3SPaolo Bonzini void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5921c50d8ae3SPaolo Bonzini {
5922c50d8ae3SPaolo Bonzini 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
5923c50d8ae3SPaolo Bonzini 
5924c50d8ae3SPaolo Bonzini 	gen &= MMIO_SPTE_GEN_MASK;
5925c50d8ae3SPaolo Bonzini 
5926c50d8ae3SPaolo Bonzini 	/*
5927c50d8ae3SPaolo Bonzini 	 * Generation numbers are incremented in multiples of the number of
5928c50d8ae3SPaolo Bonzini 	 * address spaces in order to provide unique generations across all
5929c50d8ae3SPaolo Bonzini 	 * address spaces.  Strip what is effectively the address space
5930c50d8ae3SPaolo Bonzini 	 * modifier prior to checking for a wrap of the MMIO generation so
5931c50d8ae3SPaolo Bonzini 	 * that a wrap in any address space is detected.
5932c50d8ae3SPaolo Bonzini 	 */
5933c50d8ae3SPaolo Bonzini 	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
5934c50d8ae3SPaolo Bonzini 
5935c50d8ae3SPaolo Bonzini 	/*
5936c50d8ae3SPaolo Bonzini 	 * The very rare case: if the MMIO generation number has wrapped,
5937c50d8ae3SPaolo Bonzini 	 * zap all shadow pages.
5938c50d8ae3SPaolo Bonzini 	 */
5939c50d8ae3SPaolo Bonzini 	if (unlikely(gen == 0)) {
5940c50d8ae3SPaolo Bonzini 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5941c50d8ae3SPaolo Bonzini 		kvm_mmu_zap_all_fast(kvm);
5942c50d8ae3SPaolo Bonzini 	}
5943c50d8ae3SPaolo Bonzini }
5944c50d8ae3SPaolo Bonzini 
5945c50d8ae3SPaolo Bonzini static unsigned long
5946c50d8ae3SPaolo Bonzini mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5947c50d8ae3SPaolo Bonzini {
5948c50d8ae3SPaolo Bonzini 	struct kvm *kvm;
5949c50d8ae3SPaolo Bonzini 	int nr_to_scan = sc->nr_to_scan;
5950c50d8ae3SPaolo Bonzini 	unsigned long freed = 0;
5951c50d8ae3SPaolo Bonzini 
5952c50d8ae3SPaolo Bonzini 	mutex_lock(&kvm_lock);
5953c50d8ae3SPaolo Bonzini 
5954c50d8ae3SPaolo Bonzini 	list_for_each_entry(kvm, &vm_list, vm_list) {
5955c50d8ae3SPaolo Bonzini 		int idx;
5956c50d8ae3SPaolo Bonzini 		LIST_HEAD(invalid_list);
5957c50d8ae3SPaolo Bonzini 
5958c50d8ae3SPaolo Bonzini 		/*
5959c50d8ae3SPaolo Bonzini 		 * Never scan more than sc->nr_to_scan VM instances.
5960c50d8ae3SPaolo Bonzini 		 * Will not hit this condition practically since we do not try
5961c50d8ae3SPaolo Bonzini 		 * to shrink more than one VM and it is very unlikely to see
5962c50d8ae3SPaolo Bonzini 		 * !n_used_mmu_pages so many times.
5963c50d8ae3SPaolo Bonzini 		 */
5964c50d8ae3SPaolo Bonzini 		if (!nr_to_scan--)
5965c50d8ae3SPaolo Bonzini 			break;
5966c50d8ae3SPaolo Bonzini 		/*
5967c50d8ae3SPaolo Bonzini 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5968c50d8ae3SPaolo Bonzini 		 * here. We may skip a VM instance errorneosly, but we do not
5969c50d8ae3SPaolo Bonzini 		 * want to shrink a VM that only started to populate its MMU
5970c50d8ae3SPaolo Bonzini 		 * anyway.
5971c50d8ae3SPaolo Bonzini 		 */
5972c50d8ae3SPaolo Bonzini 		if (!kvm->arch.n_used_mmu_pages &&
5973c50d8ae3SPaolo Bonzini 		    !kvm_has_zapped_obsolete_pages(kvm))
5974c50d8ae3SPaolo Bonzini 			continue;
5975c50d8ae3SPaolo Bonzini 
5976c50d8ae3SPaolo Bonzini 		idx = srcu_read_lock(&kvm->srcu);
5977531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5978c50d8ae3SPaolo Bonzini 
5979c50d8ae3SPaolo Bonzini 		if (kvm_has_zapped_obsolete_pages(kvm)) {
5980c50d8ae3SPaolo Bonzini 			kvm_mmu_commit_zap_page(kvm,
5981c50d8ae3SPaolo Bonzini 			      &kvm->arch.zapped_obsolete_pages);
5982c50d8ae3SPaolo Bonzini 			goto unlock;
5983c50d8ae3SPaolo Bonzini 		}
5984c50d8ae3SPaolo Bonzini 
5985ebdb292dSSean Christopherson 		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
5986c50d8ae3SPaolo Bonzini 
5987c50d8ae3SPaolo Bonzini unlock:
5988531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5989c50d8ae3SPaolo Bonzini 		srcu_read_unlock(&kvm->srcu, idx);
5990c50d8ae3SPaolo Bonzini 
5991c50d8ae3SPaolo Bonzini 		/*
5992c50d8ae3SPaolo Bonzini 		 * unfair on small ones
5993c50d8ae3SPaolo Bonzini 		 * per-vm shrinkers cry out
5994c50d8ae3SPaolo Bonzini 		 * sadness comes quickly
5995c50d8ae3SPaolo Bonzini 		 */
5996c50d8ae3SPaolo Bonzini 		list_move_tail(&kvm->vm_list, &vm_list);
5997c50d8ae3SPaolo Bonzini 		break;
5998c50d8ae3SPaolo Bonzini 	}
5999c50d8ae3SPaolo Bonzini 
6000c50d8ae3SPaolo Bonzini 	mutex_unlock(&kvm_lock);
6001c50d8ae3SPaolo Bonzini 	return freed;
6002c50d8ae3SPaolo Bonzini }
6003c50d8ae3SPaolo Bonzini 
6004c50d8ae3SPaolo Bonzini static unsigned long
6005c50d8ae3SPaolo Bonzini mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
6006c50d8ae3SPaolo Bonzini {
6007c50d8ae3SPaolo Bonzini 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
6008c50d8ae3SPaolo Bonzini }
6009c50d8ae3SPaolo Bonzini 
6010c50d8ae3SPaolo Bonzini static struct shrinker mmu_shrinker = {
6011c50d8ae3SPaolo Bonzini 	.count_objects = mmu_shrink_count,
6012c50d8ae3SPaolo Bonzini 	.scan_objects = mmu_shrink_scan,
6013c50d8ae3SPaolo Bonzini 	.seeks = DEFAULT_SEEKS * 10,
6014c50d8ae3SPaolo Bonzini };
6015c50d8ae3SPaolo Bonzini 
6016c50d8ae3SPaolo Bonzini static void mmu_destroy_caches(void)
6017c50d8ae3SPaolo Bonzini {
6018c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(pte_list_desc_cache);
6019c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(mmu_page_header_cache);
6020c50d8ae3SPaolo Bonzini }
6021c50d8ae3SPaolo Bonzini 
6022c50d8ae3SPaolo Bonzini static bool get_nx_auto_mode(void)
6023c50d8ae3SPaolo Bonzini {
6024c50d8ae3SPaolo Bonzini 	/* Return true when CPU has the bug, and mitigations are ON */
6025c50d8ae3SPaolo Bonzini 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
6026c50d8ae3SPaolo Bonzini }
6027c50d8ae3SPaolo Bonzini 
6028c50d8ae3SPaolo Bonzini static void __set_nx_huge_pages(bool val)
6029c50d8ae3SPaolo Bonzini {
6030c50d8ae3SPaolo Bonzini 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
6031c50d8ae3SPaolo Bonzini }
6032c50d8ae3SPaolo Bonzini 
6033c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
6034c50d8ae3SPaolo Bonzini {
6035c50d8ae3SPaolo Bonzini 	bool old_val = nx_huge_pages;
6036c50d8ae3SPaolo Bonzini 	bool new_val;
6037c50d8ae3SPaolo Bonzini 
6038c50d8ae3SPaolo Bonzini 	/* In "auto" mode deploy workaround only if CPU has the bug. */
6039c50d8ae3SPaolo Bonzini 	if (sysfs_streq(val, "off"))
6040c50d8ae3SPaolo Bonzini 		new_val = 0;
6041c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "force"))
6042c50d8ae3SPaolo Bonzini 		new_val = 1;
6043c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "auto"))
6044c50d8ae3SPaolo Bonzini 		new_val = get_nx_auto_mode();
6045c50d8ae3SPaolo Bonzini 	else if (strtobool(val, &new_val) < 0)
6046c50d8ae3SPaolo Bonzini 		return -EINVAL;
6047c50d8ae3SPaolo Bonzini 
6048c50d8ae3SPaolo Bonzini 	__set_nx_huge_pages(new_val);
6049c50d8ae3SPaolo Bonzini 
6050c50d8ae3SPaolo Bonzini 	if (new_val != old_val) {
6051c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
6052c50d8ae3SPaolo Bonzini 
6053c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
6054c50d8ae3SPaolo Bonzini 
6055c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list) {
6056c50d8ae3SPaolo Bonzini 			mutex_lock(&kvm->slots_lock);
6057c50d8ae3SPaolo Bonzini 			kvm_mmu_zap_all_fast(kvm);
6058c50d8ae3SPaolo Bonzini 			mutex_unlock(&kvm->slots_lock);
6059c50d8ae3SPaolo Bonzini 
6060c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6061c50d8ae3SPaolo Bonzini 		}
6062c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
6063c50d8ae3SPaolo Bonzini 	}
6064c50d8ae3SPaolo Bonzini 
6065c50d8ae3SPaolo Bonzini 	return 0;
6066c50d8ae3SPaolo Bonzini }
6067c50d8ae3SPaolo Bonzini 
6068c50d8ae3SPaolo Bonzini int kvm_mmu_module_init(void)
6069c50d8ae3SPaolo Bonzini {
6070c50d8ae3SPaolo Bonzini 	int ret = -ENOMEM;
6071c50d8ae3SPaolo Bonzini 
6072c50d8ae3SPaolo Bonzini 	if (nx_huge_pages == -1)
6073c50d8ae3SPaolo Bonzini 		__set_nx_huge_pages(get_nx_auto_mode());
6074c50d8ae3SPaolo Bonzini 
6075c50d8ae3SPaolo Bonzini 	/*
6076c50d8ae3SPaolo Bonzini 	 * MMU roles use union aliasing which is, generally speaking, an
6077c50d8ae3SPaolo Bonzini 	 * undefined behavior. However, we supposedly know how compilers behave
6078c50d8ae3SPaolo Bonzini 	 * and the current status quo is unlikely to change. Guardians below are
6079c50d8ae3SPaolo Bonzini 	 * supposed to let us know if the assumption becomes false.
6080c50d8ae3SPaolo Bonzini 	 */
6081c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
6082c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
6083c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
6084c50d8ae3SPaolo Bonzini 
6085c50d8ae3SPaolo Bonzini 	kvm_mmu_reset_all_pte_masks();
6086c50d8ae3SPaolo Bonzini 
6087c50d8ae3SPaolo Bonzini 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
6088c50d8ae3SPaolo Bonzini 					    sizeof(struct pte_list_desc),
6089c50d8ae3SPaolo Bonzini 					    0, SLAB_ACCOUNT, NULL);
6090c50d8ae3SPaolo Bonzini 	if (!pte_list_desc_cache)
6091c50d8ae3SPaolo Bonzini 		goto out;
6092c50d8ae3SPaolo Bonzini 
6093c50d8ae3SPaolo Bonzini 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
6094c50d8ae3SPaolo Bonzini 						  sizeof(struct kvm_mmu_page),
6095c50d8ae3SPaolo Bonzini 						  0, SLAB_ACCOUNT, NULL);
6096c50d8ae3SPaolo Bonzini 	if (!mmu_page_header_cache)
6097c50d8ae3SPaolo Bonzini 		goto out;
6098c50d8ae3SPaolo Bonzini 
6099c50d8ae3SPaolo Bonzini 	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
6100c50d8ae3SPaolo Bonzini 		goto out;
6101c50d8ae3SPaolo Bonzini 
6102c50d8ae3SPaolo Bonzini 	ret = register_shrinker(&mmu_shrinker);
6103c50d8ae3SPaolo Bonzini 	if (ret)
6104c50d8ae3SPaolo Bonzini 		goto out;
6105c50d8ae3SPaolo Bonzini 
6106c50d8ae3SPaolo Bonzini 	return 0;
6107c50d8ae3SPaolo Bonzini 
6108c50d8ae3SPaolo Bonzini out:
6109c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
6110c50d8ae3SPaolo Bonzini 	return ret;
6111c50d8ae3SPaolo Bonzini }
6112c50d8ae3SPaolo Bonzini 
6113c50d8ae3SPaolo Bonzini /*
6114c50d8ae3SPaolo Bonzini  * Calculate mmu pages needed for kvm.
6115c50d8ae3SPaolo Bonzini  */
6116c50d8ae3SPaolo Bonzini unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
6117c50d8ae3SPaolo Bonzini {
6118c50d8ae3SPaolo Bonzini 	unsigned long nr_mmu_pages;
6119c50d8ae3SPaolo Bonzini 	unsigned long nr_pages = 0;
6120c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
6121c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
6122c50d8ae3SPaolo Bonzini 	int i;
6123c50d8ae3SPaolo Bonzini 
6124c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
6125c50d8ae3SPaolo Bonzini 		slots = __kvm_memslots(kvm, i);
6126c50d8ae3SPaolo Bonzini 
6127c50d8ae3SPaolo Bonzini 		kvm_for_each_memslot(memslot, slots)
6128c50d8ae3SPaolo Bonzini 			nr_pages += memslot->npages;
6129c50d8ae3SPaolo Bonzini 	}
6130c50d8ae3SPaolo Bonzini 
6131c50d8ae3SPaolo Bonzini 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
6132c50d8ae3SPaolo Bonzini 	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
6133c50d8ae3SPaolo Bonzini 
6134c50d8ae3SPaolo Bonzini 	return nr_mmu_pages;
6135c50d8ae3SPaolo Bonzini }
6136c50d8ae3SPaolo Bonzini 
6137c50d8ae3SPaolo Bonzini void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
6138c50d8ae3SPaolo Bonzini {
6139c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
6140c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.root_mmu);
6141c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
6142c50d8ae3SPaolo Bonzini 	mmu_free_memory_caches(vcpu);
6143c50d8ae3SPaolo Bonzini }
6144c50d8ae3SPaolo Bonzini 
6145c50d8ae3SPaolo Bonzini void kvm_mmu_module_exit(void)
6146c50d8ae3SPaolo Bonzini {
6147c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
6148c50d8ae3SPaolo Bonzini 	percpu_counter_destroy(&kvm_total_used_mmu_pages);
6149c50d8ae3SPaolo Bonzini 	unregister_shrinker(&mmu_shrinker);
6150c50d8ae3SPaolo Bonzini 	mmu_audit_disable();
6151c50d8ae3SPaolo Bonzini }
6152c50d8ae3SPaolo Bonzini 
6153c50d8ae3SPaolo Bonzini static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
6154c50d8ae3SPaolo Bonzini {
6155c50d8ae3SPaolo Bonzini 	unsigned int old_val;
6156c50d8ae3SPaolo Bonzini 	int err;
6157c50d8ae3SPaolo Bonzini 
6158c50d8ae3SPaolo Bonzini 	old_val = nx_huge_pages_recovery_ratio;
6159c50d8ae3SPaolo Bonzini 	err = param_set_uint(val, kp);
6160c50d8ae3SPaolo Bonzini 	if (err)
6161c50d8ae3SPaolo Bonzini 		return err;
6162c50d8ae3SPaolo Bonzini 
6163c50d8ae3SPaolo Bonzini 	if (READ_ONCE(nx_huge_pages) &&
6164c50d8ae3SPaolo Bonzini 	    !old_val && nx_huge_pages_recovery_ratio) {
6165c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
6166c50d8ae3SPaolo Bonzini 
6167c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
6168c50d8ae3SPaolo Bonzini 
6169c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list)
6170c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6171c50d8ae3SPaolo Bonzini 
6172c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
6173c50d8ae3SPaolo Bonzini 	}
6174c50d8ae3SPaolo Bonzini 
6175c50d8ae3SPaolo Bonzini 	return err;
6176c50d8ae3SPaolo Bonzini }
6177c50d8ae3SPaolo Bonzini 
6178c50d8ae3SPaolo Bonzini static void kvm_recover_nx_lpages(struct kvm *kvm)
6179c50d8ae3SPaolo Bonzini {
6180ade74e14SSean Christopherson 	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
6181c50d8ae3SPaolo Bonzini 	int rcu_idx;
6182c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
6183c50d8ae3SPaolo Bonzini 	unsigned int ratio;
6184c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
6185048f4980SSean Christopherson 	bool flush = false;
6186c50d8ae3SPaolo Bonzini 	ulong to_zap;
6187c50d8ae3SPaolo Bonzini 
6188c50d8ae3SPaolo Bonzini 	rcu_idx = srcu_read_lock(&kvm->srcu);
6189531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
6190c50d8ae3SPaolo Bonzini 
6191c50d8ae3SPaolo Bonzini 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
6192ade74e14SSean Christopherson 	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
61937d919c7aSSean Christopherson 	for ( ; to_zap; --to_zap) {
61947d919c7aSSean Christopherson 		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
61957d919c7aSSean Christopherson 			break;
61967d919c7aSSean Christopherson 
6197c50d8ae3SPaolo Bonzini 		/*
6198c50d8ae3SPaolo Bonzini 		 * We use a separate list instead of just using active_mmu_pages
6199c50d8ae3SPaolo Bonzini 		 * because the number of lpage_disallowed pages is expected to
6200c50d8ae3SPaolo Bonzini 		 * be relatively small compared to the total.
6201c50d8ae3SPaolo Bonzini 		 */
6202c50d8ae3SPaolo Bonzini 		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
6203c50d8ae3SPaolo Bonzini 				      struct kvm_mmu_page,
6204c50d8ae3SPaolo Bonzini 				      lpage_disallowed_link);
6205c50d8ae3SPaolo Bonzini 		WARN_ON_ONCE(!sp->lpage_disallowed);
6206897218ffSPaolo Bonzini 		if (is_tdp_mmu_page(sp)) {
6207315f02c6SPaolo Bonzini 			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
62088d1a182eSBen Gardon 		} else {
6209c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
6210c50d8ae3SPaolo Bonzini 			WARN_ON_ONCE(sp->lpage_disallowed);
621129cf0f50SBen Gardon 		}
6212c50d8ae3SPaolo Bonzini 
6213531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6214048f4980SSean Christopherson 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6215531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
6216048f4980SSean Christopherson 			flush = false;
6217c50d8ae3SPaolo Bonzini 		}
6218c50d8ae3SPaolo Bonzini 	}
6219048f4980SSean Christopherson 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6220c50d8ae3SPaolo Bonzini 
6221531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
6222c50d8ae3SPaolo Bonzini 	srcu_read_unlock(&kvm->srcu, rcu_idx);
6223c50d8ae3SPaolo Bonzini }
6224c50d8ae3SPaolo Bonzini 
6225c50d8ae3SPaolo Bonzini static long get_nx_lpage_recovery_timeout(u64 start_time)
6226c50d8ae3SPaolo Bonzini {
6227c50d8ae3SPaolo Bonzini 	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
6228c50d8ae3SPaolo Bonzini 		? start_time + 60 * HZ - get_jiffies_64()
6229c50d8ae3SPaolo Bonzini 		: MAX_SCHEDULE_TIMEOUT;
6230c50d8ae3SPaolo Bonzini }
6231c50d8ae3SPaolo Bonzini 
6232c50d8ae3SPaolo Bonzini static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
6233c50d8ae3SPaolo Bonzini {
6234c50d8ae3SPaolo Bonzini 	u64 start_time;
6235c50d8ae3SPaolo Bonzini 	long remaining_time;
6236c50d8ae3SPaolo Bonzini 
6237c50d8ae3SPaolo Bonzini 	while (true) {
6238c50d8ae3SPaolo Bonzini 		start_time = get_jiffies_64();
6239c50d8ae3SPaolo Bonzini 		remaining_time = get_nx_lpage_recovery_timeout(start_time);
6240c50d8ae3SPaolo Bonzini 
6241c50d8ae3SPaolo Bonzini 		set_current_state(TASK_INTERRUPTIBLE);
6242c50d8ae3SPaolo Bonzini 		while (!kthread_should_stop() && remaining_time > 0) {
6243c50d8ae3SPaolo Bonzini 			schedule_timeout(remaining_time);
6244c50d8ae3SPaolo Bonzini 			remaining_time = get_nx_lpage_recovery_timeout(start_time);
6245c50d8ae3SPaolo Bonzini 			set_current_state(TASK_INTERRUPTIBLE);
6246c50d8ae3SPaolo Bonzini 		}
6247c50d8ae3SPaolo Bonzini 
6248c50d8ae3SPaolo Bonzini 		set_current_state(TASK_RUNNING);
6249c50d8ae3SPaolo Bonzini 
6250c50d8ae3SPaolo Bonzini 		if (kthread_should_stop())
6251c50d8ae3SPaolo Bonzini 			return 0;
6252c50d8ae3SPaolo Bonzini 
6253c50d8ae3SPaolo Bonzini 		kvm_recover_nx_lpages(kvm);
6254c50d8ae3SPaolo Bonzini 	}
6255c50d8ae3SPaolo Bonzini }
6256c50d8ae3SPaolo Bonzini 
6257c50d8ae3SPaolo Bonzini int kvm_mmu_post_init_vm(struct kvm *kvm)
6258c50d8ae3SPaolo Bonzini {
6259c50d8ae3SPaolo Bonzini 	int err;
6260c50d8ae3SPaolo Bonzini 
6261c50d8ae3SPaolo Bonzini 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6262c50d8ae3SPaolo Bonzini 					  "kvm-nx-lpage-recovery",
6263c50d8ae3SPaolo Bonzini 					  &kvm->arch.nx_lpage_recovery_thread);
6264c50d8ae3SPaolo Bonzini 	if (!err)
6265c50d8ae3SPaolo Bonzini 		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6266c50d8ae3SPaolo Bonzini 
6267c50d8ae3SPaolo Bonzini 	return err;
6268c50d8ae3SPaolo Bonzini }
6269c50d8ae3SPaolo Bonzini 
6270c50d8ae3SPaolo Bonzini void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6271c50d8ae3SPaolo Bonzini {
6272c50d8ae3SPaolo Bonzini 	if (kvm->arch.nx_lpage_recovery_thread)
6273c50d8ae3SPaolo Bonzini 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6274c50d8ae3SPaolo Bonzini }
6275