xref: /linux/arch/x86/kvm/mmu/mmu.c (revision 4a98623d5d90175c0f99d185171e60807391e487)
1c50d8ae3SPaolo Bonzini // SPDX-License-Identifier: GPL-2.0-only
2c50d8ae3SPaolo Bonzini /*
3c50d8ae3SPaolo Bonzini  * Kernel-based Virtual Machine driver for Linux
4c50d8ae3SPaolo Bonzini  *
5c50d8ae3SPaolo Bonzini  * This module enables machines with Intel VT-x extensions to run virtual
6c50d8ae3SPaolo Bonzini  * machines without emulation or binary translation.
7c50d8ae3SPaolo Bonzini  *
8c50d8ae3SPaolo Bonzini  * MMU support
9c50d8ae3SPaolo Bonzini  *
10c50d8ae3SPaolo Bonzini  * Copyright (C) 2006 Qumranet, Inc.
11c50d8ae3SPaolo Bonzini  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12c50d8ae3SPaolo Bonzini  *
13c50d8ae3SPaolo Bonzini  * Authors:
14c50d8ae3SPaolo Bonzini  *   Yaniv Kamay  <yaniv@qumranet.com>
15c50d8ae3SPaolo Bonzini  *   Avi Kivity   <avi@qumranet.com>
16c50d8ae3SPaolo Bonzini  */
17c50d8ae3SPaolo Bonzini 
18c50d8ae3SPaolo Bonzini #include "irq.h"
1988197e6aS彭浩(Richard) #include "ioapic.h"
20c50d8ae3SPaolo Bonzini #include "mmu.h"
216ca9a6f3SSean Christopherson #include "mmu_internal.h"
22fe5db27dSBen Gardon #include "tdp_mmu.h"
23c50d8ae3SPaolo Bonzini #include "x86.h"
24c50d8ae3SPaolo Bonzini #include "kvm_cache_regs.h"
252f728d66SSean Christopherson #include "kvm_emulate.h"
26c50d8ae3SPaolo Bonzini #include "cpuid.h"
275a9624afSPaolo Bonzini #include "spte.h"
28c50d8ae3SPaolo Bonzini 
29c50d8ae3SPaolo Bonzini #include <linux/kvm_host.h>
30c50d8ae3SPaolo Bonzini #include <linux/types.h>
31c50d8ae3SPaolo Bonzini #include <linux/string.h>
32c50d8ae3SPaolo Bonzini #include <linux/mm.h>
33c50d8ae3SPaolo Bonzini #include <linux/highmem.h>
34c50d8ae3SPaolo Bonzini #include <linux/moduleparam.h>
35c50d8ae3SPaolo Bonzini #include <linux/export.h>
36c50d8ae3SPaolo Bonzini #include <linux/swap.h>
37c50d8ae3SPaolo Bonzini #include <linux/hugetlb.h>
38c50d8ae3SPaolo Bonzini #include <linux/compiler.h>
39c50d8ae3SPaolo Bonzini #include <linux/srcu.h>
40c50d8ae3SPaolo Bonzini #include <linux/slab.h>
41c50d8ae3SPaolo Bonzini #include <linux/sched/signal.h>
42c50d8ae3SPaolo Bonzini #include <linux/uaccess.h>
43c50d8ae3SPaolo Bonzini #include <linux/hash.h>
44c50d8ae3SPaolo Bonzini #include <linux/kern_levels.h>
45c50d8ae3SPaolo Bonzini #include <linux/kthread.h>
46c50d8ae3SPaolo Bonzini 
47c50d8ae3SPaolo Bonzini #include <asm/page.h>
48eb243d1dSIngo Molnar #include <asm/memtype.h>
49c50d8ae3SPaolo Bonzini #include <asm/cmpxchg.h>
50c50d8ae3SPaolo Bonzini #include <asm/io.h>
51*4a98623dSSean Christopherson #include <asm/set_memory.h>
52c50d8ae3SPaolo Bonzini #include <asm/vmx.h>
53c50d8ae3SPaolo Bonzini #include <asm/kvm_page_track.h>
54c50d8ae3SPaolo Bonzini #include "trace.h"
55c50d8ae3SPaolo Bonzini 
56c50d8ae3SPaolo Bonzini extern bool itlb_multihit_kvm_mitigation;
57c50d8ae3SPaolo Bonzini 
58c50d8ae3SPaolo Bonzini static int __read_mostly nx_huge_pages = -1;
59c50d8ae3SPaolo Bonzini #ifdef CONFIG_PREEMPT_RT
60c50d8ae3SPaolo Bonzini /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
61c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
62c50d8ae3SPaolo Bonzini #else
63c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
64c50d8ae3SPaolo Bonzini #endif
65c50d8ae3SPaolo Bonzini 
66c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
67c50d8ae3SPaolo Bonzini static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
68c50d8ae3SPaolo Bonzini 
69d5d6c18dSJoe Perches static const struct kernel_param_ops nx_huge_pages_ops = {
70c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages,
71c50d8ae3SPaolo Bonzini 	.get = param_get_bool,
72c50d8ae3SPaolo Bonzini };
73c50d8ae3SPaolo Bonzini 
74d5d6c18dSJoe Perches static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
75c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages_recovery_ratio,
76c50d8ae3SPaolo Bonzini 	.get = param_get_uint,
77c50d8ae3SPaolo Bonzini };
78c50d8ae3SPaolo Bonzini 
79c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
80c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages, "bool");
81c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
82c50d8ae3SPaolo Bonzini 		&nx_huge_pages_recovery_ratio, 0644);
83c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
84c50d8ae3SPaolo Bonzini 
8571fe7013SSean Christopherson static bool __read_mostly force_flush_and_sync_on_reuse;
8671fe7013SSean Christopherson module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
8771fe7013SSean Christopherson 
88c50d8ae3SPaolo Bonzini /*
89c50d8ae3SPaolo Bonzini  * When setting this variable to true it enables Two-Dimensional-Paging
90c50d8ae3SPaolo Bonzini  * where the hardware walks 2 page tables:
91c50d8ae3SPaolo Bonzini  * 1. the guest-virtual to guest-physical
92c50d8ae3SPaolo Bonzini  * 2. while doing 1. it walks guest-physical to host-physical
93c50d8ae3SPaolo Bonzini  * If the hardware supports that we don't need to do shadow paging.
94c50d8ae3SPaolo Bonzini  */
95c50d8ae3SPaolo Bonzini bool tdp_enabled = false;
96c50d8ae3SPaolo Bonzini 
971d92d2e8SSean Christopherson static int max_huge_page_level __read_mostly;
9883013059SSean Christopherson static int max_tdp_level __read_mostly;
99703c335dSSean Christopherson 
100c50d8ae3SPaolo Bonzini enum {
101c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PAGE_FAULT,
102c50d8ae3SPaolo Bonzini 	AUDIT_POST_PAGE_FAULT,
103c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PTE_WRITE,
104c50d8ae3SPaolo Bonzini 	AUDIT_POST_PTE_WRITE,
105c50d8ae3SPaolo Bonzini 	AUDIT_PRE_SYNC,
106c50d8ae3SPaolo Bonzini 	AUDIT_POST_SYNC
107c50d8ae3SPaolo Bonzini };
108c50d8ae3SPaolo Bonzini 
109c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1105a9624afSPaolo Bonzini bool dbg = 0;
111c50d8ae3SPaolo Bonzini module_param(dbg, bool, 0644);
112c50d8ae3SPaolo Bonzini #endif
113c50d8ae3SPaolo Bonzini 
114c50d8ae3SPaolo Bonzini #define PTE_PREFETCH_NUM		8
115c50d8ae3SPaolo Bonzini 
116c50d8ae3SPaolo Bonzini #define PT32_LEVEL_BITS 10
117c50d8ae3SPaolo Bonzini 
118c50d8ae3SPaolo Bonzini #define PT32_LEVEL_SHIFT(level) \
119c50d8ae3SPaolo Bonzini 		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
120c50d8ae3SPaolo Bonzini 
121c50d8ae3SPaolo Bonzini #define PT32_LVL_OFFSET_MASK(level) \
122c50d8ae3SPaolo Bonzini 	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
123c50d8ae3SPaolo Bonzini 						* PT32_LEVEL_BITS))) - 1))
124c50d8ae3SPaolo Bonzini 
125c50d8ae3SPaolo Bonzini #define PT32_INDEX(address, level)\
126c50d8ae3SPaolo Bonzini 	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
127c50d8ae3SPaolo Bonzini 
128c50d8ae3SPaolo Bonzini 
129c50d8ae3SPaolo Bonzini #define PT32_BASE_ADDR_MASK PAGE_MASK
130c50d8ae3SPaolo Bonzini #define PT32_DIR_BASE_ADDR_MASK \
131c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
132c50d8ae3SPaolo Bonzini #define PT32_LVL_ADDR_MASK(level) \
133c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
134c50d8ae3SPaolo Bonzini 					    * PT32_LEVEL_BITS))) - 1))
135c50d8ae3SPaolo Bonzini 
136c50d8ae3SPaolo Bonzini #include <trace/events/kvm.h>
137c50d8ae3SPaolo Bonzini 
138c50d8ae3SPaolo Bonzini /* make pte_list_desc fit well in cache line */
139c50d8ae3SPaolo Bonzini #define PTE_LIST_EXT 3
140c50d8ae3SPaolo Bonzini 
141c50d8ae3SPaolo Bonzini struct pte_list_desc {
142c50d8ae3SPaolo Bonzini 	u64 *sptes[PTE_LIST_EXT];
143c50d8ae3SPaolo Bonzini 	struct pte_list_desc *more;
144c50d8ae3SPaolo Bonzini };
145c50d8ae3SPaolo Bonzini 
146c50d8ae3SPaolo Bonzini struct kvm_shadow_walk_iterator {
147c50d8ae3SPaolo Bonzini 	u64 addr;
148c50d8ae3SPaolo Bonzini 	hpa_t shadow_addr;
149c50d8ae3SPaolo Bonzini 	u64 *sptep;
150c50d8ae3SPaolo Bonzini 	int level;
151c50d8ae3SPaolo Bonzini 	unsigned index;
152c50d8ae3SPaolo Bonzini };
153c50d8ae3SPaolo Bonzini 
154c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
155c50d8ae3SPaolo Bonzini 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
156c50d8ae3SPaolo Bonzini 					 (_root), (_addr));                \
157c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			           \
158c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
159c50d8ae3SPaolo Bonzini 
160c50d8ae3SPaolo Bonzini #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
161c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
162c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			\
163c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
164c50d8ae3SPaolo Bonzini 
165c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
166c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
167c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker)) &&				\
168c50d8ae3SPaolo Bonzini 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
169c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&(_walker), spte))
170c50d8ae3SPaolo Bonzini 
171c50d8ae3SPaolo Bonzini static struct kmem_cache *pte_list_desc_cache;
17202c00b3aSBen Gardon struct kmem_cache *mmu_page_header_cache;
173c50d8ae3SPaolo Bonzini static struct percpu_counter kvm_total_used_mmu_pages;
174c50d8ae3SPaolo Bonzini 
175c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 spte);
176c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
177c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
178c50d8ae3SPaolo Bonzini 
179c50d8ae3SPaolo Bonzini #define CREATE_TRACE_POINTS
180c50d8ae3SPaolo Bonzini #include "mmutrace.h"
181c50d8ae3SPaolo Bonzini 
182c50d8ae3SPaolo Bonzini 
183c50d8ae3SPaolo Bonzini static inline bool kvm_available_flush_tlb_with_range(void)
184c50d8ae3SPaolo Bonzini {
185afaf0b2fSSean Christopherson 	return kvm_x86_ops.tlb_remote_flush_with_range;
186c50d8ae3SPaolo Bonzini }
187c50d8ae3SPaolo Bonzini 
188c50d8ae3SPaolo Bonzini static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
189c50d8ae3SPaolo Bonzini 		struct kvm_tlb_range *range)
190c50d8ae3SPaolo Bonzini {
191c50d8ae3SPaolo Bonzini 	int ret = -ENOTSUPP;
192c50d8ae3SPaolo Bonzini 
193afaf0b2fSSean Christopherson 	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
194b3646477SJason Baron 		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
195c50d8ae3SPaolo Bonzini 
196c50d8ae3SPaolo Bonzini 	if (ret)
197c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
198c50d8ae3SPaolo Bonzini }
199c50d8ae3SPaolo Bonzini 
2002f2fad08SBen Gardon void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
201c50d8ae3SPaolo Bonzini 		u64 start_gfn, u64 pages)
202c50d8ae3SPaolo Bonzini {
203c50d8ae3SPaolo Bonzini 	struct kvm_tlb_range range;
204c50d8ae3SPaolo Bonzini 
205c50d8ae3SPaolo Bonzini 	range.start_gfn = start_gfn;
206c50d8ae3SPaolo Bonzini 	range.pages = pages;
207c50d8ae3SPaolo Bonzini 
208c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_range(kvm, &range);
209c50d8ae3SPaolo Bonzini }
210c50d8ae3SPaolo Bonzini 
2115a9624afSPaolo Bonzini bool is_nx_huge_page_enabled(void)
212c50d8ae3SPaolo Bonzini {
213c50d8ae3SPaolo Bonzini 	return READ_ONCE(nx_huge_pages);
214c50d8ae3SPaolo Bonzini }
215c50d8ae3SPaolo Bonzini 
2168f79b064SBen Gardon static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
2178f79b064SBen Gardon 			   unsigned int access)
2188f79b064SBen Gardon {
219c236d962SSean Christopherson 	u64 spte = make_mmio_spte(vcpu, gfn, access);
2208f79b064SBen Gardon 
221c236d962SSean Christopherson 	trace_mark_mmio_spte(sptep, gfn, spte);
222c236d962SSean Christopherson 	mmu_spte_set(sptep, spte);
223c50d8ae3SPaolo Bonzini }
224c50d8ae3SPaolo Bonzini 
225c50d8ae3SPaolo Bonzini static gfn_t get_mmio_spte_gfn(u64 spte)
226c50d8ae3SPaolo Bonzini {
227c50d8ae3SPaolo Bonzini 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
228c50d8ae3SPaolo Bonzini 
2298a967d65SPaolo Bonzini 	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
230c50d8ae3SPaolo Bonzini 	       & shadow_nonpresent_or_rsvd_mask;
231c50d8ae3SPaolo Bonzini 
232c50d8ae3SPaolo Bonzini 	return gpa >> PAGE_SHIFT;
233c50d8ae3SPaolo Bonzini }
234c50d8ae3SPaolo Bonzini 
235c50d8ae3SPaolo Bonzini static unsigned get_mmio_spte_access(u64 spte)
236c50d8ae3SPaolo Bonzini {
237c50d8ae3SPaolo Bonzini 	return spte & shadow_mmio_access_mask;
238c50d8ae3SPaolo Bonzini }
239c50d8ae3SPaolo Bonzini 
240c50d8ae3SPaolo Bonzini static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
241c50d8ae3SPaolo Bonzini {
242c50d8ae3SPaolo Bonzini 	u64 kvm_gen, spte_gen, gen;
243c50d8ae3SPaolo Bonzini 
244c50d8ae3SPaolo Bonzini 	gen = kvm_vcpu_memslots(vcpu)->generation;
245c50d8ae3SPaolo Bonzini 	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
246c50d8ae3SPaolo Bonzini 		return false;
247c50d8ae3SPaolo Bonzini 
248c50d8ae3SPaolo Bonzini 	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
249c50d8ae3SPaolo Bonzini 	spte_gen = get_mmio_spte_generation(spte);
250c50d8ae3SPaolo Bonzini 
251c50d8ae3SPaolo Bonzini 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
252c50d8ae3SPaolo Bonzini 	return likely(kvm_gen == spte_gen);
253c50d8ae3SPaolo Bonzini }
254c50d8ae3SPaolo Bonzini 
255cd313569SMohammed Gamal static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
256cd313569SMohammed Gamal                                   struct x86_exception *exception)
257cd313569SMohammed Gamal {
258ec7771abSMohammed Gamal 	/* Check if guest physical address doesn't exceed guest maximum */
259dc46515cSSean Christopherson 	if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
260ec7771abSMohammed Gamal 		exception->error_code |= PFERR_RSVD_MASK;
261ec7771abSMohammed Gamal 		return UNMAPPED_GVA;
262ec7771abSMohammed Gamal 	}
263ec7771abSMohammed Gamal 
264cd313569SMohammed Gamal         return gpa;
265cd313569SMohammed Gamal }
266cd313569SMohammed Gamal 
267c50d8ae3SPaolo Bonzini static int is_cpuid_PSE36(void)
268c50d8ae3SPaolo Bonzini {
269c50d8ae3SPaolo Bonzini 	return 1;
270c50d8ae3SPaolo Bonzini }
271c50d8ae3SPaolo Bonzini 
272c50d8ae3SPaolo Bonzini static int is_nx(struct kvm_vcpu *vcpu)
273c50d8ae3SPaolo Bonzini {
274c50d8ae3SPaolo Bonzini 	return vcpu->arch.efer & EFER_NX;
275c50d8ae3SPaolo Bonzini }
276c50d8ae3SPaolo Bonzini 
277c50d8ae3SPaolo Bonzini static gfn_t pse36_gfn_delta(u32 gpte)
278c50d8ae3SPaolo Bonzini {
279c50d8ae3SPaolo Bonzini 	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
280c50d8ae3SPaolo Bonzini 
281c50d8ae3SPaolo Bonzini 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
282c50d8ae3SPaolo Bonzini }
283c50d8ae3SPaolo Bonzini 
284c50d8ae3SPaolo Bonzini #ifdef CONFIG_X86_64
285c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
286c50d8ae3SPaolo Bonzini {
287c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
288c50d8ae3SPaolo Bonzini }
289c50d8ae3SPaolo Bonzini 
290c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
291c50d8ae3SPaolo Bonzini {
292c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
293c50d8ae3SPaolo Bonzini }
294c50d8ae3SPaolo Bonzini 
295c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
296c50d8ae3SPaolo Bonzini {
297c50d8ae3SPaolo Bonzini 	return xchg(sptep, spte);
298c50d8ae3SPaolo Bonzini }
299c50d8ae3SPaolo Bonzini 
300c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
301c50d8ae3SPaolo Bonzini {
302c50d8ae3SPaolo Bonzini 	return READ_ONCE(*sptep);
303c50d8ae3SPaolo Bonzini }
304c50d8ae3SPaolo Bonzini #else
305c50d8ae3SPaolo Bonzini union split_spte {
306c50d8ae3SPaolo Bonzini 	struct {
307c50d8ae3SPaolo Bonzini 		u32 spte_low;
308c50d8ae3SPaolo Bonzini 		u32 spte_high;
309c50d8ae3SPaolo Bonzini 	};
310c50d8ae3SPaolo Bonzini 	u64 spte;
311c50d8ae3SPaolo Bonzini };
312c50d8ae3SPaolo Bonzini 
313c50d8ae3SPaolo Bonzini static void count_spte_clear(u64 *sptep, u64 spte)
314c50d8ae3SPaolo Bonzini {
31557354682SSean Christopherson 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
316c50d8ae3SPaolo Bonzini 
317c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(spte))
318c50d8ae3SPaolo Bonzini 		return;
319c50d8ae3SPaolo Bonzini 
320c50d8ae3SPaolo Bonzini 	/* Ensure the spte is completely set before we increase the count */
321c50d8ae3SPaolo Bonzini 	smp_wmb();
322c50d8ae3SPaolo Bonzini 	sp->clear_spte_count++;
323c50d8ae3SPaolo Bonzini }
324c50d8ae3SPaolo Bonzini 
325c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
326c50d8ae3SPaolo Bonzini {
327c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
328c50d8ae3SPaolo Bonzini 
329c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
330c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
331c50d8ae3SPaolo Bonzini 
332c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
333c50d8ae3SPaolo Bonzini 
334c50d8ae3SPaolo Bonzini 	/*
335c50d8ae3SPaolo Bonzini 	 * If we map the spte from nonpresent to present, We should store
336c50d8ae3SPaolo Bonzini 	 * the high bits firstly, then set present bit, so cpu can not
337c50d8ae3SPaolo Bonzini 	 * fetch this spte while we are setting the spte.
338c50d8ae3SPaolo Bonzini 	 */
339c50d8ae3SPaolo Bonzini 	smp_wmb();
340c50d8ae3SPaolo Bonzini 
341c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
342c50d8ae3SPaolo Bonzini }
343c50d8ae3SPaolo Bonzini 
344c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
345c50d8ae3SPaolo Bonzini {
346c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
347c50d8ae3SPaolo Bonzini 
348c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
349c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
350c50d8ae3SPaolo Bonzini 
351c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
352c50d8ae3SPaolo Bonzini 
353c50d8ae3SPaolo Bonzini 	/*
354c50d8ae3SPaolo Bonzini 	 * If we map the spte from present to nonpresent, we should clear
355c50d8ae3SPaolo Bonzini 	 * present bit firstly to avoid vcpu fetch the old high bits.
356c50d8ae3SPaolo Bonzini 	 */
357c50d8ae3SPaolo Bonzini 	smp_wmb();
358c50d8ae3SPaolo Bonzini 
359c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
360c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
361c50d8ae3SPaolo Bonzini }
362c50d8ae3SPaolo Bonzini 
363c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
364c50d8ae3SPaolo Bonzini {
365c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte, orig;
366c50d8ae3SPaolo Bonzini 
367c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
368c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
369c50d8ae3SPaolo Bonzini 
370c50d8ae3SPaolo Bonzini 	/* xchg acts as a barrier before the setting of the high bits */
371c50d8ae3SPaolo Bonzini 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
372c50d8ae3SPaolo Bonzini 	orig.spte_high = ssptep->spte_high;
373c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
374c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
375c50d8ae3SPaolo Bonzini 
376c50d8ae3SPaolo Bonzini 	return orig.spte;
377c50d8ae3SPaolo Bonzini }
378c50d8ae3SPaolo Bonzini 
379c50d8ae3SPaolo Bonzini /*
380c50d8ae3SPaolo Bonzini  * The idea using the light way get the spte on x86_32 guest is from
381c50d8ae3SPaolo Bonzini  * gup_get_pte (mm/gup.c).
382c50d8ae3SPaolo Bonzini  *
383c50d8ae3SPaolo Bonzini  * An spte tlb flush may be pending, because kvm_set_pte_rmapp
384c50d8ae3SPaolo Bonzini  * coalesces them and we are running out of the MMU lock.  Therefore
385c50d8ae3SPaolo Bonzini  * we need to protect against in-progress updates of the spte.
386c50d8ae3SPaolo Bonzini  *
387c50d8ae3SPaolo Bonzini  * Reading the spte while an update is in progress may get the old value
388c50d8ae3SPaolo Bonzini  * for the high part of the spte.  The race is fine for a present->non-present
389c50d8ae3SPaolo Bonzini  * change (because the high part of the spte is ignored for non-present spte),
390c50d8ae3SPaolo Bonzini  * but for a present->present change we must reread the spte.
391c50d8ae3SPaolo Bonzini  *
392c50d8ae3SPaolo Bonzini  * All such changes are done in two steps (present->non-present and
393c50d8ae3SPaolo Bonzini  * non-present->present), hence it is enough to count the number of
394c50d8ae3SPaolo Bonzini  * present->non-present updates: if it changed while reading the spte,
395c50d8ae3SPaolo Bonzini  * we might have hit the race.  This is done using clear_spte_count.
396c50d8ae3SPaolo Bonzini  */
397c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
398c50d8ae3SPaolo Bonzini {
39957354682SSean Christopherson 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
400c50d8ae3SPaolo Bonzini 	union split_spte spte, *orig = (union split_spte *)sptep;
401c50d8ae3SPaolo Bonzini 	int count;
402c50d8ae3SPaolo Bonzini 
403c50d8ae3SPaolo Bonzini retry:
404c50d8ae3SPaolo Bonzini 	count = sp->clear_spte_count;
405c50d8ae3SPaolo Bonzini 	smp_rmb();
406c50d8ae3SPaolo Bonzini 
407c50d8ae3SPaolo Bonzini 	spte.spte_low = orig->spte_low;
408c50d8ae3SPaolo Bonzini 	smp_rmb();
409c50d8ae3SPaolo Bonzini 
410c50d8ae3SPaolo Bonzini 	spte.spte_high = orig->spte_high;
411c50d8ae3SPaolo Bonzini 	smp_rmb();
412c50d8ae3SPaolo Bonzini 
413c50d8ae3SPaolo Bonzini 	if (unlikely(spte.spte_low != orig->spte_low ||
414c50d8ae3SPaolo Bonzini 	      count != sp->clear_spte_count))
415c50d8ae3SPaolo Bonzini 		goto retry;
416c50d8ae3SPaolo Bonzini 
417c50d8ae3SPaolo Bonzini 	return spte.spte;
418c50d8ae3SPaolo Bonzini }
419c50d8ae3SPaolo Bonzini #endif
420c50d8ae3SPaolo Bonzini 
421c50d8ae3SPaolo Bonzini static bool spte_has_volatile_bits(u64 spte)
422c50d8ae3SPaolo Bonzini {
423c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(spte))
424c50d8ae3SPaolo Bonzini 		return false;
425c50d8ae3SPaolo Bonzini 
426c50d8ae3SPaolo Bonzini 	/*
427c50d8ae3SPaolo Bonzini 	 * Always atomically update spte if it can be updated
428c50d8ae3SPaolo Bonzini 	 * out of mmu-lock, it can ensure dirty bit is not lost,
429c50d8ae3SPaolo Bonzini 	 * also, it can help us to get a stable is_writable_pte()
430c50d8ae3SPaolo Bonzini 	 * to ensure tlb flush is not missed.
431c50d8ae3SPaolo Bonzini 	 */
432c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(spte) ||
433c50d8ae3SPaolo Bonzini 	    is_access_track_spte(spte))
434c50d8ae3SPaolo Bonzini 		return true;
435c50d8ae3SPaolo Bonzini 
436c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
437c50d8ae3SPaolo Bonzini 		if ((spte & shadow_accessed_mask) == 0 ||
438c50d8ae3SPaolo Bonzini 	    	    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
439c50d8ae3SPaolo Bonzini 			return true;
440c50d8ae3SPaolo Bonzini 	}
441c50d8ae3SPaolo Bonzini 
442c50d8ae3SPaolo Bonzini 	return false;
443c50d8ae3SPaolo Bonzini }
444c50d8ae3SPaolo Bonzini 
445c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_set:
446c50d8ae3SPaolo Bonzini  * Set the sptep from nonpresent to present.
447c50d8ae3SPaolo Bonzini  * Note: the sptep being assigned *must* be either not present
448c50d8ae3SPaolo Bonzini  * or in a state where the hardware will not attempt to update
449c50d8ae3SPaolo Bonzini  * the spte.
450c50d8ae3SPaolo Bonzini  */
451c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 new_spte)
452c50d8ae3SPaolo Bonzini {
453c50d8ae3SPaolo Bonzini 	WARN_ON(is_shadow_present_pte(*sptep));
454c50d8ae3SPaolo Bonzini 	__set_spte(sptep, new_spte);
455c50d8ae3SPaolo Bonzini }
456c50d8ae3SPaolo Bonzini 
457c50d8ae3SPaolo Bonzini /*
458c50d8ae3SPaolo Bonzini  * Update the SPTE (excluding the PFN), but do not track changes in its
459c50d8ae3SPaolo Bonzini  * accessed/dirty status.
460c50d8ae3SPaolo Bonzini  */
461c50d8ae3SPaolo Bonzini static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
462c50d8ae3SPaolo Bonzini {
463c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
464c50d8ae3SPaolo Bonzini 
465c50d8ae3SPaolo Bonzini 	WARN_ON(!is_shadow_present_pte(new_spte));
466c50d8ae3SPaolo Bonzini 
467c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte)) {
468c50d8ae3SPaolo Bonzini 		mmu_spte_set(sptep, new_spte);
469c50d8ae3SPaolo Bonzini 		return old_spte;
470c50d8ae3SPaolo Bonzini 	}
471c50d8ae3SPaolo Bonzini 
472c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
473c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, new_spte);
474c50d8ae3SPaolo Bonzini 	else
475c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, new_spte);
476c50d8ae3SPaolo Bonzini 
477c50d8ae3SPaolo Bonzini 	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
478c50d8ae3SPaolo Bonzini 
479c50d8ae3SPaolo Bonzini 	return old_spte;
480c50d8ae3SPaolo Bonzini }
481c50d8ae3SPaolo Bonzini 
482c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_update:
483c50d8ae3SPaolo Bonzini  * Update the state bits, it means the mapped pfn is not changed.
484c50d8ae3SPaolo Bonzini  *
485c50d8ae3SPaolo Bonzini  * Whenever we overwrite a writable spte with a read-only one we
486c50d8ae3SPaolo Bonzini  * should flush remote TLBs. Otherwise rmap_write_protect
487c50d8ae3SPaolo Bonzini  * will find a read-only spte, even though the writable spte
488c50d8ae3SPaolo Bonzini  * might be cached on a CPU's TLB, the return value indicates this
489c50d8ae3SPaolo Bonzini  * case.
490c50d8ae3SPaolo Bonzini  *
491c50d8ae3SPaolo Bonzini  * Returns true if the TLB needs to be flushed
492c50d8ae3SPaolo Bonzini  */
493c50d8ae3SPaolo Bonzini static bool mmu_spte_update(u64 *sptep, u64 new_spte)
494c50d8ae3SPaolo Bonzini {
495c50d8ae3SPaolo Bonzini 	bool flush = false;
496c50d8ae3SPaolo Bonzini 	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
497c50d8ae3SPaolo Bonzini 
498c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
499c50d8ae3SPaolo Bonzini 		return false;
500c50d8ae3SPaolo Bonzini 
501c50d8ae3SPaolo Bonzini 	/*
502c50d8ae3SPaolo Bonzini 	 * For the spte updated out of mmu-lock is safe, since
503c50d8ae3SPaolo Bonzini 	 * we always atomically update it, see the comments in
504c50d8ae3SPaolo Bonzini 	 * spte_has_volatile_bits().
505c50d8ae3SPaolo Bonzini 	 */
506c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(old_spte) &&
507c50d8ae3SPaolo Bonzini 	      !is_writable_pte(new_spte))
508c50d8ae3SPaolo Bonzini 		flush = true;
509c50d8ae3SPaolo Bonzini 
510c50d8ae3SPaolo Bonzini 	/*
511c50d8ae3SPaolo Bonzini 	 * Flush TLB when accessed/dirty states are changed in the page tables,
512c50d8ae3SPaolo Bonzini 	 * to guarantee consistency between TLB and page tables.
513c50d8ae3SPaolo Bonzini 	 */
514c50d8ae3SPaolo Bonzini 
515c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
516c50d8ae3SPaolo Bonzini 		flush = true;
517c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
518c50d8ae3SPaolo Bonzini 	}
519c50d8ae3SPaolo Bonzini 
520c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
521c50d8ae3SPaolo Bonzini 		flush = true;
522c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
523c50d8ae3SPaolo Bonzini 	}
524c50d8ae3SPaolo Bonzini 
525c50d8ae3SPaolo Bonzini 	return flush;
526c50d8ae3SPaolo Bonzini }
527c50d8ae3SPaolo Bonzini 
528c50d8ae3SPaolo Bonzini /*
529c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_track_bits:
530c50d8ae3SPaolo Bonzini  * It sets the sptep from present to nonpresent, and track the
531c50d8ae3SPaolo Bonzini  * state bits, it is used to clear the last level sptep.
532c50d8ae3SPaolo Bonzini  * Returns non-zero if the PTE was previously valid.
533c50d8ae3SPaolo Bonzini  */
534c50d8ae3SPaolo Bonzini static int mmu_spte_clear_track_bits(u64 *sptep)
535c50d8ae3SPaolo Bonzini {
536c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
537c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
538c50d8ae3SPaolo Bonzini 
539c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
540c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, 0ull);
541c50d8ae3SPaolo Bonzini 	else
542c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, 0ull);
543c50d8ae3SPaolo Bonzini 
544c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
545c50d8ae3SPaolo Bonzini 		return 0;
546c50d8ae3SPaolo Bonzini 
547c50d8ae3SPaolo Bonzini 	pfn = spte_to_pfn(old_spte);
548c50d8ae3SPaolo Bonzini 
549c50d8ae3SPaolo Bonzini 	/*
550c50d8ae3SPaolo Bonzini 	 * KVM does not hold the refcount of the page used by
551c50d8ae3SPaolo Bonzini 	 * kvm mmu, before reclaiming the page, we should
552c50d8ae3SPaolo Bonzini 	 * unmap it from mmu first.
553c50d8ae3SPaolo Bonzini 	 */
554c50d8ae3SPaolo Bonzini 	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
555c50d8ae3SPaolo Bonzini 
556c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte))
557c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(pfn);
558c50d8ae3SPaolo Bonzini 
559c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte))
560c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(pfn);
561c50d8ae3SPaolo Bonzini 
562c50d8ae3SPaolo Bonzini 	return 1;
563c50d8ae3SPaolo Bonzini }
564c50d8ae3SPaolo Bonzini 
565c50d8ae3SPaolo Bonzini /*
566c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_no_track:
567c50d8ae3SPaolo Bonzini  * Directly clear spte without caring the state bits of sptep,
568c50d8ae3SPaolo Bonzini  * it is used to set the upper level spte.
569c50d8ae3SPaolo Bonzini  */
570c50d8ae3SPaolo Bonzini static void mmu_spte_clear_no_track(u64 *sptep)
571c50d8ae3SPaolo Bonzini {
572c50d8ae3SPaolo Bonzini 	__update_clear_spte_fast(sptep, 0ull);
573c50d8ae3SPaolo Bonzini }
574c50d8ae3SPaolo Bonzini 
575c50d8ae3SPaolo Bonzini static u64 mmu_spte_get_lockless(u64 *sptep)
576c50d8ae3SPaolo Bonzini {
577c50d8ae3SPaolo Bonzini 	return __get_spte_lockless(sptep);
578c50d8ae3SPaolo Bonzini }
579c50d8ae3SPaolo Bonzini 
580c50d8ae3SPaolo Bonzini /* Restore an acc-track PTE back to a regular PTE */
581c50d8ae3SPaolo Bonzini static u64 restore_acc_track_spte(u64 spte)
582c50d8ae3SPaolo Bonzini {
583c50d8ae3SPaolo Bonzini 	u64 new_spte = spte;
5848a967d65SPaolo Bonzini 	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
5858a967d65SPaolo Bonzini 			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
586c50d8ae3SPaolo Bonzini 
587c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(spte_ad_enabled(spte));
588c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!is_access_track_spte(spte));
589c50d8ae3SPaolo Bonzini 
590c50d8ae3SPaolo Bonzini 	new_spte &= ~shadow_acc_track_mask;
5918a967d65SPaolo Bonzini 	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
5928a967d65SPaolo Bonzini 		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
593c50d8ae3SPaolo Bonzini 	new_spte |= saved_bits;
594c50d8ae3SPaolo Bonzini 
595c50d8ae3SPaolo Bonzini 	return new_spte;
596c50d8ae3SPaolo Bonzini }
597c50d8ae3SPaolo Bonzini 
598c50d8ae3SPaolo Bonzini /* Returns the Accessed status of the PTE and resets it at the same time. */
599c50d8ae3SPaolo Bonzini static bool mmu_spte_age(u64 *sptep)
600c50d8ae3SPaolo Bonzini {
601c50d8ae3SPaolo Bonzini 	u64 spte = mmu_spte_get_lockless(sptep);
602c50d8ae3SPaolo Bonzini 
603c50d8ae3SPaolo Bonzini 	if (!is_accessed_spte(spte))
604c50d8ae3SPaolo Bonzini 		return false;
605c50d8ae3SPaolo Bonzini 
606c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
607c50d8ae3SPaolo Bonzini 		clear_bit((ffs(shadow_accessed_mask) - 1),
608c50d8ae3SPaolo Bonzini 			  (unsigned long *)sptep);
609c50d8ae3SPaolo Bonzini 	} else {
610c50d8ae3SPaolo Bonzini 		/*
611c50d8ae3SPaolo Bonzini 		 * Capture the dirty status of the page, so that it doesn't get
612c50d8ae3SPaolo Bonzini 		 * lost when the SPTE is marked for access tracking.
613c50d8ae3SPaolo Bonzini 		 */
614c50d8ae3SPaolo Bonzini 		if (is_writable_pte(spte))
615c50d8ae3SPaolo Bonzini 			kvm_set_pfn_dirty(spte_to_pfn(spte));
616c50d8ae3SPaolo Bonzini 
617c50d8ae3SPaolo Bonzini 		spte = mark_spte_for_access_track(spte);
618c50d8ae3SPaolo Bonzini 		mmu_spte_update_no_track(sptep, spte);
619c50d8ae3SPaolo Bonzini 	}
620c50d8ae3SPaolo Bonzini 
621c50d8ae3SPaolo Bonzini 	return true;
622c50d8ae3SPaolo Bonzini }
623c50d8ae3SPaolo Bonzini 
624c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
625c50d8ae3SPaolo Bonzini {
626c50d8ae3SPaolo Bonzini 	/*
627c50d8ae3SPaolo Bonzini 	 * Prevent page table teardown by making any free-er wait during
628c50d8ae3SPaolo Bonzini 	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
629c50d8ae3SPaolo Bonzini 	 */
630c50d8ae3SPaolo Bonzini 	local_irq_disable();
631c50d8ae3SPaolo Bonzini 
632c50d8ae3SPaolo Bonzini 	/*
633c50d8ae3SPaolo Bonzini 	 * Make sure a following spte read is not reordered ahead of the write
634c50d8ae3SPaolo Bonzini 	 * to vcpu->mode.
635c50d8ae3SPaolo Bonzini 	 */
636c50d8ae3SPaolo Bonzini 	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
637c50d8ae3SPaolo Bonzini }
638c50d8ae3SPaolo Bonzini 
639c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
640c50d8ae3SPaolo Bonzini {
641c50d8ae3SPaolo Bonzini 	/*
642c50d8ae3SPaolo Bonzini 	 * Make sure the write to vcpu->mode is not reordered in front of
643c50d8ae3SPaolo Bonzini 	 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
644c50d8ae3SPaolo Bonzini 	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
645c50d8ae3SPaolo Bonzini 	 */
646c50d8ae3SPaolo Bonzini 	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
647c50d8ae3SPaolo Bonzini 	local_irq_enable();
648c50d8ae3SPaolo Bonzini }
649c50d8ae3SPaolo Bonzini 
650378f5cd6SSean Christopherson static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
651c50d8ae3SPaolo Bonzini {
652c50d8ae3SPaolo Bonzini 	int r;
653c50d8ae3SPaolo Bonzini 
654531281adSSean Christopherson 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
65594ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
656531281adSSean Christopherson 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
657c50d8ae3SPaolo Bonzini 	if (r)
658c50d8ae3SPaolo Bonzini 		return r;
65994ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
660171a90d7SSean Christopherson 				       PT64_ROOT_MAX_LEVEL);
661171a90d7SSean Christopherson 	if (r)
662171a90d7SSean Christopherson 		return r;
663378f5cd6SSean Christopherson 	if (maybe_indirect) {
66494ce87efSSean Christopherson 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
665171a90d7SSean Christopherson 					       PT64_ROOT_MAX_LEVEL);
666c50d8ae3SPaolo Bonzini 		if (r)
667c50d8ae3SPaolo Bonzini 			return r;
668378f5cd6SSean Christopherson 	}
66994ce87efSSean Christopherson 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
670531281adSSean Christopherson 					  PT64_ROOT_MAX_LEVEL);
671c50d8ae3SPaolo Bonzini }
672c50d8ae3SPaolo Bonzini 
673c50d8ae3SPaolo Bonzini static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
674c50d8ae3SPaolo Bonzini {
67594ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
67694ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
67794ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
67894ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
679c50d8ae3SPaolo Bonzini }
680c50d8ae3SPaolo Bonzini 
681c50d8ae3SPaolo Bonzini static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
682c50d8ae3SPaolo Bonzini {
68394ce87efSSean Christopherson 	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
684c50d8ae3SPaolo Bonzini }
685c50d8ae3SPaolo Bonzini 
686c50d8ae3SPaolo Bonzini static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
687c50d8ae3SPaolo Bonzini {
688c50d8ae3SPaolo Bonzini 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
689c50d8ae3SPaolo Bonzini }
690c50d8ae3SPaolo Bonzini 
691c50d8ae3SPaolo Bonzini static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
692c50d8ae3SPaolo Bonzini {
693c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
694c50d8ae3SPaolo Bonzini 		return sp->gfns[index];
695c50d8ae3SPaolo Bonzini 
696c50d8ae3SPaolo Bonzini 	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
697c50d8ae3SPaolo Bonzini }
698c50d8ae3SPaolo Bonzini 
699c50d8ae3SPaolo Bonzini static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
700c50d8ae3SPaolo Bonzini {
701c50d8ae3SPaolo Bonzini 	if (!sp->role.direct) {
702c50d8ae3SPaolo Bonzini 		sp->gfns[index] = gfn;
703c50d8ae3SPaolo Bonzini 		return;
704c50d8ae3SPaolo Bonzini 	}
705c50d8ae3SPaolo Bonzini 
706c50d8ae3SPaolo Bonzini 	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
707c50d8ae3SPaolo Bonzini 		pr_err_ratelimited("gfn mismatch under direct page %llx "
708c50d8ae3SPaolo Bonzini 				   "(expected %llx, got %llx)\n",
709c50d8ae3SPaolo Bonzini 				   sp->gfn,
710c50d8ae3SPaolo Bonzini 				   kvm_mmu_page_get_gfn(sp, index), gfn);
711c50d8ae3SPaolo Bonzini }
712c50d8ae3SPaolo Bonzini 
713c50d8ae3SPaolo Bonzini /*
714c50d8ae3SPaolo Bonzini  * Return the pointer to the large page information for a given gfn,
715c50d8ae3SPaolo Bonzini  * handling slots that are not large page aligned.
716c50d8ae3SPaolo Bonzini  */
717c50d8ae3SPaolo Bonzini static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
718c50d8ae3SPaolo Bonzini 					      struct kvm_memory_slot *slot,
719c50d8ae3SPaolo Bonzini 					      int level)
720c50d8ae3SPaolo Bonzini {
721c50d8ae3SPaolo Bonzini 	unsigned long idx;
722c50d8ae3SPaolo Bonzini 
723c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
724c50d8ae3SPaolo Bonzini 	return &slot->arch.lpage_info[level - 2][idx];
725c50d8ae3SPaolo Bonzini }
726c50d8ae3SPaolo Bonzini 
727c50d8ae3SPaolo Bonzini static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
728c50d8ae3SPaolo Bonzini 					    gfn_t gfn, int count)
729c50d8ae3SPaolo Bonzini {
730c50d8ae3SPaolo Bonzini 	struct kvm_lpage_info *linfo;
731c50d8ae3SPaolo Bonzini 	int i;
732c50d8ae3SPaolo Bonzini 
7333bae0459SSean Christopherson 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
734c50d8ae3SPaolo Bonzini 		linfo = lpage_info_slot(gfn, slot, i);
735c50d8ae3SPaolo Bonzini 		linfo->disallow_lpage += count;
736c50d8ae3SPaolo Bonzini 		WARN_ON(linfo->disallow_lpage < 0);
737c50d8ae3SPaolo Bonzini 	}
738c50d8ae3SPaolo Bonzini }
739c50d8ae3SPaolo Bonzini 
740c50d8ae3SPaolo Bonzini void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
741c50d8ae3SPaolo Bonzini {
742c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, 1);
743c50d8ae3SPaolo Bonzini }
744c50d8ae3SPaolo Bonzini 
745c50d8ae3SPaolo Bonzini void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
746c50d8ae3SPaolo Bonzini {
747c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, -1);
748c50d8ae3SPaolo Bonzini }
749c50d8ae3SPaolo Bonzini 
750c50d8ae3SPaolo Bonzini static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
751c50d8ae3SPaolo Bonzini {
752c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
753c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
754c50d8ae3SPaolo Bonzini 	gfn_t gfn;
755c50d8ae3SPaolo Bonzini 
756c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages++;
757c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
758c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
759c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
760c50d8ae3SPaolo Bonzini 
761c50d8ae3SPaolo Bonzini 	/* the non-leaf shadow pages are keeping readonly. */
7623bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
763c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_add_page(kvm, slot, gfn,
764c50d8ae3SPaolo Bonzini 						    KVM_PAGE_TRACK_WRITE);
765c50d8ae3SPaolo Bonzini 
766c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
767c50d8ae3SPaolo Bonzini }
768c50d8ae3SPaolo Bonzini 
76929cf0f50SBen Gardon void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
770c50d8ae3SPaolo Bonzini {
771c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
772c50d8ae3SPaolo Bonzini 		return;
773c50d8ae3SPaolo Bonzini 
774c50d8ae3SPaolo Bonzini 	++kvm->stat.nx_lpage_splits;
775c50d8ae3SPaolo Bonzini 	list_add_tail(&sp->lpage_disallowed_link,
776c50d8ae3SPaolo Bonzini 		      &kvm->arch.lpage_disallowed_mmu_pages);
777c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = true;
778c50d8ae3SPaolo Bonzini }
779c50d8ae3SPaolo Bonzini 
780c50d8ae3SPaolo Bonzini static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
781c50d8ae3SPaolo Bonzini {
782c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
783c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
784c50d8ae3SPaolo Bonzini 	gfn_t gfn;
785c50d8ae3SPaolo Bonzini 
786c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages--;
787c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
788c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
789c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
7903bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
791c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
792c50d8ae3SPaolo Bonzini 						       KVM_PAGE_TRACK_WRITE);
793c50d8ae3SPaolo Bonzini 
794c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_allow_lpage(slot, gfn);
795c50d8ae3SPaolo Bonzini }
796c50d8ae3SPaolo Bonzini 
79729cf0f50SBen Gardon void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
798c50d8ae3SPaolo Bonzini {
799c50d8ae3SPaolo Bonzini 	--kvm->stat.nx_lpage_splits;
800c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = false;
801c50d8ae3SPaolo Bonzini 	list_del(&sp->lpage_disallowed_link);
802c50d8ae3SPaolo Bonzini }
803c50d8ae3SPaolo Bonzini 
804c50d8ae3SPaolo Bonzini static struct kvm_memory_slot *
805c50d8ae3SPaolo Bonzini gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
806c50d8ae3SPaolo Bonzini 			    bool no_dirty_log)
807c50d8ae3SPaolo Bonzini {
808c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
809c50d8ae3SPaolo Bonzini 
810c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
81191b0d268SPaolo Bonzini 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
81291b0d268SPaolo Bonzini 		return NULL;
813044c59c4SPeter Xu 	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
81491b0d268SPaolo Bonzini 		return NULL;
815c50d8ae3SPaolo Bonzini 
816c50d8ae3SPaolo Bonzini 	return slot;
817c50d8ae3SPaolo Bonzini }
818c50d8ae3SPaolo Bonzini 
819c50d8ae3SPaolo Bonzini /*
820c50d8ae3SPaolo Bonzini  * About rmap_head encoding:
821c50d8ae3SPaolo Bonzini  *
822c50d8ae3SPaolo Bonzini  * If the bit zero of rmap_head->val is clear, then it points to the only spte
823c50d8ae3SPaolo Bonzini  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
824c50d8ae3SPaolo Bonzini  * pte_list_desc containing more mappings.
825c50d8ae3SPaolo Bonzini  */
826c50d8ae3SPaolo Bonzini 
827c50d8ae3SPaolo Bonzini /*
828c50d8ae3SPaolo Bonzini  * Returns the number of pointers in the rmap chain, not counting the new one.
829c50d8ae3SPaolo Bonzini  */
830c50d8ae3SPaolo Bonzini static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
831c50d8ae3SPaolo Bonzini 			struct kvm_rmap_head *rmap_head)
832c50d8ae3SPaolo Bonzini {
833c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
834c50d8ae3SPaolo Bonzini 	int i, count = 0;
835c50d8ae3SPaolo Bonzini 
836c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
837805a0f83SStephen Zhang 		rmap_printk("%p %llx 0->1\n", spte, *spte);
838c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)spte;
839c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
840805a0f83SStephen Zhang 		rmap_printk("%p %llx 1->many\n", spte, *spte);
841c50d8ae3SPaolo Bonzini 		desc = mmu_alloc_pte_list_desc(vcpu);
842c50d8ae3SPaolo Bonzini 		desc->sptes[0] = (u64 *)rmap_head->val;
843c50d8ae3SPaolo Bonzini 		desc->sptes[1] = spte;
844c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)desc | 1;
845c50d8ae3SPaolo Bonzini 		++count;
846c50d8ae3SPaolo Bonzini 	} else {
847805a0f83SStephen Zhang 		rmap_printk("%p %llx many->many\n", spte, *spte);
848c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
849c6c4f961SLi RongQing 		while (desc->sptes[PTE_LIST_EXT-1]) {
850c50d8ae3SPaolo Bonzini 			count += PTE_LIST_EXT;
851c6c4f961SLi RongQing 
852c6c4f961SLi RongQing 			if (!desc->more) {
853c50d8ae3SPaolo Bonzini 				desc->more = mmu_alloc_pte_list_desc(vcpu);
854c50d8ae3SPaolo Bonzini 				desc = desc->more;
855c6c4f961SLi RongQing 				break;
856c6c4f961SLi RongQing 			}
857c6c4f961SLi RongQing 			desc = desc->more;
858c50d8ae3SPaolo Bonzini 		}
859c50d8ae3SPaolo Bonzini 		for (i = 0; desc->sptes[i]; ++i)
860c50d8ae3SPaolo Bonzini 			++count;
861c50d8ae3SPaolo Bonzini 		desc->sptes[i] = spte;
862c50d8ae3SPaolo Bonzini 	}
863c50d8ae3SPaolo Bonzini 	return count;
864c50d8ae3SPaolo Bonzini }
865c50d8ae3SPaolo Bonzini 
866c50d8ae3SPaolo Bonzini static void
867c50d8ae3SPaolo Bonzini pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
868c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *desc, int i,
869c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *prev_desc)
870c50d8ae3SPaolo Bonzini {
871c50d8ae3SPaolo Bonzini 	int j;
872c50d8ae3SPaolo Bonzini 
873c50d8ae3SPaolo Bonzini 	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
874c50d8ae3SPaolo Bonzini 		;
875c50d8ae3SPaolo Bonzini 	desc->sptes[i] = desc->sptes[j];
876c50d8ae3SPaolo Bonzini 	desc->sptes[j] = NULL;
877c50d8ae3SPaolo Bonzini 	if (j != 0)
878c50d8ae3SPaolo Bonzini 		return;
879c50d8ae3SPaolo Bonzini 	if (!prev_desc && !desc->more)
880fe3c2b4cSMiaohe Lin 		rmap_head->val = 0;
881c50d8ae3SPaolo Bonzini 	else
882c50d8ae3SPaolo Bonzini 		if (prev_desc)
883c50d8ae3SPaolo Bonzini 			prev_desc->more = desc->more;
884c50d8ae3SPaolo Bonzini 		else
885c50d8ae3SPaolo Bonzini 			rmap_head->val = (unsigned long)desc->more | 1;
886c50d8ae3SPaolo Bonzini 	mmu_free_pte_list_desc(desc);
887c50d8ae3SPaolo Bonzini }
888c50d8ae3SPaolo Bonzini 
889c50d8ae3SPaolo Bonzini static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
890c50d8ae3SPaolo Bonzini {
891c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
892c50d8ae3SPaolo Bonzini 	struct pte_list_desc *prev_desc;
893c50d8ae3SPaolo Bonzini 	int i;
894c50d8ae3SPaolo Bonzini 
895c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
896c50d8ae3SPaolo Bonzini 		pr_err("%s: %p 0->BUG\n", __func__, spte);
897c50d8ae3SPaolo Bonzini 		BUG();
898c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
899805a0f83SStephen Zhang 		rmap_printk("%p 1->0\n", spte);
900c50d8ae3SPaolo Bonzini 		if ((u64 *)rmap_head->val != spte) {
901c50d8ae3SPaolo Bonzini 			pr_err("%s:  %p 1->BUG\n", __func__, spte);
902c50d8ae3SPaolo Bonzini 			BUG();
903c50d8ae3SPaolo Bonzini 		}
904c50d8ae3SPaolo Bonzini 		rmap_head->val = 0;
905c50d8ae3SPaolo Bonzini 	} else {
906805a0f83SStephen Zhang 		rmap_printk("%p many->many\n", spte);
907c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
908c50d8ae3SPaolo Bonzini 		prev_desc = NULL;
909c50d8ae3SPaolo Bonzini 		while (desc) {
910c50d8ae3SPaolo Bonzini 			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
911c50d8ae3SPaolo Bonzini 				if (desc->sptes[i] == spte) {
912c50d8ae3SPaolo Bonzini 					pte_list_desc_remove_entry(rmap_head,
913c50d8ae3SPaolo Bonzini 							desc, i, prev_desc);
914c50d8ae3SPaolo Bonzini 					return;
915c50d8ae3SPaolo Bonzini 				}
916c50d8ae3SPaolo Bonzini 			}
917c50d8ae3SPaolo Bonzini 			prev_desc = desc;
918c50d8ae3SPaolo Bonzini 			desc = desc->more;
919c50d8ae3SPaolo Bonzini 		}
920c50d8ae3SPaolo Bonzini 		pr_err("%s: %p many->many\n", __func__, spte);
921c50d8ae3SPaolo Bonzini 		BUG();
922c50d8ae3SPaolo Bonzini 	}
923c50d8ae3SPaolo Bonzini }
924c50d8ae3SPaolo Bonzini 
925c50d8ae3SPaolo Bonzini static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
926c50d8ae3SPaolo Bonzini {
927c50d8ae3SPaolo Bonzini 	mmu_spte_clear_track_bits(sptep);
928c50d8ae3SPaolo Bonzini 	__pte_list_remove(sptep, rmap_head);
929c50d8ae3SPaolo Bonzini }
930c50d8ae3SPaolo Bonzini 
931c50d8ae3SPaolo Bonzini static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
932c50d8ae3SPaolo Bonzini 					   struct kvm_memory_slot *slot)
933c50d8ae3SPaolo Bonzini {
934c50d8ae3SPaolo Bonzini 	unsigned long idx;
935c50d8ae3SPaolo Bonzini 
936c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
9373bae0459SSean Christopherson 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
938c50d8ae3SPaolo Bonzini }
939c50d8ae3SPaolo Bonzini 
940c50d8ae3SPaolo Bonzini static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
941c50d8ae3SPaolo Bonzini 					 struct kvm_mmu_page *sp)
942c50d8ae3SPaolo Bonzini {
943c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
944c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
945c50d8ae3SPaolo Bonzini 
946c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
947c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
948c50d8ae3SPaolo Bonzini 	return __gfn_to_rmap(gfn, sp->role.level, slot);
949c50d8ae3SPaolo Bonzini }
950c50d8ae3SPaolo Bonzini 
951c50d8ae3SPaolo Bonzini static bool rmap_can_add(struct kvm_vcpu *vcpu)
952c50d8ae3SPaolo Bonzini {
953356ec69aSSean Christopherson 	struct kvm_mmu_memory_cache *mc;
954c50d8ae3SPaolo Bonzini 
955356ec69aSSean Christopherson 	mc = &vcpu->arch.mmu_pte_list_desc_cache;
95694ce87efSSean Christopherson 	return kvm_mmu_memory_cache_nr_free_objects(mc);
957c50d8ae3SPaolo Bonzini }
958c50d8ae3SPaolo Bonzini 
959c50d8ae3SPaolo Bonzini static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
960c50d8ae3SPaolo Bonzini {
961c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
962c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
963c50d8ae3SPaolo Bonzini 
96457354682SSean Christopherson 	sp = sptep_to_sp(spte);
965c50d8ae3SPaolo Bonzini 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
966c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
967c50d8ae3SPaolo Bonzini 	return pte_list_add(vcpu, spte, rmap_head);
968c50d8ae3SPaolo Bonzini }
969c50d8ae3SPaolo Bonzini 
970c50d8ae3SPaolo Bonzini static void rmap_remove(struct kvm *kvm, u64 *spte)
971c50d8ae3SPaolo Bonzini {
972c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
973c50d8ae3SPaolo Bonzini 	gfn_t gfn;
974c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
975c50d8ae3SPaolo Bonzini 
97657354682SSean Christopherson 	sp = sptep_to_sp(spte);
977c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
978c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(kvm, gfn, sp);
979c50d8ae3SPaolo Bonzini 	__pte_list_remove(spte, rmap_head);
980c50d8ae3SPaolo Bonzini }
981c50d8ae3SPaolo Bonzini 
982c50d8ae3SPaolo Bonzini /*
983c50d8ae3SPaolo Bonzini  * Used by the following functions to iterate through the sptes linked by a
984c50d8ae3SPaolo Bonzini  * rmap.  All fields are private and not assumed to be used outside.
985c50d8ae3SPaolo Bonzini  */
986c50d8ae3SPaolo Bonzini struct rmap_iterator {
987c50d8ae3SPaolo Bonzini 	/* private fields */
988c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
989c50d8ae3SPaolo Bonzini 	int pos;			/* index of the sptep */
990c50d8ae3SPaolo Bonzini };
991c50d8ae3SPaolo Bonzini 
992c50d8ae3SPaolo Bonzini /*
993c50d8ae3SPaolo Bonzini  * Iteration must be started by this function.  This should also be used after
994c50d8ae3SPaolo Bonzini  * removing/dropping sptes from the rmap link because in such cases the
9950a03cbdaSMiaohe Lin  * information in the iterator may not be valid.
996c50d8ae3SPaolo Bonzini  *
997c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
998c50d8ae3SPaolo Bonzini  */
999c50d8ae3SPaolo Bonzini static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1000c50d8ae3SPaolo Bonzini 			   struct rmap_iterator *iter)
1001c50d8ae3SPaolo Bonzini {
1002c50d8ae3SPaolo Bonzini 	u64 *sptep;
1003c50d8ae3SPaolo Bonzini 
1004c50d8ae3SPaolo Bonzini 	if (!rmap_head->val)
1005c50d8ae3SPaolo Bonzini 		return NULL;
1006c50d8ae3SPaolo Bonzini 
1007c50d8ae3SPaolo Bonzini 	if (!(rmap_head->val & 1)) {
1008c50d8ae3SPaolo Bonzini 		iter->desc = NULL;
1009c50d8ae3SPaolo Bonzini 		sptep = (u64 *)rmap_head->val;
1010c50d8ae3SPaolo Bonzini 		goto out;
1011c50d8ae3SPaolo Bonzini 	}
1012c50d8ae3SPaolo Bonzini 
1013c50d8ae3SPaolo Bonzini 	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1014c50d8ae3SPaolo Bonzini 	iter->pos = 0;
1015c50d8ae3SPaolo Bonzini 	sptep = iter->desc->sptes[iter->pos];
1016c50d8ae3SPaolo Bonzini out:
1017c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1018c50d8ae3SPaolo Bonzini 	return sptep;
1019c50d8ae3SPaolo Bonzini }
1020c50d8ae3SPaolo Bonzini 
1021c50d8ae3SPaolo Bonzini /*
1022c50d8ae3SPaolo Bonzini  * Must be used with a valid iterator: e.g. after rmap_get_first().
1023c50d8ae3SPaolo Bonzini  *
1024c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1025c50d8ae3SPaolo Bonzini  */
1026c50d8ae3SPaolo Bonzini static u64 *rmap_get_next(struct rmap_iterator *iter)
1027c50d8ae3SPaolo Bonzini {
1028c50d8ae3SPaolo Bonzini 	u64 *sptep;
1029c50d8ae3SPaolo Bonzini 
1030c50d8ae3SPaolo Bonzini 	if (iter->desc) {
1031c50d8ae3SPaolo Bonzini 		if (iter->pos < PTE_LIST_EXT - 1) {
1032c50d8ae3SPaolo Bonzini 			++iter->pos;
1033c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1034c50d8ae3SPaolo Bonzini 			if (sptep)
1035c50d8ae3SPaolo Bonzini 				goto out;
1036c50d8ae3SPaolo Bonzini 		}
1037c50d8ae3SPaolo Bonzini 
1038c50d8ae3SPaolo Bonzini 		iter->desc = iter->desc->more;
1039c50d8ae3SPaolo Bonzini 
1040c50d8ae3SPaolo Bonzini 		if (iter->desc) {
1041c50d8ae3SPaolo Bonzini 			iter->pos = 0;
1042c50d8ae3SPaolo Bonzini 			/* desc->sptes[0] cannot be NULL */
1043c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1044c50d8ae3SPaolo Bonzini 			goto out;
1045c50d8ae3SPaolo Bonzini 		}
1046c50d8ae3SPaolo Bonzini 	}
1047c50d8ae3SPaolo Bonzini 
1048c50d8ae3SPaolo Bonzini 	return NULL;
1049c50d8ae3SPaolo Bonzini out:
1050c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1051c50d8ae3SPaolo Bonzini 	return sptep;
1052c50d8ae3SPaolo Bonzini }
1053c50d8ae3SPaolo Bonzini 
1054c50d8ae3SPaolo Bonzini #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
1055c50d8ae3SPaolo Bonzini 	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
1056c50d8ae3SPaolo Bonzini 	     _spte_; _spte_ = rmap_get_next(_iter_))
1057c50d8ae3SPaolo Bonzini 
1058c50d8ae3SPaolo Bonzini static void drop_spte(struct kvm *kvm, u64 *sptep)
1059c50d8ae3SPaolo Bonzini {
1060c50d8ae3SPaolo Bonzini 	if (mmu_spte_clear_track_bits(sptep))
1061c50d8ae3SPaolo Bonzini 		rmap_remove(kvm, sptep);
1062c50d8ae3SPaolo Bonzini }
1063c50d8ae3SPaolo Bonzini 
1064c50d8ae3SPaolo Bonzini 
1065c50d8ae3SPaolo Bonzini static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1066c50d8ae3SPaolo Bonzini {
1067c50d8ae3SPaolo Bonzini 	if (is_large_pte(*sptep)) {
106857354682SSean Christopherson 		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1069c50d8ae3SPaolo Bonzini 		drop_spte(kvm, sptep);
1070c50d8ae3SPaolo Bonzini 		--kvm->stat.lpages;
1071c50d8ae3SPaolo Bonzini 		return true;
1072c50d8ae3SPaolo Bonzini 	}
1073c50d8ae3SPaolo Bonzini 
1074c50d8ae3SPaolo Bonzini 	return false;
1075c50d8ae3SPaolo Bonzini }
1076c50d8ae3SPaolo Bonzini 
1077c50d8ae3SPaolo Bonzini static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1078c50d8ae3SPaolo Bonzini {
1079c50d8ae3SPaolo Bonzini 	if (__drop_large_spte(vcpu->kvm, sptep)) {
108057354682SSean Christopherson 		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1081c50d8ae3SPaolo Bonzini 
1082c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1083c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1084c50d8ae3SPaolo Bonzini 	}
1085c50d8ae3SPaolo Bonzini }
1086c50d8ae3SPaolo Bonzini 
1087c50d8ae3SPaolo Bonzini /*
1088c50d8ae3SPaolo Bonzini  * Write-protect on the specified @sptep, @pt_protect indicates whether
1089c50d8ae3SPaolo Bonzini  * spte write-protection is caused by protecting shadow page table.
1090c50d8ae3SPaolo Bonzini  *
1091c50d8ae3SPaolo Bonzini  * Note: write protection is difference between dirty logging and spte
1092c50d8ae3SPaolo Bonzini  * protection:
1093c50d8ae3SPaolo Bonzini  * - for dirty logging, the spte can be set to writable at anytime if
1094c50d8ae3SPaolo Bonzini  *   its dirty bitmap is properly set.
1095c50d8ae3SPaolo Bonzini  * - for spte protection, the spte can be writable only after unsync-ing
1096c50d8ae3SPaolo Bonzini  *   shadow page.
1097c50d8ae3SPaolo Bonzini  *
1098c50d8ae3SPaolo Bonzini  * Return true if tlb need be flushed.
1099c50d8ae3SPaolo Bonzini  */
1100c50d8ae3SPaolo Bonzini static bool spte_write_protect(u64 *sptep, bool pt_protect)
1101c50d8ae3SPaolo Bonzini {
1102c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1103c50d8ae3SPaolo Bonzini 
1104c50d8ae3SPaolo Bonzini 	if (!is_writable_pte(spte) &&
1105c50d8ae3SPaolo Bonzini 	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1106c50d8ae3SPaolo Bonzini 		return false;
1107c50d8ae3SPaolo Bonzini 
1108805a0f83SStephen Zhang 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1109c50d8ae3SPaolo Bonzini 
1110c50d8ae3SPaolo Bonzini 	if (pt_protect)
11115fc3424fSSean Christopherson 		spte &= ~shadow_mmu_writable_mask;
1112c50d8ae3SPaolo Bonzini 	spte = spte & ~PT_WRITABLE_MASK;
1113c50d8ae3SPaolo Bonzini 
1114c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1115c50d8ae3SPaolo Bonzini }
1116c50d8ae3SPaolo Bonzini 
1117c50d8ae3SPaolo Bonzini static bool __rmap_write_protect(struct kvm *kvm,
1118c50d8ae3SPaolo Bonzini 				 struct kvm_rmap_head *rmap_head,
1119c50d8ae3SPaolo Bonzini 				 bool pt_protect)
1120c50d8ae3SPaolo Bonzini {
1121c50d8ae3SPaolo Bonzini 	u64 *sptep;
1122c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1123c50d8ae3SPaolo Bonzini 	bool flush = false;
1124c50d8ae3SPaolo Bonzini 
1125c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1126c50d8ae3SPaolo Bonzini 		flush |= spte_write_protect(sptep, pt_protect);
1127c50d8ae3SPaolo Bonzini 
1128c50d8ae3SPaolo Bonzini 	return flush;
1129c50d8ae3SPaolo Bonzini }
1130c50d8ae3SPaolo Bonzini 
1131c50d8ae3SPaolo Bonzini static bool spte_clear_dirty(u64 *sptep)
1132c50d8ae3SPaolo Bonzini {
1133c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1134c50d8ae3SPaolo Bonzini 
1135805a0f83SStephen Zhang 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1136c50d8ae3SPaolo Bonzini 
1137c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!spte_ad_enabled(spte));
1138c50d8ae3SPaolo Bonzini 	spte &= ~shadow_dirty_mask;
1139c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1140c50d8ae3SPaolo Bonzini }
1141c50d8ae3SPaolo Bonzini 
1142c50d8ae3SPaolo Bonzini static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1143c50d8ae3SPaolo Bonzini {
1144c50d8ae3SPaolo Bonzini 	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1145c50d8ae3SPaolo Bonzini 					       (unsigned long *)sptep);
1146c50d8ae3SPaolo Bonzini 	if (was_writable && !spte_ad_enabled(*sptep))
1147c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1148c50d8ae3SPaolo Bonzini 
1149c50d8ae3SPaolo Bonzini 	return was_writable;
1150c50d8ae3SPaolo Bonzini }
1151c50d8ae3SPaolo Bonzini 
1152c50d8ae3SPaolo Bonzini /*
1153c50d8ae3SPaolo Bonzini  * Gets the GFN ready for another round of dirty logging by clearing the
1154c50d8ae3SPaolo Bonzini  *	- D bit on ad-enabled SPTEs, and
1155c50d8ae3SPaolo Bonzini  *	- W bit on ad-disabled SPTEs.
1156c50d8ae3SPaolo Bonzini  * Returns true iff any D or W bits were cleared.
1157c50d8ae3SPaolo Bonzini  */
11580a234f5dSSean Christopherson static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
11590a234f5dSSean Christopherson 			       struct kvm_memory_slot *slot)
1160c50d8ae3SPaolo Bonzini {
1161c50d8ae3SPaolo Bonzini 	u64 *sptep;
1162c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1163c50d8ae3SPaolo Bonzini 	bool flush = false;
1164c50d8ae3SPaolo Bonzini 
1165c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1166c50d8ae3SPaolo Bonzini 		if (spte_ad_need_write_protect(*sptep))
1167c50d8ae3SPaolo Bonzini 			flush |= spte_wrprot_for_clear_dirty(sptep);
1168c50d8ae3SPaolo Bonzini 		else
1169c50d8ae3SPaolo Bonzini 			flush |= spte_clear_dirty(sptep);
1170c50d8ae3SPaolo Bonzini 
1171c50d8ae3SPaolo Bonzini 	return flush;
1172c50d8ae3SPaolo Bonzini }
1173c50d8ae3SPaolo Bonzini 
1174c50d8ae3SPaolo Bonzini /**
1175c50d8ae3SPaolo Bonzini  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1176c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1177c50d8ae3SPaolo Bonzini  * @slot: slot to protect
1178c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1179c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should protect
1180c50d8ae3SPaolo Bonzini  *
1181c50d8ae3SPaolo Bonzini  * Used when we do not need to care about huge page mappings: e.g. during dirty
1182c50d8ae3SPaolo Bonzini  * logging we do not have any such mappings.
1183c50d8ae3SPaolo Bonzini  */
1184c50d8ae3SPaolo Bonzini static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1185c50d8ae3SPaolo Bonzini 				     struct kvm_memory_slot *slot,
1186c50d8ae3SPaolo Bonzini 				     gfn_t gfn_offset, unsigned long mask)
1187c50d8ae3SPaolo Bonzini {
1188c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1189c50d8ae3SPaolo Bonzini 
1190897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1191a6a0b05dSBen Gardon 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1192a6a0b05dSBen Gardon 				slot->base_gfn + gfn_offset, mask, true);
1193c50d8ae3SPaolo Bonzini 	while (mask) {
1194c50d8ae3SPaolo Bonzini 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
11953bae0459SSean Christopherson 					  PG_LEVEL_4K, slot);
1196c50d8ae3SPaolo Bonzini 		__rmap_write_protect(kvm, rmap_head, false);
1197c50d8ae3SPaolo Bonzini 
1198c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1199c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1200c50d8ae3SPaolo Bonzini 	}
1201c50d8ae3SPaolo Bonzini }
1202c50d8ae3SPaolo Bonzini 
1203c50d8ae3SPaolo Bonzini /**
1204c50d8ae3SPaolo Bonzini  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1205c50d8ae3SPaolo Bonzini  * protect the page if the D-bit isn't supported.
1206c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1207c50d8ae3SPaolo Bonzini  * @slot: slot to clear D-bit
1208c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1209c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should clear D-bit
1210c50d8ae3SPaolo Bonzini  *
1211c50d8ae3SPaolo Bonzini  * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1212c50d8ae3SPaolo Bonzini  */
1213a018eba5SSean Christopherson static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1214c50d8ae3SPaolo Bonzini 					 struct kvm_memory_slot *slot,
1215c50d8ae3SPaolo Bonzini 					 gfn_t gfn_offset, unsigned long mask)
1216c50d8ae3SPaolo Bonzini {
1217c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1218c50d8ae3SPaolo Bonzini 
1219897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1220a6a0b05dSBen Gardon 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1221a6a0b05dSBen Gardon 				slot->base_gfn + gfn_offset, mask, false);
1222c50d8ae3SPaolo Bonzini 	while (mask) {
1223c50d8ae3SPaolo Bonzini 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
12243bae0459SSean Christopherson 					  PG_LEVEL_4K, slot);
12250a234f5dSSean Christopherson 		__rmap_clear_dirty(kvm, rmap_head, slot);
1226c50d8ae3SPaolo Bonzini 
1227c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1228c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1229c50d8ae3SPaolo Bonzini 	}
1230c50d8ae3SPaolo Bonzini }
1231c50d8ae3SPaolo Bonzini 
1232c50d8ae3SPaolo Bonzini /**
1233c50d8ae3SPaolo Bonzini  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1234c50d8ae3SPaolo Bonzini  * PT level pages.
1235c50d8ae3SPaolo Bonzini  *
1236c50d8ae3SPaolo Bonzini  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1237c50d8ae3SPaolo Bonzini  * enable dirty logging for them.
1238c50d8ae3SPaolo Bonzini  *
1239c50d8ae3SPaolo Bonzini  * Used when we do not need to care about huge page mappings: e.g. during dirty
1240c50d8ae3SPaolo Bonzini  * logging we do not have any such mappings.
1241c50d8ae3SPaolo Bonzini  */
1242c50d8ae3SPaolo Bonzini void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1243c50d8ae3SPaolo Bonzini 				struct kvm_memory_slot *slot,
1244c50d8ae3SPaolo Bonzini 				gfn_t gfn_offset, unsigned long mask)
1245c50d8ae3SPaolo Bonzini {
1246a018eba5SSean Christopherson 	if (kvm_x86_ops.cpu_dirty_log_size)
1247a018eba5SSean Christopherson 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1248c50d8ae3SPaolo Bonzini 	else
1249c50d8ae3SPaolo Bonzini 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1250c50d8ae3SPaolo Bonzini }
1251c50d8ae3SPaolo Bonzini 
1252fb04a1edSPeter Xu int kvm_cpu_dirty_log_size(void)
1253fb04a1edSPeter Xu {
12546dd03800SSean Christopherson 	return kvm_x86_ops.cpu_dirty_log_size;
1255fb04a1edSPeter Xu }
1256fb04a1edSPeter Xu 
1257c50d8ae3SPaolo Bonzini bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1258c50d8ae3SPaolo Bonzini 				    struct kvm_memory_slot *slot, u64 gfn)
1259c50d8ae3SPaolo Bonzini {
1260c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1261c50d8ae3SPaolo Bonzini 	int i;
1262c50d8ae3SPaolo Bonzini 	bool write_protected = false;
1263c50d8ae3SPaolo Bonzini 
12643bae0459SSean Christopherson 	for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1265c50d8ae3SPaolo Bonzini 		rmap_head = __gfn_to_rmap(gfn, i, slot);
1266c50d8ae3SPaolo Bonzini 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1267c50d8ae3SPaolo Bonzini 	}
1268c50d8ae3SPaolo Bonzini 
1269897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
127046044f72SBen Gardon 		write_protected |=
127146044f72SBen Gardon 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
127246044f72SBen Gardon 
1273c50d8ae3SPaolo Bonzini 	return write_protected;
1274c50d8ae3SPaolo Bonzini }
1275c50d8ae3SPaolo Bonzini 
1276c50d8ae3SPaolo Bonzini static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1277c50d8ae3SPaolo Bonzini {
1278c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1279c50d8ae3SPaolo Bonzini 
1280c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1281c50d8ae3SPaolo Bonzini 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
1282c50d8ae3SPaolo Bonzini }
1283c50d8ae3SPaolo Bonzini 
12840a234f5dSSean Christopherson static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
12850a234f5dSSean Christopherson 			  struct kvm_memory_slot *slot)
1286c50d8ae3SPaolo Bonzini {
1287c50d8ae3SPaolo Bonzini 	u64 *sptep;
1288c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1289c50d8ae3SPaolo Bonzini 	bool flush = false;
1290c50d8ae3SPaolo Bonzini 
1291c50d8ae3SPaolo Bonzini 	while ((sptep = rmap_get_first(rmap_head, &iter))) {
1292805a0f83SStephen Zhang 		rmap_printk("spte %p %llx.\n", sptep, *sptep);
1293c50d8ae3SPaolo Bonzini 
1294c50d8ae3SPaolo Bonzini 		pte_list_remove(rmap_head, sptep);
1295c50d8ae3SPaolo Bonzini 		flush = true;
1296c50d8ae3SPaolo Bonzini 	}
1297c50d8ae3SPaolo Bonzini 
1298c50d8ae3SPaolo Bonzini 	return flush;
1299c50d8ae3SPaolo Bonzini }
1300c50d8ae3SPaolo Bonzini 
1301c50d8ae3SPaolo Bonzini static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1302c50d8ae3SPaolo Bonzini 			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
1303c50d8ae3SPaolo Bonzini 			   unsigned long data)
1304c50d8ae3SPaolo Bonzini {
13050a234f5dSSean Christopherson 	return kvm_zap_rmapp(kvm, rmap_head, slot);
1306c50d8ae3SPaolo Bonzini }
1307c50d8ae3SPaolo Bonzini 
1308c50d8ae3SPaolo Bonzini static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1309c50d8ae3SPaolo Bonzini 			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
1310c50d8ae3SPaolo Bonzini 			     unsigned long data)
1311c50d8ae3SPaolo Bonzini {
1312c50d8ae3SPaolo Bonzini 	u64 *sptep;
1313c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1314c50d8ae3SPaolo Bonzini 	int need_flush = 0;
1315c50d8ae3SPaolo Bonzini 	u64 new_spte;
1316c50d8ae3SPaolo Bonzini 	pte_t *ptep = (pte_t *)data;
1317c50d8ae3SPaolo Bonzini 	kvm_pfn_t new_pfn;
1318c50d8ae3SPaolo Bonzini 
1319c50d8ae3SPaolo Bonzini 	WARN_ON(pte_huge(*ptep));
1320c50d8ae3SPaolo Bonzini 	new_pfn = pte_pfn(*ptep);
1321c50d8ae3SPaolo Bonzini 
1322c50d8ae3SPaolo Bonzini restart:
1323c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
1324805a0f83SStephen Zhang 		rmap_printk("spte %p %llx gfn %llx (%d)\n",
1325c50d8ae3SPaolo Bonzini 			    sptep, *sptep, gfn, level);
1326c50d8ae3SPaolo Bonzini 
1327c50d8ae3SPaolo Bonzini 		need_flush = 1;
1328c50d8ae3SPaolo Bonzini 
1329c50d8ae3SPaolo Bonzini 		if (pte_write(*ptep)) {
1330c50d8ae3SPaolo Bonzini 			pte_list_remove(rmap_head, sptep);
1331c50d8ae3SPaolo Bonzini 			goto restart;
1332c50d8ae3SPaolo Bonzini 		} else {
1333cb3eedabSPaolo Bonzini 			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
1334cb3eedabSPaolo Bonzini 					*sptep, new_pfn);
1335c50d8ae3SPaolo Bonzini 
1336c50d8ae3SPaolo Bonzini 			mmu_spte_clear_track_bits(sptep);
1337c50d8ae3SPaolo Bonzini 			mmu_spte_set(sptep, new_spte);
1338c50d8ae3SPaolo Bonzini 		}
1339c50d8ae3SPaolo Bonzini 	}
1340c50d8ae3SPaolo Bonzini 
1341c50d8ae3SPaolo Bonzini 	if (need_flush && kvm_available_flush_tlb_with_range()) {
1342c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1343c50d8ae3SPaolo Bonzini 		return 0;
1344c50d8ae3SPaolo Bonzini 	}
1345c50d8ae3SPaolo Bonzini 
1346c50d8ae3SPaolo Bonzini 	return need_flush;
1347c50d8ae3SPaolo Bonzini }
1348c50d8ae3SPaolo Bonzini 
1349c50d8ae3SPaolo Bonzini struct slot_rmap_walk_iterator {
1350c50d8ae3SPaolo Bonzini 	/* input fields. */
1351c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1352c50d8ae3SPaolo Bonzini 	gfn_t start_gfn;
1353c50d8ae3SPaolo Bonzini 	gfn_t end_gfn;
1354c50d8ae3SPaolo Bonzini 	int start_level;
1355c50d8ae3SPaolo Bonzini 	int end_level;
1356c50d8ae3SPaolo Bonzini 
1357c50d8ae3SPaolo Bonzini 	/* output fields. */
1358c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1359c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap;
1360c50d8ae3SPaolo Bonzini 	int level;
1361c50d8ae3SPaolo Bonzini 
1362c50d8ae3SPaolo Bonzini 	/* private field. */
1363c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *end_rmap;
1364c50d8ae3SPaolo Bonzini };
1365c50d8ae3SPaolo Bonzini 
1366c50d8ae3SPaolo Bonzini static void
1367c50d8ae3SPaolo Bonzini rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1368c50d8ae3SPaolo Bonzini {
1369c50d8ae3SPaolo Bonzini 	iterator->level = level;
1370c50d8ae3SPaolo Bonzini 	iterator->gfn = iterator->start_gfn;
1371c50d8ae3SPaolo Bonzini 	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
1372c50d8ae3SPaolo Bonzini 	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
1373c50d8ae3SPaolo Bonzini 					   iterator->slot);
1374c50d8ae3SPaolo Bonzini }
1375c50d8ae3SPaolo Bonzini 
1376c50d8ae3SPaolo Bonzini static void
1377c50d8ae3SPaolo Bonzini slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1378c50d8ae3SPaolo Bonzini 		    struct kvm_memory_slot *slot, int start_level,
1379c50d8ae3SPaolo Bonzini 		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
1380c50d8ae3SPaolo Bonzini {
1381c50d8ae3SPaolo Bonzini 	iterator->slot = slot;
1382c50d8ae3SPaolo Bonzini 	iterator->start_level = start_level;
1383c50d8ae3SPaolo Bonzini 	iterator->end_level = end_level;
1384c50d8ae3SPaolo Bonzini 	iterator->start_gfn = start_gfn;
1385c50d8ae3SPaolo Bonzini 	iterator->end_gfn = end_gfn;
1386c50d8ae3SPaolo Bonzini 
1387c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->start_level);
1388c50d8ae3SPaolo Bonzini }
1389c50d8ae3SPaolo Bonzini 
1390c50d8ae3SPaolo Bonzini static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1391c50d8ae3SPaolo Bonzini {
1392c50d8ae3SPaolo Bonzini 	return !!iterator->rmap;
1393c50d8ae3SPaolo Bonzini }
1394c50d8ae3SPaolo Bonzini 
1395c50d8ae3SPaolo Bonzini static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1396c50d8ae3SPaolo Bonzini {
1397c50d8ae3SPaolo Bonzini 	if (++iterator->rmap <= iterator->end_rmap) {
1398c50d8ae3SPaolo Bonzini 		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1399c50d8ae3SPaolo Bonzini 		return;
1400c50d8ae3SPaolo Bonzini 	}
1401c50d8ae3SPaolo Bonzini 
1402c50d8ae3SPaolo Bonzini 	if (++iterator->level > iterator->end_level) {
1403c50d8ae3SPaolo Bonzini 		iterator->rmap = NULL;
1404c50d8ae3SPaolo Bonzini 		return;
1405c50d8ae3SPaolo Bonzini 	}
1406c50d8ae3SPaolo Bonzini 
1407c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->level);
1408c50d8ae3SPaolo Bonzini }
1409c50d8ae3SPaolo Bonzini 
1410c50d8ae3SPaolo Bonzini #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
1411c50d8ae3SPaolo Bonzini 	   _start_gfn, _end_gfn, _iter_)				\
1412c50d8ae3SPaolo Bonzini 	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
1413c50d8ae3SPaolo Bonzini 				 _end_level_, _start_gfn, _end_gfn);	\
1414c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_okay(_iter_);				\
1415c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_next(_iter_))
1416c50d8ae3SPaolo Bonzini 
1417c1b91493SSean Christopherson typedef int (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1418c1b91493SSean Christopherson 			      struct kvm_memory_slot *slot, gfn_t gfn,
1419c1b91493SSean Christopherson 			      int level, unsigned long data);
1420c1b91493SSean Christopherson 
1421c1b91493SSean Christopherson static __always_inline int kvm_handle_hva_range(struct kvm *kvm,
1422c50d8ae3SPaolo Bonzini 						unsigned long start,
1423c50d8ae3SPaolo Bonzini 						unsigned long end,
1424c50d8ae3SPaolo Bonzini 						unsigned long data,
1425c1b91493SSean Christopherson 						rmap_handler_t handler)
1426c50d8ae3SPaolo Bonzini {
1427c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
1428c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
1429c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
1430c50d8ae3SPaolo Bonzini 	int ret = 0;
1431c50d8ae3SPaolo Bonzini 	int i;
1432c50d8ae3SPaolo Bonzini 
1433c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1434c50d8ae3SPaolo Bonzini 		slots = __kvm_memslots(kvm, i);
1435c50d8ae3SPaolo Bonzini 		kvm_for_each_memslot(memslot, slots) {
1436c50d8ae3SPaolo Bonzini 			unsigned long hva_start, hva_end;
1437c50d8ae3SPaolo Bonzini 			gfn_t gfn_start, gfn_end;
1438c50d8ae3SPaolo Bonzini 
1439c50d8ae3SPaolo Bonzini 			hva_start = max(start, memslot->userspace_addr);
1440c50d8ae3SPaolo Bonzini 			hva_end = min(end, memslot->userspace_addr +
1441c50d8ae3SPaolo Bonzini 				      (memslot->npages << PAGE_SHIFT));
1442c50d8ae3SPaolo Bonzini 			if (hva_start >= hva_end)
1443c50d8ae3SPaolo Bonzini 				continue;
1444c50d8ae3SPaolo Bonzini 			/*
1445c50d8ae3SPaolo Bonzini 			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
1446c50d8ae3SPaolo Bonzini 			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1447c50d8ae3SPaolo Bonzini 			 */
1448c50d8ae3SPaolo Bonzini 			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
1449c50d8ae3SPaolo Bonzini 			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1450c50d8ae3SPaolo Bonzini 
14513bae0459SSean Christopherson 			for_each_slot_rmap_range(memslot, PG_LEVEL_4K,
1452e662ec3eSSean Christopherson 						 KVM_MAX_HUGEPAGE_LEVEL,
1453c50d8ae3SPaolo Bonzini 						 gfn_start, gfn_end - 1,
1454c50d8ae3SPaolo Bonzini 						 &iterator)
1455c50d8ae3SPaolo Bonzini 				ret |= handler(kvm, iterator.rmap, memslot,
1456c50d8ae3SPaolo Bonzini 					       iterator.gfn, iterator.level, data);
1457c50d8ae3SPaolo Bonzini 		}
1458c50d8ae3SPaolo Bonzini 	}
1459c50d8ae3SPaolo Bonzini 
1460c50d8ae3SPaolo Bonzini 	return ret;
1461c50d8ae3SPaolo Bonzini }
1462c50d8ae3SPaolo Bonzini 
1463c50d8ae3SPaolo Bonzini static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
1464c1b91493SSean Christopherson 			  unsigned long data, rmap_handler_t handler)
1465c50d8ae3SPaolo Bonzini {
1466c50d8ae3SPaolo Bonzini 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
1467c50d8ae3SPaolo Bonzini }
1468c50d8ae3SPaolo Bonzini 
1469fdfe7cbdSWill Deacon int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
1470fdfe7cbdSWill Deacon 			unsigned flags)
1471c50d8ae3SPaolo Bonzini {
1472063afacdSBen Gardon 	int r;
1473063afacdSBen Gardon 
1474063afacdSBen Gardon 	r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
1475063afacdSBen Gardon 
1476897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1477063afacdSBen Gardon 		r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);
1478063afacdSBen Gardon 
1479063afacdSBen Gardon 	return r;
1480c50d8ae3SPaolo Bonzini }
1481c50d8ae3SPaolo Bonzini 
1482c50d8ae3SPaolo Bonzini int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1483c50d8ae3SPaolo Bonzini {
14841d8dd6b3SBen Gardon 	int r;
14851d8dd6b3SBen Gardon 
14861d8dd6b3SBen Gardon 	r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
14871d8dd6b3SBen Gardon 
1488897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
14891d8dd6b3SBen Gardon 		r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);
14901d8dd6b3SBen Gardon 
14911d8dd6b3SBen Gardon 	return r;
1492c50d8ae3SPaolo Bonzini }
1493c50d8ae3SPaolo Bonzini 
1494c50d8ae3SPaolo Bonzini static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1495c50d8ae3SPaolo Bonzini 			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1496c50d8ae3SPaolo Bonzini 			 unsigned long data)
1497c50d8ae3SPaolo Bonzini {
1498c50d8ae3SPaolo Bonzini 	u64 *sptep;
14993f649ab7SKees Cook 	struct rmap_iterator iter;
1500c50d8ae3SPaolo Bonzini 	int young = 0;
1501c50d8ae3SPaolo Bonzini 
1502c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1503c50d8ae3SPaolo Bonzini 		young |= mmu_spte_age(sptep);
1504c50d8ae3SPaolo Bonzini 
1505c50d8ae3SPaolo Bonzini 	trace_kvm_age_page(gfn, level, slot, young);
1506c50d8ae3SPaolo Bonzini 	return young;
1507c50d8ae3SPaolo Bonzini }
1508c50d8ae3SPaolo Bonzini 
1509c50d8ae3SPaolo Bonzini static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1510c50d8ae3SPaolo Bonzini 			      struct kvm_memory_slot *slot, gfn_t gfn,
1511c50d8ae3SPaolo Bonzini 			      int level, unsigned long data)
1512c50d8ae3SPaolo Bonzini {
1513c50d8ae3SPaolo Bonzini 	u64 *sptep;
1514c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1515c50d8ae3SPaolo Bonzini 
1516c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1517c50d8ae3SPaolo Bonzini 		if (is_accessed_spte(*sptep))
1518c50d8ae3SPaolo Bonzini 			return 1;
1519c50d8ae3SPaolo Bonzini 	return 0;
1520c50d8ae3SPaolo Bonzini }
1521c50d8ae3SPaolo Bonzini 
1522c50d8ae3SPaolo Bonzini #define RMAP_RECYCLE_THRESHOLD 1000
1523c50d8ae3SPaolo Bonzini 
1524c50d8ae3SPaolo Bonzini static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1525c50d8ae3SPaolo Bonzini {
1526c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1527c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1528c50d8ae3SPaolo Bonzini 
152957354682SSean Christopherson 	sp = sptep_to_sp(spte);
1530c50d8ae3SPaolo Bonzini 
1531c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1532c50d8ae3SPaolo Bonzini 
1533c50d8ae3SPaolo Bonzini 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
1534c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1535c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1536c50d8ae3SPaolo Bonzini }
1537c50d8ae3SPaolo Bonzini 
1538c50d8ae3SPaolo Bonzini int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1539c50d8ae3SPaolo Bonzini {
1540f8e14497SBen Gardon 	int young = false;
1541f8e14497SBen Gardon 
1542f8e14497SBen Gardon 	young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
1543897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1544f8e14497SBen Gardon 		young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);
1545f8e14497SBen Gardon 
1546f8e14497SBen Gardon 	return young;
1547c50d8ae3SPaolo Bonzini }
1548c50d8ae3SPaolo Bonzini 
1549c50d8ae3SPaolo Bonzini int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1550c50d8ae3SPaolo Bonzini {
1551f8e14497SBen Gardon 	int young = false;
1552f8e14497SBen Gardon 
1553f8e14497SBen Gardon 	young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
1554897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
1555f8e14497SBen Gardon 		young |= kvm_tdp_mmu_test_age_hva(kvm, hva);
1556f8e14497SBen Gardon 
1557f8e14497SBen Gardon 	return young;
1558c50d8ae3SPaolo Bonzini }
1559c50d8ae3SPaolo Bonzini 
1560c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1561c50d8ae3SPaolo Bonzini static int is_empty_shadow_page(u64 *spt)
1562c50d8ae3SPaolo Bonzini {
1563c50d8ae3SPaolo Bonzini 	u64 *pos;
1564c50d8ae3SPaolo Bonzini 	u64 *end;
1565c50d8ae3SPaolo Bonzini 
1566c50d8ae3SPaolo Bonzini 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1567c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*pos)) {
1568c50d8ae3SPaolo Bonzini 			printk(KERN_ERR "%s: %p %llx\n", __func__,
1569c50d8ae3SPaolo Bonzini 			       pos, *pos);
1570c50d8ae3SPaolo Bonzini 			return 0;
1571c50d8ae3SPaolo Bonzini 		}
1572c50d8ae3SPaolo Bonzini 	return 1;
1573c50d8ae3SPaolo Bonzini }
1574c50d8ae3SPaolo Bonzini #endif
1575c50d8ae3SPaolo Bonzini 
1576c50d8ae3SPaolo Bonzini /*
1577c50d8ae3SPaolo Bonzini  * This value is the sum of all of the kvm instances's
1578c50d8ae3SPaolo Bonzini  * kvm->arch.n_used_mmu_pages values.  We need a global,
1579c50d8ae3SPaolo Bonzini  * aggregate version in order to make the slab shrinker
1580c50d8ae3SPaolo Bonzini  * faster
1581c50d8ae3SPaolo Bonzini  */
1582c50d8ae3SPaolo Bonzini static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
1583c50d8ae3SPaolo Bonzini {
1584c50d8ae3SPaolo Bonzini 	kvm->arch.n_used_mmu_pages += nr;
1585c50d8ae3SPaolo Bonzini 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1586c50d8ae3SPaolo Bonzini }
1587c50d8ae3SPaolo Bonzini 
1588c50d8ae3SPaolo Bonzini static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1589c50d8ae3SPaolo Bonzini {
1590c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1591c50d8ae3SPaolo Bonzini 	hlist_del(&sp->hash_link);
1592c50d8ae3SPaolo Bonzini 	list_del(&sp->link);
1593c50d8ae3SPaolo Bonzini 	free_page((unsigned long)sp->spt);
1594c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
1595c50d8ae3SPaolo Bonzini 		free_page((unsigned long)sp->gfns);
1596c50d8ae3SPaolo Bonzini 	kmem_cache_free(mmu_page_header_cache, sp);
1597c50d8ae3SPaolo Bonzini }
1598c50d8ae3SPaolo Bonzini 
1599c50d8ae3SPaolo Bonzini static unsigned kvm_page_table_hashfn(gfn_t gfn)
1600c50d8ae3SPaolo Bonzini {
1601c50d8ae3SPaolo Bonzini 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1602c50d8ae3SPaolo Bonzini }
1603c50d8ae3SPaolo Bonzini 
1604c50d8ae3SPaolo Bonzini static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1605c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp, u64 *parent_pte)
1606c50d8ae3SPaolo Bonzini {
1607c50d8ae3SPaolo Bonzini 	if (!parent_pte)
1608c50d8ae3SPaolo Bonzini 		return;
1609c50d8ae3SPaolo Bonzini 
1610c50d8ae3SPaolo Bonzini 	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1611c50d8ae3SPaolo Bonzini }
1612c50d8ae3SPaolo Bonzini 
1613c50d8ae3SPaolo Bonzini static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1614c50d8ae3SPaolo Bonzini 				       u64 *parent_pte)
1615c50d8ae3SPaolo Bonzini {
1616c50d8ae3SPaolo Bonzini 	__pte_list_remove(parent_pte, &sp->parent_ptes);
1617c50d8ae3SPaolo Bonzini }
1618c50d8ae3SPaolo Bonzini 
1619c50d8ae3SPaolo Bonzini static void drop_parent_pte(struct kvm_mmu_page *sp,
1620c50d8ae3SPaolo Bonzini 			    u64 *parent_pte)
1621c50d8ae3SPaolo Bonzini {
1622c50d8ae3SPaolo Bonzini 	mmu_page_remove_parent_pte(sp, parent_pte);
1623c50d8ae3SPaolo Bonzini 	mmu_spte_clear_no_track(parent_pte);
1624c50d8ae3SPaolo Bonzini }
1625c50d8ae3SPaolo Bonzini 
1626c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1627c50d8ae3SPaolo Bonzini {
1628c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1629c50d8ae3SPaolo Bonzini 
163094ce87efSSean Christopherson 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
163194ce87efSSean Christopherson 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1632c50d8ae3SPaolo Bonzini 	if (!direct)
163394ce87efSSean Christopherson 		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1634c50d8ae3SPaolo Bonzini 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1635c50d8ae3SPaolo Bonzini 
1636c50d8ae3SPaolo Bonzini 	/*
1637c50d8ae3SPaolo Bonzini 	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
1638c50d8ae3SPaolo Bonzini 	 * depends on valid pages being added to the head of the list.  See
1639c50d8ae3SPaolo Bonzini 	 * comments in kvm_zap_obsolete_pages().
1640c50d8ae3SPaolo Bonzini 	 */
1641c50d8ae3SPaolo Bonzini 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1642c50d8ae3SPaolo Bonzini 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1643c50d8ae3SPaolo Bonzini 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1644c50d8ae3SPaolo Bonzini 	return sp;
1645c50d8ae3SPaolo Bonzini }
1646c50d8ae3SPaolo Bonzini 
1647c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte);
1648c50d8ae3SPaolo Bonzini static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1649c50d8ae3SPaolo Bonzini {
1650c50d8ae3SPaolo Bonzini 	u64 *sptep;
1651c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1652c50d8ae3SPaolo Bonzini 
1653c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1654c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
1655c50d8ae3SPaolo Bonzini 	}
1656c50d8ae3SPaolo Bonzini }
1657c50d8ae3SPaolo Bonzini 
1658c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte)
1659c50d8ae3SPaolo Bonzini {
1660c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1661c50d8ae3SPaolo Bonzini 	unsigned int index;
1662c50d8ae3SPaolo Bonzini 
166357354682SSean Christopherson 	sp = sptep_to_sp(spte);
1664c50d8ae3SPaolo Bonzini 	index = spte - sp->spt;
1665c50d8ae3SPaolo Bonzini 	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1666c50d8ae3SPaolo Bonzini 		return;
1667c50d8ae3SPaolo Bonzini 	if (sp->unsync_children++)
1668c50d8ae3SPaolo Bonzini 		return;
1669c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
1670c50d8ae3SPaolo Bonzini }
1671c50d8ae3SPaolo Bonzini 
1672c50d8ae3SPaolo Bonzini static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1673c50d8ae3SPaolo Bonzini 			       struct kvm_mmu_page *sp)
1674c50d8ae3SPaolo Bonzini {
1675c50d8ae3SPaolo Bonzini 	return 0;
1676c50d8ae3SPaolo Bonzini }
1677c50d8ae3SPaolo Bonzini 
1678c50d8ae3SPaolo Bonzini #define KVM_PAGE_ARRAY_NR 16
1679c50d8ae3SPaolo Bonzini 
1680c50d8ae3SPaolo Bonzini struct kvm_mmu_pages {
1681c50d8ae3SPaolo Bonzini 	struct mmu_page_and_offset {
1682c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
1683c50d8ae3SPaolo Bonzini 		unsigned int idx;
1684c50d8ae3SPaolo Bonzini 	} page[KVM_PAGE_ARRAY_NR];
1685c50d8ae3SPaolo Bonzini 	unsigned int nr;
1686c50d8ae3SPaolo Bonzini };
1687c50d8ae3SPaolo Bonzini 
1688c50d8ae3SPaolo Bonzini static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1689c50d8ae3SPaolo Bonzini 			 int idx)
1690c50d8ae3SPaolo Bonzini {
1691c50d8ae3SPaolo Bonzini 	int i;
1692c50d8ae3SPaolo Bonzini 
1693c50d8ae3SPaolo Bonzini 	if (sp->unsync)
1694c50d8ae3SPaolo Bonzini 		for (i=0; i < pvec->nr; i++)
1695c50d8ae3SPaolo Bonzini 			if (pvec->page[i].sp == sp)
1696c50d8ae3SPaolo Bonzini 				return 0;
1697c50d8ae3SPaolo Bonzini 
1698c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].sp = sp;
1699c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].idx = idx;
1700c50d8ae3SPaolo Bonzini 	pvec->nr++;
1701c50d8ae3SPaolo Bonzini 	return (pvec->nr == KVM_PAGE_ARRAY_NR);
1702c50d8ae3SPaolo Bonzini }
1703c50d8ae3SPaolo Bonzini 
1704c50d8ae3SPaolo Bonzini static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1705c50d8ae3SPaolo Bonzini {
1706c50d8ae3SPaolo Bonzini 	--sp->unsync_children;
1707c50d8ae3SPaolo Bonzini 	WARN_ON((int)sp->unsync_children < 0);
1708c50d8ae3SPaolo Bonzini 	__clear_bit(idx, sp->unsync_child_bitmap);
1709c50d8ae3SPaolo Bonzini }
1710c50d8ae3SPaolo Bonzini 
1711c50d8ae3SPaolo Bonzini static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1712c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
1713c50d8ae3SPaolo Bonzini {
1714c50d8ae3SPaolo Bonzini 	int i, ret, nr_unsync_leaf = 0;
1715c50d8ae3SPaolo Bonzini 
1716c50d8ae3SPaolo Bonzini 	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1717c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
1718c50d8ae3SPaolo Bonzini 		u64 ent = sp->spt[i];
1719c50d8ae3SPaolo Bonzini 
1720c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1721c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
1722c50d8ae3SPaolo Bonzini 			continue;
1723c50d8ae3SPaolo Bonzini 		}
1724c50d8ae3SPaolo Bonzini 
1725e47c4aeeSSean Christopherson 		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1726c50d8ae3SPaolo Bonzini 
1727c50d8ae3SPaolo Bonzini 		if (child->unsync_children) {
1728c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
1729c50d8ae3SPaolo Bonzini 				return -ENOSPC;
1730c50d8ae3SPaolo Bonzini 
1731c50d8ae3SPaolo Bonzini 			ret = __mmu_unsync_walk(child, pvec);
1732c50d8ae3SPaolo Bonzini 			if (!ret) {
1733c50d8ae3SPaolo Bonzini 				clear_unsync_child_bit(sp, i);
1734c50d8ae3SPaolo Bonzini 				continue;
1735c50d8ae3SPaolo Bonzini 			} else if (ret > 0) {
1736c50d8ae3SPaolo Bonzini 				nr_unsync_leaf += ret;
1737c50d8ae3SPaolo Bonzini 			} else
1738c50d8ae3SPaolo Bonzini 				return ret;
1739c50d8ae3SPaolo Bonzini 		} else if (child->unsync) {
1740c50d8ae3SPaolo Bonzini 			nr_unsync_leaf++;
1741c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
1742c50d8ae3SPaolo Bonzini 				return -ENOSPC;
1743c50d8ae3SPaolo Bonzini 		} else
1744c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
1745c50d8ae3SPaolo Bonzini 	}
1746c50d8ae3SPaolo Bonzini 
1747c50d8ae3SPaolo Bonzini 	return nr_unsync_leaf;
1748c50d8ae3SPaolo Bonzini }
1749c50d8ae3SPaolo Bonzini 
1750c50d8ae3SPaolo Bonzini #define INVALID_INDEX (-1)
1751c50d8ae3SPaolo Bonzini 
1752c50d8ae3SPaolo Bonzini static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1753c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
1754c50d8ae3SPaolo Bonzini {
1755c50d8ae3SPaolo Bonzini 	pvec->nr = 0;
1756c50d8ae3SPaolo Bonzini 	if (!sp->unsync_children)
1757c50d8ae3SPaolo Bonzini 		return 0;
1758c50d8ae3SPaolo Bonzini 
1759c50d8ae3SPaolo Bonzini 	mmu_pages_add(pvec, sp, INVALID_INDEX);
1760c50d8ae3SPaolo Bonzini 	return __mmu_unsync_walk(sp, pvec);
1761c50d8ae3SPaolo Bonzini }
1762c50d8ae3SPaolo Bonzini 
1763c50d8ae3SPaolo Bonzini static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1764c50d8ae3SPaolo Bonzini {
1765c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->unsync);
1766c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_sync_page(sp);
1767c50d8ae3SPaolo Bonzini 	sp->unsync = 0;
1768c50d8ae3SPaolo Bonzini 	--kvm->stat.mmu_unsync;
1769c50d8ae3SPaolo Bonzini }
1770c50d8ae3SPaolo Bonzini 
1771c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1772c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list);
1773c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1774c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list);
1775c50d8ae3SPaolo Bonzini 
1776ac101b7cSSean Christopherson #define for_each_valid_sp(_kvm, _sp, _list)				\
1777ac101b7cSSean Christopherson 	hlist_for_each_entry(_sp, _list, hash_link)			\
1778c50d8ae3SPaolo Bonzini 		if (is_obsolete_sp((_kvm), (_sp))) {			\
1779c50d8ae3SPaolo Bonzini 		} else
1780c50d8ae3SPaolo Bonzini 
1781c50d8ae3SPaolo Bonzini #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
1782ac101b7cSSean Christopherson 	for_each_valid_sp(_kvm, _sp,					\
1783ac101b7cSSean Christopherson 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
1784c50d8ae3SPaolo Bonzini 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
1785c50d8ae3SPaolo Bonzini 
1786c50d8ae3SPaolo Bonzini static inline bool is_ept_sp(struct kvm_mmu_page *sp)
1787c50d8ae3SPaolo Bonzini {
1788c50d8ae3SPaolo Bonzini 	return sp->role.cr0_wp && sp->role.smap_andnot_wp;
1789c50d8ae3SPaolo Bonzini }
1790c50d8ae3SPaolo Bonzini 
1791c50d8ae3SPaolo Bonzini /* @sp->gfn should be write-protected at the call site */
1792c50d8ae3SPaolo Bonzini static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1793c50d8ae3SPaolo Bonzini 			    struct list_head *invalid_list)
1794c50d8ae3SPaolo Bonzini {
1795c50d8ae3SPaolo Bonzini 	if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
1796c50d8ae3SPaolo Bonzini 	    vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
1797c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1798c50d8ae3SPaolo Bonzini 		return false;
1799c50d8ae3SPaolo Bonzini 	}
1800c50d8ae3SPaolo Bonzini 
1801c50d8ae3SPaolo Bonzini 	return true;
1802c50d8ae3SPaolo Bonzini }
1803c50d8ae3SPaolo Bonzini 
1804c50d8ae3SPaolo Bonzini static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1805c50d8ae3SPaolo Bonzini 					struct list_head *invalid_list,
1806c50d8ae3SPaolo Bonzini 					bool remote_flush)
1807c50d8ae3SPaolo Bonzini {
1808c50d8ae3SPaolo Bonzini 	if (!remote_flush && list_empty(invalid_list))
1809c50d8ae3SPaolo Bonzini 		return false;
1810c50d8ae3SPaolo Bonzini 
1811c50d8ae3SPaolo Bonzini 	if (!list_empty(invalid_list))
1812c50d8ae3SPaolo Bonzini 		kvm_mmu_commit_zap_page(kvm, invalid_list);
1813c50d8ae3SPaolo Bonzini 	else
1814c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
1815c50d8ae3SPaolo Bonzini 	return true;
1816c50d8ae3SPaolo Bonzini }
1817c50d8ae3SPaolo Bonzini 
1818c50d8ae3SPaolo Bonzini static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
1819c50d8ae3SPaolo Bonzini 				 struct list_head *invalid_list,
1820c50d8ae3SPaolo Bonzini 				 bool remote_flush, bool local_flush)
1821c50d8ae3SPaolo Bonzini {
1822c50d8ae3SPaolo Bonzini 	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
1823c50d8ae3SPaolo Bonzini 		return;
1824c50d8ae3SPaolo Bonzini 
1825c50d8ae3SPaolo Bonzini 	if (local_flush)
18268c8560b8SSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1827c50d8ae3SPaolo Bonzini }
1828c50d8ae3SPaolo Bonzini 
1829c50d8ae3SPaolo Bonzini #ifdef CONFIG_KVM_MMU_AUDIT
1830c50d8ae3SPaolo Bonzini #include "mmu_audit.c"
1831c50d8ae3SPaolo Bonzini #else
1832c50d8ae3SPaolo Bonzini static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
1833c50d8ae3SPaolo Bonzini static void mmu_audit_disable(void) { }
1834c50d8ae3SPaolo Bonzini #endif
1835c50d8ae3SPaolo Bonzini 
1836c50d8ae3SPaolo Bonzini static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1837c50d8ae3SPaolo Bonzini {
1838c50d8ae3SPaolo Bonzini 	return sp->role.invalid ||
1839c50d8ae3SPaolo Bonzini 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1840c50d8ae3SPaolo Bonzini }
1841c50d8ae3SPaolo Bonzini 
1842c50d8ae3SPaolo Bonzini static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1843c50d8ae3SPaolo Bonzini 			 struct list_head *invalid_list)
1844c50d8ae3SPaolo Bonzini {
1845c50d8ae3SPaolo Bonzini 	kvm_unlink_unsync_page(vcpu->kvm, sp);
1846c50d8ae3SPaolo Bonzini 	return __kvm_sync_page(vcpu, sp, invalid_list);
1847c50d8ae3SPaolo Bonzini }
1848c50d8ae3SPaolo Bonzini 
1849c50d8ae3SPaolo Bonzini /* @gfn should be write-protected at the call site */
1850c50d8ae3SPaolo Bonzini static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
1851c50d8ae3SPaolo Bonzini 			   struct list_head *invalid_list)
1852c50d8ae3SPaolo Bonzini {
1853c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *s;
1854c50d8ae3SPaolo Bonzini 	bool ret = false;
1855c50d8ae3SPaolo Bonzini 
1856c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
1857c50d8ae3SPaolo Bonzini 		if (!s->unsync)
1858c50d8ae3SPaolo Bonzini 			continue;
1859c50d8ae3SPaolo Bonzini 
18603bae0459SSean Christopherson 		WARN_ON(s->role.level != PG_LEVEL_4K);
1861c50d8ae3SPaolo Bonzini 		ret |= kvm_sync_page(vcpu, s, invalid_list);
1862c50d8ae3SPaolo Bonzini 	}
1863c50d8ae3SPaolo Bonzini 
1864c50d8ae3SPaolo Bonzini 	return ret;
1865c50d8ae3SPaolo Bonzini }
1866c50d8ae3SPaolo Bonzini 
1867c50d8ae3SPaolo Bonzini struct mmu_page_path {
1868c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1869c50d8ae3SPaolo Bonzini 	unsigned int idx[PT64_ROOT_MAX_LEVEL];
1870c50d8ae3SPaolo Bonzini };
1871c50d8ae3SPaolo Bonzini 
1872c50d8ae3SPaolo Bonzini #define for_each_sp(pvec, sp, parents, i)			\
1873c50d8ae3SPaolo Bonzini 		for (i = mmu_pages_first(&pvec, &parents);	\
1874c50d8ae3SPaolo Bonzini 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
1875c50d8ae3SPaolo Bonzini 			i = mmu_pages_next(&pvec, &parents, i))
1876c50d8ae3SPaolo Bonzini 
1877c50d8ae3SPaolo Bonzini static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1878c50d8ae3SPaolo Bonzini 			  struct mmu_page_path *parents,
1879c50d8ae3SPaolo Bonzini 			  int i)
1880c50d8ae3SPaolo Bonzini {
1881c50d8ae3SPaolo Bonzini 	int n;
1882c50d8ae3SPaolo Bonzini 
1883c50d8ae3SPaolo Bonzini 	for (n = i+1; n < pvec->nr; n++) {
1884c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp = pvec->page[n].sp;
1885c50d8ae3SPaolo Bonzini 		unsigned idx = pvec->page[n].idx;
1886c50d8ae3SPaolo Bonzini 		int level = sp->role.level;
1887c50d8ae3SPaolo Bonzini 
1888c50d8ae3SPaolo Bonzini 		parents->idx[level-1] = idx;
18893bae0459SSean Christopherson 		if (level == PG_LEVEL_4K)
1890c50d8ae3SPaolo Bonzini 			break;
1891c50d8ae3SPaolo Bonzini 
1892c50d8ae3SPaolo Bonzini 		parents->parent[level-2] = sp;
1893c50d8ae3SPaolo Bonzini 	}
1894c50d8ae3SPaolo Bonzini 
1895c50d8ae3SPaolo Bonzini 	return n;
1896c50d8ae3SPaolo Bonzini }
1897c50d8ae3SPaolo Bonzini 
1898c50d8ae3SPaolo Bonzini static int mmu_pages_first(struct kvm_mmu_pages *pvec,
1899c50d8ae3SPaolo Bonzini 			   struct mmu_page_path *parents)
1900c50d8ae3SPaolo Bonzini {
1901c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1902c50d8ae3SPaolo Bonzini 	int level;
1903c50d8ae3SPaolo Bonzini 
1904c50d8ae3SPaolo Bonzini 	if (pvec->nr == 0)
1905c50d8ae3SPaolo Bonzini 		return 0;
1906c50d8ae3SPaolo Bonzini 
1907c50d8ae3SPaolo Bonzini 	WARN_ON(pvec->page[0].idx != INVALID_INDEX);
1908c50d8ae3SPaolo Bonzini 
1909c50d8ae3SPaolo Bonzini 	sp = pvec->page[0].sp;
1910c50d8ae3SPaolo Bonzini 	level = sp->role.level;
19113bae0459SSean Christopherson 	WARN_ON(level == PG_LEVEL_4K);
1912c50d8ae3SPaolo Bonzini 
1913c50d8ae3SPaolo Bonzini 	parents->parent[level-2] = sp;
1914c50d8ae3SPaolo Bonzini 
1915c50d8ae3SPaolo Bonzini 	/* Also set up a sentinel.  Further entries in pvec are all
1916c50d8ae3SPaolo Bonzini 	 * children of sp, so this element is never overwritten.
1917c50d8ae3SPaolo Bonzini 	 */
1918c50d8ae3SPaolo Bonzini 	parents->parent[level-1] = NULL;
1919c50d8ae3SPaolo Bonzini 	return mmu_pages_next(pvec, parents, 0);
1920c50d8ae3SPaolo Bonzini }
1921c50d8ae3SPaolo Bonzini 
1922c50d8ae3SPaolo Bonzini static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1923c50d8ae3SPaolo Bonzini {
1924c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1925c50d8ae3SPaolo Bonzini 	unsigned int level = 0;
1926c50d8ae3SPaolo Bonzini 
1927c50d8ae3SPaolo Bonzini 	do {
1928c50d8ae3SPaolo Bonzini 		unsigned int idx = parents->idx[level];
1929c50d8ae3SPaolo Bonzini 		sp = parents->parent[level];
1930c50d8ae3SPaolo Bonzini 		if (!sp)
1931c50d8ae3SPaolo Bonzini 			return;
1932c50d8ae3SPaolo Bonzini 
1933c50d8ae3SPaolo Bonzini 		WARN_ON(idx == INVALID_INDEX);
1934c50d8ae3SPaolo Bonzini 		clear_unsync_child_bit(sp, idx);
1935c50d8ae3SPaolo Bonzini 		level++;
1936c50d8ae3SPaolo Bonzini 	} while (!sp->unsync_children);
1937c50d8ae3SPaolo Bonzini }
1938c50d8ae3SPaolo Bonzini 
1939c50d8ae3SPaolo Bonzini static void mmu_sync_children(struct kvm_vcpu *vcpu,
1940c50d8ae3SPaolo Bonzini 			      struct kvm_mmu_page *parent)
1941c50d8ae3SPaolo Bonzini {
1942c50d8ae3SPaolo Bonzini 	int i;
1943c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1944c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
1945c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
1946c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
1947c50d8ae3SPaolo Bonzini 	bool flush = false;
1948c50d8ae3SPaolo Bonzini 
1949c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
1950c50d8ae3SPaolo Bonzini 		bool protected = false;
1951c50d8ae3SPaolo Bonzini 
1952c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i)
1953c50d8ae3SPaolo Bonzini 			protected |= rmap_write_protect(vcpu, sp->gfn);
1954c50d8ae3SPaolo Bonzini 
1955c50d8ae3SPaolo Bonzini 		if (protected) {
1956c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs(vcpu->kvm);
1957c50d8ae3SPaolo Bonzini 			flush = false;
1958c50d8ae3SPaolo Bonzini 		}
1959c50d8ae3SPaolo Bonzini 
1960c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
1961c50d8ae3SPaolo Bonzini 			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
1962c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
1963c50d8ae3SPaolo Bonzini 		}
1964531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
1965c50d8ae3SPaolo Bonzini 			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
1966531810caSBen Gardon 			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
1967c50d8ae3SPaolo Bonzini 			flush = false;
1968c50d8ae3SPaolo Bonzini 		}
1969c50d8ae3SPaolo Bonzini 	}
1970c50d8ae3SPaolo Bonzini 
1971c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
1972c50d8ae3SPaolo Bonzini }
1973c50d8ae3SPaolo Bonzini 
1974c50d8ae3SPaolo Bonzini static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
1975c50d8ae3SPaolo Bonzini {
1976c50d8ae3SPaolo Bonzini 	atomic_set(&sp->write_flooding_count,  0);
1977c50d8ae3SPaolo Bonzini }
1978c50d8ae3SPaolo Bonzini 
1979c50d8ae3SPaolo Bonzini static void clear_sp_write_flooding_count(u64 *spte)
1980c50d8ae3SPaolo Bonzini {
198157354682SSean Christopherson 	__clear_sp_write_flooding_count(sptep_to_sp(spte));
1982c50d8ae3SPaolo Bonzini }
1983c50d8ae3SPaolo Bonzini 
1984c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1985c50d8ae3SPaolo Bonzini 					     gfn_t gfn,
1986c50d8ae3SPaolo Bonzini 					     gva_t gaddr,
1987c50d8ae3SPaolo Bonzini 					     unsigned level,
1988c50d8ae3SPaolo Bonzini 					     int direct,
19890a2b64c5SBen Gardon 					     unsigned int access)
1990c50d8ae3SPaolo Bonzini {
1991fb58a9c3SSean Christopherson 	bool direct_mmu = vcpu->arch.mmu->direct_map;
1992c50d8ae3SPaolo Bonzini 	union kvm_mmu_page_role role;
1993ac101b7cSSean Christopherson 	struct hlist_head *sp_list;
1994c50d8ae3SPaolo Bonzini 	unsigned quadrant;
1995c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1996c50d8ae3SPaolo Bonzini 	bool need_sync = false;
1997c50d8ae3SPaolo Bonzini 	bool flush = false;
1998c50d8ae3SPaolo Bonzini 	int collisions = 0;
1999c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2000c50d8ae3SPaolo Bonzini 
2001c50d8ae3SPaolo Bonzini 	role = vcpu->arch.mmu->mmu_role.base;
2002c50d8ae3SPaolo Bonzini 	role.level = level;
2003c50d8ae3SPaolo Bonzini 	role.direct = direct;
2004c50d8ae3SPaolo Bonzini 	if (role.direct)
2005c50d8ae3SPaolo Bonzini 		role.gpte_is_8_bytes = true;
2006c50d8ae3SPaolo Bonzini 	role.access = access;
2007fb58a9c3SSean Christopherson 	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2008c50d8ae3SPaolo Bonzini 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2009c50d8ae3SPaolo Bonzini 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2010c50d8ae3SPaolo Bonzini 		role.quadrant = quadrant;
2011c50d8ae3SPaolo Bonzini 	}
2012ac101b7cSSean Christopherson 
2013ac101b7cSSean Christopherson 	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2014ac101b7cSSean Christopherson 	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
2015c50d8ae3SPaolo Bonzini 		if (sp->gfn != gfn) {
2016c50d8ae3SPaolo Bonzini 			collisions++;
2017c50d8ae3SPaolo Bonzini 			continue;
2018c50d8ae3SPaolo Bonzini 		}
2019c50d8ae3SPaolo Bonzini 
2020c50d8ae3SPaolo Bonzini 		if (!need_sync && sp->unsync)
2021c50d8ae3SPaolo Bonzini 			need_sync = true;
2022c50d8ae3SPaolo Bonzini 
2023c50d8ae3SPaolo Bonzini 		if (sp->role.word != role.word)
2024c50d8ae3SPaolo Bonzini 			continue;
2025c50d8ae3SPaolo Bonzini 
2026fb58a9c3SSean Christopherson 		if (direct_mmu)
2027fb58a9c3SSean Christopherson 			goto trace_get_page;
2028fb58a9c3SSean Christopherson 
2029c50d8ae3SPaolo Bonzini 		if (sp->unsync) {
2030c50d8ae3SPaolo Bonzini 			/* The page is good, but __kvm_sync_page might still end
2031c50d8ae3SPaolo Bonzini 			 * up zapping it.  If so, break in order to rebuild it.
2032c50d8ae3SPaolo Bonzini 			 */
2033c50d8ae3SPaolo Bonzini 			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
2034c50d8ae3SPaolo Bonzini 				break;
2035c50d8ae3SPaolo Bonzini 
2036c50d8ae3SPaolo Bonzini 			WARN_ON(!list_empty(&invalid_list));
20378c8560b8SSean Christopherson 			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2038c50d8ae3SPaolo Bonzini 		}
2039c50d8ae3SPaolo Bonzini 
2040c50d8ae3SPaolo Bonzini 		if (sp->unsync_children)
2041f6f6195bSLai Jiangshan 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2042c50d8ae3SPaolo Bonzini 
2043c50d8ae3SPaolo Bonzini 		__clear_sp_write_flooding_count(sp);
2044fb58a9c3SSean Christopherson 
2045fb58a9c3SSean Christopherson trace_get_page:
2046c50d8ae3SPaolo Bonzini 		trace_kvm_mmu_get_page(sp, false);
2047c50d8ae3SPaolo Bonzini 		goto out;
2048c50d8ae3SPaolo Bonzini 	}
2049c50d8ae3SPaolo Bonzini 
2050c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_cache_miss;
2051c50d8ae3SPaolo Bonzini 
2052c50d8ae3SPaolo Bonzini 	sp = kvm_mmu_alloc_page(vcpu, direct);
2053c50d8ae3SPaolo Bonzini 
2054c50d8ae3SPaolo Bonzini 	sp->gfn = gfn;
2055c50d8ae3SPaolo Bonzini 	sp->role = role;
2056ac101b7cSSean Christopherson 	hlist_add_head(&sp->hash_link, sp_list);
2057c50d8ae3SPaolo Bonzini 	if (!direct) {
2058c50d8ae3SPaolo Bonzini 		/*
2059c50d8ae3SPaolo Bonzini 		 * we should do write protection before syncing pages
2060c50d8ae3SPaolo Bonzini 		 * otherwise the content of the synced shadow page may
2061c50d8ae3SPaolo Bonzini 		 * be inconsistent with guest page table.
2062c50d8ae3SPaolo Bonzini 		 */
2063c50d8ae3SPaolo Bonzini 		account_shadowed(vcpu->kvm, sp);
20643bae0459SSean Christopherson 		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
2065c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2066c50d8ae3SPaolo Bonzini 
20673bae0459SSean Christopherson 		if (level > PG_LEVEL_4K && need_sync)
2068c50d8ae3SPaolo Bonzini 			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
2069c50d8ae3SPaolo Bonzini 	}
2070c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_get_page(sp, true);
2071c50d8ae3SPaolo Bonzini 
2072c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2073c50d8ae3SPaolo Bonzini out:
2074c50d8ae3SPaolo Bonzini 	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2075c50d8ae3SPaolo Bonzini 		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2076c50d8ae3SPaolo Bonzini 	return sp;
2077c50d8ae3SPaolo Bonzini }
2078c50d8ae3SPaolo Bonzini 
2079c50d8ae3SPaolo Bonzini static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2080c50d8ae3SPaolo Bonzini 					struct kvm_vcpu *vcpu, hpa_t root,
2081c50d8ae3SPaolo Bonzini 					u64 addr)
2082c50d8ae3SPaolo Bonzini {
2083c50d8ae3SPaolo Bonzini 	iterator->addr = addr;
2084c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = root;
2085c50d8ae3SPaolo Bonzini 	iterator->level = vcpu->arch.mmu->shadow_root_level;
2086c50d8ae3SPaolo Bonzini 
2087c50d8ae3SPaolo Bonzini 	if (iterator->level == PT64_ROOT_4LEVEL &&
2088c50d8ae3SPaolo Bonzini 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2089c50d8ae3SPaolo Bonzini 	    !vcpu->arch.mmu->direct_map)
2090c50d8ae3SPaolo Bonzini 		--iterator->level;
2091c50d8ae3SPaolo Bonzini 
2092c50d8ae3SPaolo Bonzini 	if (iterator->level == PT32E_ROOT_LEVEL) {
2093c50d8ae3SPaolo Bonzini 		/*
2094c50d8ae3SPaolo Bonzini 		 * prev_root is currently only used for 64-bit hosts. So only
2095c50d8ae3SPaolo Bonzini 		 * the active root_hpa is valid here.
2096c50d8ae3SPaolo Bonzini 		 */
2097c50d8ae3SPaolo Bonzini 		BUG_ON(root != vcpu->arch.mmu->root_hpa);
2098c50d8ae3SPaolo Bonzini 
2099c50d8ae3SPaolo Bonzini 		iterator->shadow_addr
2100c50d8ae3SPaolo Bonzini 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2101c50d8ae3SPaolo Bonzini 		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2102c50d8ae3SPaolo Bonzini 		--iterator->level;
2103c50d8ae3SPaolo Bonzini 		if (!iterator->shadow_addr)
2104c50d8ae3SPaolo Bonzini 			iterator->level = 0;
2105c50d8ae3SPaolo Bonzini 	}
2106c50d8ae3SPaolo Bonzini }
2107c50d8ae3SPaolo Bonzini 
2108c50d8ae3SPaolo Bonzini static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2109c50d8ae3SPaolo Bonzini 			     struct kvm_vcpu *vcpu, u64 addr)
2110c50d8ae3SPaolo Bonzini {
2111c50d8ae3SPaolo Bonzini 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2112c50d8ae3SPaolo Bonzini 				    addr);
2113c50d8ae3SPaolo Bonzini }
2114c50d8ae3SPaolo Bonzini 
2115c50d8ae3SPaolo Bonzini static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2116c50d8ae3SPaolo Bonzini {
21173bae0459SSean Christopherson 	if (iterator->level < PG_LEVEL_4K)
2118c50d8ae3SPaolo Bonzini 		return false;
2119c50d8ae3SPaolo Bonzini 
2120c50d8ae3SPaolo Bonzini 	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2121c50d8ae3SPaolo Bonzini 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2122c50d8ae3SPaolo Bonzini 	return true;
2123c50d8ae3SPaolo Bonzini }
2124c50d8ae3SPaolo Bonzini 
2125c50d8ae3SPaolo Bonzini static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2126c50d8ae3SPaolo Bonzini 			       u64 spte)
2127c50d8ae3SPaolo Bonzini {
2128c50d8ae3SPaolo Bonzini 	if (is_last_spte(spte, iterator->level)) {
2129c50d8ae3SPaolo Bonzini 		iterator->level = 0;
2130c50d8ae3SPaolo Bonzini 		return;
2131c50d8ae3SPaolo Bonzini 	}
2132c50d8ae3SPaolo Bonzini 
2133c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2134c50d8ae3SPaolo Bonzini 	--iterator->level;
2135c50d8ae3SPaolo Bonzini }
2136c50d8ae3SPaolo Bonzini 
2137c50d8ae3SPaolo Bonzini static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2138c50d8ae3SPaolo Bonzini {
2139c50d8ae3SPaolo Bonzini 	__shadow_walk_next(iterator, *iterator->sptep);
2140c50d8ae3SPaolo Bonzini }
2141c50d8ae3SPaolo Bonzini 
2142c50d8ae3SPaolo Bonzini static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2143c50d8ae3SPaolo Bonzini 			     struct kvm_mmu_page *sp)
2144c50d8ae3SPaolo Bonzini {
2145c50d8ae3SPaolo Bonzini 	u64 spte;
2146c50d8ae3SPaolo Bonzini 
2147c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2148c50d8ae3SPaolo Bonzini 
2149cc4674d0SBen Gardon 	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2150c50d8ae3SPaolo Bonzini 
2151c50d8ae3SPaolo Bonzini 	mmu_spte_set(sptep, spte);
2152c50d8ae3SPaolo Bonzini 
2153c50d8ae3SPaolo Bonzini 	mmu_page_add_parent_pte(vcpu, sp, sptep);
2154c50d8ae3SPaolo Bonzini 
2155c50d8ae3SPaolo Bonzini 	if (sp->unsync_children || sp->unsync)
2156c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
2157c50d8ae3SPaolo Bonzini }
2158c50d8ae3SPaolo Bonzini 
2159c50d8ae3SPaolo Bonzini static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2160c50d8ae3SPaolo Bonzini 				   unsigned direct_access)
2161c50d8ae3SPaolo Bonzini {
2162c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2163c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
2164c50d8ae3SPaolo Bonzini 
2165c50d8ae3SPaolo Bonzini 		/*
2166c50d8ae3SPaolo Bonzini 		 * For the direct sp, if the guest pte's dirty bit
2167c50d8ae3SPaolo Bonzini 		 * changed form clean to dirty, it will corrupt the
2168c50d8ae3SPaolo Bonzini 		 * sp's access: allow writable in the read-only sp,
2169c50d8ae3SPaolo Bonzini 		 * so we should update the spte at this point to get
2170c50d8ae3SPaolo Bonzini 		 * a new sp with the correct access.
2171c50d8ae3SPaolo Bonzini 		 */
2172e47c4aeeSSean Christopherson 		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2173c50d8ae3SPaolo Bonzini 		if (child->role.access == direct_access)
2174c50d8ae3SPaolo Bonzini 			return;
2175c50d8ae3SPaolo Bonzini 
2176c50d8ae3SPaolo Bonzini 		drop_parent_pte(child, sptep);
2177c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2178c50d8ae3SPaolo Bonzini 	}
2179c50d8ae3SPaolo Bonzini }
2180c50d8ae3SPaolo Bonzini 
21812de4085cSBen Gardon /* Returns the number of zapped non-leaf child shadow pages. */
21822de4085cSBen Gardon static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
21832de4085cSBen Gardon 			    u64 *spte, struct list_head *invalid_list)
2184c50d8ae3SPaolo Bonzini {
2185c50d8ae3SPaolo Bonzini 	u64 pte;
2186c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *child;
2187c50d8ae3SPaolo Bonzini 
2188c50d8ae3SPaolo Bonzini 	pte = *spte;
2189c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(pte)) {
2190c50d8ae3SPaolo Bonzini 		if (is_last_spte(pte, sp->role.level)) {
2191c50d8ae3SPaolo Bonzini 			drop_spte(kvm, spte);
2192c50d8ae3SPaolo Bonzini 			if (is_large_pte(pte))
2193c50d8ae3SPaolo Bonzini 				--kvm->stat.lpages;
2194c50d8ae3SPaolo Bonzini 		} else {
2195e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2196c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, spte);
21972de4085cSBen Gardon 
21982de4085cSBen Gardon 			/*
21992de4085cSBen Gardon 			 * Recursively zap nested TDP SPs, parentless SPs are
22002de4085cSBen Gardon 			 * unlikely to be used again in the near future.  This
22012de4085cSBen Gardon 			 * avoids retaining a large number of stale nested SPs.
22022de4085cSBen Gardon 			 */
22032de4085cSBen Gardon 			if (tdp_enabled && invalid_list &&
22042de4085cSBen Gardon 			    child->role.guest_mode && !child->parent_ptes.val)
22052de4085cSBen Gardon 				return kvm_mmu_prepare_zap_page(kvm, child,
22062de4085cSBen Gardon 								invalid_list);
2207c50d8ae3SPaolo Bonzini 		}
2208ace569e0SSean Christopherson 	} else if (is_mmio_spte(pte)) {
2209c50d8ae3SPaolo Bonzini 		mmu_spte_clear_no_track(spte);
2210ace569e0SSean Christopherson 	}
22112de4085cSBen Gardon 	return 0;
2212c50d8ae3SPaolo Bonzini }
2213c50d8ae3SPaolo Bonzini 
22142de4085cSBen Gardon static int kvm_mmu_page_unlink_children(struct kvm *kvm,
22152de4085cSBen Gardon 					struct kvm_mmu_page *sp,
22162de4085cSBen Gardon 					struct list_head *invalid_list)
2217c50d8ae3SPaolo Bonzini {
22182de4085cSBen Gardon 	int zapped = 0;
2219c50d8ae3SPaolo Bonzini 	unsigned i;
2220c50d8ae3SPaolo Bonzini 
2221c50d8ae3SPaolo Bonzini 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
22222de4085cSBen Gardon 		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
22232de4085cSBen Gardon 
22242de4085cSBen Gardon 	return zapped;
2225c50d8ae3SPaolo Bonzini }
2226c50d8ae3SPaolo Bonzini 
2227c50d8ae3SPaolo Bonzini static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2228c50d8ae3SPaolo Bonzini {
2229c50d8ae3SPaolo Bonzini 	u64 *sptep;
2230c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
2231c50d8ae3SPaolo Bonzini 
2232c50d8ae3SPaolo Bonzini 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2233c50d8ae3SPaolo Bonzini 		drop_parent_pte(sp, sptep);
2234c50d8ae3SPaolo Bonzini }
2235c50d8ae3SPaolo Bonzini 
2236c50d8ae3SPaolo Bonzini static int mmu_zap_unsync_children(struct kvm *kvm,
2237c50d8ae3SPaolo Bonzini 				   struct kvm_mmu_page *parent,
2238c50d8ae3SPaolo Bonzini 				   struct list_head *invalid_list)
2239c50d8ae3SPaolo Bonzini {
2240c50d8ae3SPaolo Bonzini 	int i, zapped = 0;
2241c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
2242c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
2243c50d8ae3SPaolo Bonzini 
22443bae0459SSean Christopherson 	if (parent->role.level == PG_LEVEL_4K)
2245c50d8ae3SPaolo Bonzini 		return 0;
2246c50d8ae3SPaolo Bonzini 
2247c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
2248c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
2249c50d8ae3SPaolo Bonzini 
2250c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
2251c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2252c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
2253c50d8ae3SPaolo Bonzini 			zapped++;
2254c50d8ae3SPaolo Bonzini 		}
2255c50d8ae3SPaolo Bonzini 	}
2256c50d8ae3SPaolo Bonzini 
2257c50d8ae3SPaolo Bonzini 	return zapped;
2258c50d8ae3SPaolo Bonzini }
2259c50d8ae3SPaolo Bonzini 
2260c50d8ae3SPaolo Bonzini static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2261c50d8ae3SPaolo Bonzini 				       struct kvm_mmu_page *sp,
2262c50d8ae3SPaolo Bonzini 				       struct list_head *invalid_list,
2263c50d8ae3SPaolo Bonzini 				       int *nr_zapped)
2264c50d8ae3SPaolo Bonzini {
2265c50d8ae3SPaolo Bonzini 	bool list_unstable;
2266c50d8ae3SPaolo Bonzini 
2267c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_prepare_zap_page(sp);
2268c50d8ae3SPaolo Bonzini 	++kvm->stat.mmu_shadow_zapped;
2269c50d8ae3SPaolo Bonzini 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
22702de4085cSBen Gardon 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2271c50d8ae3SPaolo Bonzini 	kvm_mmu_unlink_parents(kvm, sp);
2272c50d8ae3SPaolo Bonzini 
2273c50d8ae3SPaolo Bonzini 	/* Zapping children means active_mmu_pages has become unstable. */
2274c50d8ae3SPaolo Bonzini 	list_unstable = *nr_zapped;
2275c50d8ae3SPaolo Bonzini 
2276c50d8ae3SPaolo Bonzini 	if (!sp->role.invalid && !sp->role.direct)
2277c50d8ae3SPaolo Bonzini 		unaccount_shadowed(kvm, sp);
2278c50d8ae3SPaolo Bonzini 
2279c50d8ae3SPaolo Bonzini 	if (sp->unsync)
2280c50d8ae3SPaolo Bonzini 		kvm_unlink_unsync_page(kvm, sp);
2281c50d8ae3SPaolo Bonzini 	if (!sp->root_count) {
2282c50d8ae3SPaolo Bonzini 		/* Count self */
2283c50d8ae3SPaolo Bonzini 		(*nr_zapped)++;
2284f95eec9bSSean Christopherson 
2285f95eec9bSSean Christopherson 		/*
2286f95eec9bSSean Christopherson 		 * Already invalid pages (previously active roots) are not on
2287f95eec9bSSean Christopherson 		 * the active page list.  See list_del() in the "else" case of
2288f95eec9bSSean Christopherson 		 * !sp->root_count.
2289f95eec9bSSean Christopherson 		 */
2290f95eec9bSSean Christopherson 		if (sp->role.invalid)
2291f95eec9bSSean Christopherson 			list_add(&sp->link, invalid_list);
2292f95eec9bSSean Christopherson 		else
2293c50d8ae3SPaolo Bonzini 			list_move(&sp->link, invalid_list);
2294c50d8ae3SPaolo Bonzini 		kvm_mod_used_mmu_pages(kvm, -1);
2295c50d8ae3SPaolo Bonzini 	} else {
2296f95eec9bSSean Christopherson 		/*
2297f95eec9bSSean Christopherson 		 * Remove the active root from the active page list, the root
2298f95eec9bSSean Christopherson 		 * will be explicitly freed when the root_count hits zero.
2299f95eec9bSSean Christopherson 		 */
2300f95eec9bSSean Christopherson 		list_del(&sp->link);
2301c50d8ae3SPaolo Bonzini 
2302c50d8ae3SPaolo Bonzini 		/*
2303c50d8ae3SPaolo Bonzini 		 * Obsolete pages cannot be used on any vCPUs, see the comment
2304c50d8ae3SPaolo Bonzini 		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2305c50d8ae3SPaolo Bonzini 		 * treats invalid shadow pages as being obsolete.
2306c50d8ae3SPaolo Bonzini 		 */
2307c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
2308c50d8ae3SPaolo Bonzini 			kvm_reload_remote_mmus(kvm);
2309c50d8ae3SPaolo Bonzini 	}
2310c50d8ae3SPaolo Bonzini 
2311c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
2312c50d8ae3SPaolo Bonzini 		unaccount_huge_nx_page(kvm, sp);
2313c50d8ae3SPaolo Bonzini 
2314c50d8ae3SPaolo Bonzini 	sp->role.invalid = 1;
2315c50d8ae3SPaolo Bonzini 	return list_unstable;
2316c50d8ae3SPaolo Bonzini }
2317c50d8ae3SPaolo Bonzini 
2318c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2319c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list)
2320c50d8ae3SPaolo Bonzini {
2321c50d8ae3SPaolo Bonzini 	int nr_zapped;
2322c50d8ae3SPaolo Bonzini 
2323c50d8ae3SPaolo Bonzini 	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2324c50d8ae3SPaolo Bonzini 	return nr_zapped;
2325c50d8ae3SPaolo Bonzini }
2326c50d8ae3SPaolo Bonzini 
2327c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2328c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list)
2329c50d8ae3SPaolo Bonzini {
2330c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *nsp;
2331c50d8ae3SPaolo Bonzini 
2332c50d8ae3SPaolo Bonzini 	if (list_empty(invalid_list))
2333c50d8ae3SPaolo Bonzini 		return;
2334c50d8ae3SPaolo Bonzini 
2335c50d8ae3SPaolo Bonzini 	/*
2336c50d8ae3SPaolo Bonzini 	 * We need to make sure everyone sees our modifications to
2337c50d8ae3SPaolo Bonzini 	 * the page tables and see changes to vcpu->mode here. The barrier
2338c50d8ae3SPaolo Bonzini 	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
2339c50d8ae3SPaolo Bonzini 	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2340c50d8ae3SPaolo Bonzini 	 *
2341c50d8ae3SPaolo Bonzini 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2342c50d8ae3SPaolo Bonzini 	 * guest mode and/or lockless shadow page table walks.
2343c50d8ae3SPaolo Bonzini 	 */
2344c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs(kvm);
2345c50d8ae3SPaolo Bonzini 
2346c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2347c50d8ae3SPaolo Bonzini 		WARN_ON(!sp->role.invalid || sp->root_count);
2348c50d8ae3SPaolo Bonzini 		kvm_mmu_free_page(sp);
2349c50d8ae3SPaolo Bonzini 	}
2350c50d8ae3SPaolo Bonzini }
2351c50d8ae3SPaolo Bonzini 
23526b82ef2cSSean Christopherson static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
23536b82ef2cSSean Christopherson 						  unsigned long nr_to_zap)
2354c50d8ae3SPaolo Bonzini {
23556b82ef2cSSean Christopherson 	unsigned long total_zapped = 0;
23566b82ef2cSSean Christopherson 	struct kvm_mmu_page *sp, *tmp;
2357ba7888ddSSean Christopherson 	LIST_HEAD(invalid_list);
23586b82ef2cSSean Christopherson 	bool unstable;
23596b82ef2cSSean Christopherson 	int nr_zapped;
2360c50d8ae3SPaolo Bonzini 
2361c50d8ae3SPaolo Bonzini 	if (list_empty(&kvm->arch.active_mmu_pages))
2362ba7888ddSSean Christopherson 		return 0;
2363c50d8ae3SPaolo Bonzini 
23646b82ef2cSSean Christopherson restart:
23658fc51726SSean Christopherson 	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
23666b82ef2cSSean Christopherson 		/*
23676b82ef2cSSean Christopherson 		 * Don't zap active root pages, the page itself can't be freed
23686b82ef2cSSean Christopherson 		 * and zapping it will just force vCPUs to realloc and reload.
23696b82ef2cSSean Christopherson 		 */
23706b82ef2cSSean Christopherson 		if (sp->root_count)
23716b82ef2cSSean Christopherson 			continue;
23726b82ef2cSSean Christopherson 
23736b82ef2cSSean Christopherson 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
23746b82ef2cSSean Christopherson 						      &nr_zapped);
23756b82ef2cSSean Christopherson 		total_zapped += nr_zapped;
23766b82ef2cSSean Christopherson 		if (total_zapped >= nr_to_zap)
2377ba7888ddSSean Christopherson 			break;
2378ba7888ddSSean Christopherson 
23796b82ef2cSSean Christopherson 		if (unstable)
23806b82ef2cSSean Christopherson 			goto restart;
2381ba7888ddSSean Christopherson 	}
23826b82ef2cSSean Christopherson 
23836b82ef2cSSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
23846b82ef2cSSean Christopherson 
23856b82ef2cSSean Christopherson 	kvm->stat.mmu_recycled += total_zapped;
23866b82ef2cSSean Christopherson 	return total_zapped;
23876b82ef2cSSean Christopherson }
23886b82ef2cSSean Christopherson 
2389afe8d7e6SSean Christopherson static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2390afe8d7e6SSean Christopherson {
2391afe8d7e6SSean Christopherson 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2392afe8d7e6SSean Christopherson 		return kvm->arch.n_max_mmu_pages -
2393afe8d7e6SSean Christopherson 			kvm->arch.n_used_mmu_pages;
2394afe8d7e6SSean Christopherson 
2395afe8d7e6SSean Christopherson 	return 0;
2396c50d8ae3SPaolo Bonzini }
2397c50d8ae3SPaolo Bonzini 
2398ba7888ddSSean Christopherson static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2399ba7888ddSSean Christopherson {
24006b82ef2cSSean Christopherson 	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2401ba7888ddSSean Christopherson 
24026b82ef2cSSean Christopherson 	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2403ba7888ddSSean Christopherson 		return 0;
2404ba7888ddSSean Christopherson 
24056b82ef2cSSean Christopherson 	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2406ba7888ddSSean Christopherson 
24076e6ec584SSean Christopherson 	/*
24086e6ec584SSean Christopherson 	 * Note, this check is intentionally soft, it only guarantees that one
24096e6ec584SSean Christopherson 	 * page is available, while the caller may end up allocating as many as
24106e6ec584SSean Christopherson 	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
24116e6ec584SSean Christopherson 	 * exceeding the (arbitrary by default) limit will not harm the host,
24126e6ec584SSean Christopherson 	 * being too agressive may unnecessarily kill the guest, and getting an
24136e6ec584SSean Christopherson 	 * exact count is far more trouble than it's worth, especially in the
24146e6ec584SSean Christopherson 	 * page fault paths.
24156e6ec584SSean Christopherson 	 */
2416ba7888ddSSean Christopherson 	if (!kvm_mmu_available_pages(vcpu->kvm))
2417ba7888ddSSean Christopherson 		return -ENOSPC;
2418ba7888ddSSean Christopherson 	return 0;
2419ba7888ddSSean Christopherson }
2420ba7888ddSSean Christopherson 
2421c50d8ae3SPaolo Bonzini /*
2422c50d8ae3SPaolo Bonzini  * Changing the number of mmu pages allocated to the vm
2423c50d8ae3SPaolo Bonzini  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
2424c50d8ae3SPaolo Bonzini  */
2425c50d8ae3SPaolo Bonzini void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2426c50d8ae3SPaolo Bonzini {
2427531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
2428c50d8ae3SPaolo Bonzini 
2429c50d8ae3SPaolo Bonzini 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
24306b82ef2cSSean Christopherson 		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
24316b82ef2cSSean Christopherson 						  goal_nr_mmu_pages);
2432c50d8ae3SPaolo Bonzini 
2433c50d8ae3SPaolo Bonzini 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2434c50d8ae3SPaolo Bonzini 	}
2435c50d8ae3SPaolo Bonzini 
2436c50d8ae3SPaolo Bonzini 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2437c50d8ae3SPaolo Bonzini 
2438531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
2439c50d8ae3SPaolo Bonzini }
2440c50d8ae3SPaolo Bonzini 
2441c50d8ae3SPaolo Bonzini int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2442c50d8ae3SPaolo Bonzini {
2443c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2444c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2445c50d8ae3SPaolo Bonzini 	int r;
2446c50d8ae3SPaolo Bonzini 
2447c50d8ae3SPaolo Bonzini 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2448c50d8ae3SPaolo Bonzini 	r = 0;
2449531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
2450c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2451c50d8ae3SPaolo Bonzini 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2452c50d8ae3SPaolo Bonzini 			 sp->role.word);
2453c50d8ae3SPaolo Bonzini 		r = 1;
2454c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2455c50d8ae3SPaolo Bonzini 	}
2456c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2457531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
2458c50d8ae3SPaolo Bonzini 
2459c50d8ae3SPaolo Bonzini 	return r;
2460c50d8ae3SPaolo Bonzini }
246196ad91aeSSean Christopherson 
246296ad91aeSSean Christopherson static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
246396ad91aeSSean Christopherson {
246496ad91aeSSean Christopherson 	gpa_t gpa;
246596ad91aeSSean Christopherson 	int r;
246696ad91aeSSean Christopherson 
246796ad91aeSSean Christopherson 	if (vcpu->arch.mmu->direct_map)
246896ad91aeSSean Christopherson 		return 0;
246996ad91aeSSean Christopherson 
247096ad91aeSSean Christopherson 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
247196ad91aeSSean Christopherson 
247296ad91aeSSean Christopherson 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
247396ad91aeSSean Christopherson 
247496ad91aeSSean Christopherson 	return r;
247596ad91aeSSean Christopherson }
2476c50d8ae3SPaolo Bonzini 
2477c50d8ae3SPaolo Bonzini static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2478c50d8ae3SPaolo Bonzini {
2479c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_unsync_page(sp);
2480c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_unsync;
2481c50d8ae3SPaolo Bonzini 	sp->unsync = 1;
2482c50d8ae3SPaolo Bonzini 
2483c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
2484c50d8ae3SPaolo Bonzini }
2485c50d8ae3SPaolo Bonzini 
24865a9624afSPaolo Bonzini bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
2487c50d8ae3SPaolo Bonzini 			    bool can_unsync)
2488c50d8ae3SPaolo Bonzini {
2489c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2490c50d8ae3SPaolo Bonzini 
2491c50d8ae3SPaolo Bonzini 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
2492c50d8ae3SPaolo Bonzini 		return true;
2493c50d8ae3SPaolo Bonzini 
2494c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2495c50d8ae3SPaolo Bonzini 		if (!can_unsync)
2496c50d8ae3SPaolo Bonzini 			return true;
2497c50d8ae3SPaolo Bonzini 
2498c50d8ae3SPaolo Bonzini 		if (sp->unsync)
2499c50d8ae3SPaolo Bonzini 			continue;
2500c50d8ae3SPaolo Bonzini 
25013bae0459SSean Christopherson 		WARN_ON(sp->role.level != PG_LEVEL_4K);
2502c50d8ae3SPaolo Bonzini 		kvm_unsync_page(vcpu, sp);
2503c50d8ae3SPaolo Bonzini 	}
2504c50d8ae3SPaolo Bonzini 
2505c50d8ae3SPaolo Bonzini 	/*
2506c50d8ae3SPaolo Bonzini 	 * We need to ensure that the marking of unsync pages is visible
2507c50d8ae3SPaolo Bonzini 	 * before the SPTE is updated to allow writes because
2508c50d8ae3SPaolo Bonzini 	 * kvm_mmu_sync_roots() checks the unsync flags without holding
2509c50d8ae3SPaolo Bonzini 	 * the MMU lock and so can race with this. If the SPTE was updated
2510c50d8ae3SPaolo Bonzini 	 * before the page had been marked as unsync-ed, something like the
2511c50d8ae3SPaolo Bonzini 	 * following could happen:
2512c50d8ae3SPaolo Bonzini 	 *
2513c50d8ae3SPaolo Bonzini 	 * CPU 1                    CPU 2
2514c50d8ae3SPaolo Bonzini 	 * ---------------------------------------------------------------------
2515c50d8ae3SPaolo Bonzini 	 * 1.2 Host updates SPTE
2516c50d8ae3SPaolo Bonzini 	 *     to be writable
2517c50d8ae3SPaolo Bonzini 	 *                      2.1 Guest writes a GPTE for GVA X.
2518c50d8ae3SPaolo Bonzini 	 *                          (GPTE being in the guest page table shadowed
2519c50d8ae3SPaolo Bonzini 	 *                           by the SP from CPU 1.)
2520c50d8ae3SPaolo Bonzini 	 *                          This reads SPTE during the page table walk.
2521c50d8ae3SPaolo Bonzini 	 *                          Since SPTE.W is read as 1, there is no
2522c50d8ae3SPaolo Bonzini 	 *                          fault.
2523c50d8ae3SPaolo Bonzini 	 *
2524c50d8ae3SPaolo Bonzini 	 *                      2.2 Guest issues TLB flush.
2525c50d8ae3SPaolo Bonzini 	 *                          That causes a VM Exit.
2526c50d8ae3SPaolo Bonzini 	 *
2527c50d8ae3SPaolo Bonzini 	 *                      2.3 kvm_mmu_sync_pages() reads sp->unsync.
2528c50d8ae3SPaolo Bonzini 	 *                          Since it is false, so it just returns.
2529c50d8ae3SPaolo Bonzini 	 *
2530c50d8ae3SPaolo Bonzini 	 *                      2.4 Guest accesses GVA X.
2531c50d8ae3SPaolo Bonzini 	 *                          Since the mapping in the SP was not updated,
2532c50d8ae3SPaolo Bonzini 	 *                          so the old mapping for GVA X incorrectly
2533c50d8ae3SPaolo Bonzini 	 *                          gets used.
2534c50d8ae3SPaolo Bonzini 	 * 1.1 Host marks SP
2535c50d8ae3SPaolo Bonzini 	 *     as unsync
2536c50d8ae3SPaolo Bonzini 	 *     (sp->unsync = true)
2537c50d8ae3SPaolo Bonzini 	 *
2538c50d8ae3SPaolo Bonzini 	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
2539c50d8ae3SPaolo Bonzini 	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
2540c50d8ae3SPaolo Bonzini 	 * pairs with this write barrier.
2541c50d8ae3SPaolo Bonzini 	 */
2542c50d8ae3SPaolo Bonzini 	smp_wmb();
2543c50d8ae3SPaolo Bonzini 
2544c50d8ae3SPaolo Bonzini 	return false;
2545c50d8ae3SPaolo Bonzini }
2546c50d8ae3SPaolo Bonzini 
2547799a4190SBen Gardon static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2548799a4190SBen Gardon 		    unsigned int pte_access, int level,
2549799a4190SBen Gardon 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2550799a4190SBen Gardon 		    bool can_unsync, bool host_writable)
2551799a4190SBen Gardon {
2552799a4190SBen Gardon 	u64 spte;
2553799a4190SBen Gardon 	struct kvm_mmu_page *sp;
2554799a4190SBen Gardon 	int ret;
2555799a4190SBen Gardon 
2556799a4190SBen Gardon 	sp = sptep_to_sp(sptep);
2557799a4190SBen Gardon 
2558799a4190SBen Gardon 	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
2559799a4190SBen Gardon 			can_unsync, host_writable, sp_ad_disabled(sp), &spte);
2560799a4190SBen Gardon 
2561799a4190SBen Gardon 	if (spte & PT_WRITABLE_MASK)
2562799a4190SBen Gardon 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
2563799a4190SBen Gardon 
256412703759SSean Christopherson 	if (*sptep == spte)
256512703759SSean Christopherson 		ret |= SET_SPTE_SPURIOUS;
256612703759SSean Christopherson 	else if (mmu_spte_update(sptep, spte))
2567c50d8ae3SPaolo Bonzini 		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
2568c50d8ae3SPaolo Bonzini 	return ret;
2569c50d8ae3SPaolo Bonzini }
2570c50d8ae3SPaolo Bonzini 
25710a2b64c5SBen Gardon static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2572e88b8093SSean Christopherson 			unsigned int pte_access, bool write_fault, int level,
25730a2b64c5SBen Gardon 			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
25740a2b64c5SBen Gardon 			bool host_writable)
2575c50d8ae3SPaolo Bonzini {
2576c50d8ae3SPaolo Bonzini 	int was_rmapped = 0;
2577c50d8ae3SPaolo Bonzini 	int rmap_count;
2578c50d8ae3SPaolo Bonzini 	int set_spte_ret;
2579c4371c2aSSean Christopherson 	int ret = RET_PF_FIXED;
2580c50d8ae3SPaolo Bonzini 	bool flush = false;
2581c50d8ae3SPaolo Bonzini 
2582c50d8ae3SPaolo Bonzini 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2583c50d8ae3SPaolo Bonzini 		 *sptep, write_fault, gfn);
2584c50d8ae3SPaolo Bonzini 
2585a54aa15cSSean Christopherson 	if (unlikely(is_noslot_pfn(pfn))) {
2586a54aa15cSSean Christopherson 		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2587a54aa15cSSean Christopherson 		return RET_PF_EMULATE;
2588a54aa15cSSean Christopherson 	}
2589a54aa15cSSean Christopherson 
2590c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep)) {
2591c50d8ae3SPaolo Bonzini 		/*
2592c50d8ae3SPaolo Bonzini 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2593c50d8ae3SPaolo Bonzini 		 * the parent of the now unreachable PTE.
2594c50d8ae3SPaolo Bonzini 		 */
25953bae0459SSean Christopherson 		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2596c50d8ae3SPaolo Bonzini 			struct kvm_mmu_page *child;
2597c50d8ae3SPaolo Bonzini 			u64 pte = *sptep;
2598c50d8ae3SPaolo Bonzini 
2599e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2600c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, sptep);
2601c50d8ae3SPaolo Bonzini 			flush = true;
2602c50d8ae3SPaolo Bonzini 		} else if (pfn != spte_to_pfn(*sptep)) {
2603c50d8ae3SPaolo Bonzini 			pgprintk("hfn old %llx new %llx\n",
2604c50d8ae3SPaolo Bonzini 				 spte_to_pfn(*sptep), pfn);
2605c50d8ae3SPaolo Bonzini 			drop_spte(vcpu->kvm, sptep);
2606c50d8ae3SPaolo Bonzini 			flush = true;
2607c50d8ae3SPaolo Bonzini 		} else
2608c50d8ae3SPaolo Bonzini 			was_rmapped = 1;
2609c50d8ae3SPaolo Bonzini 	}
2610c50d8ae3SPaolo Bonzini 
2611c50d8ae3SPaolo Bonzini 	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
2612c50d8ae3SPaolo Bonzini 				speculative, true, host_writable);
2613c50d8ae3SPaolo Bonzini 	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
2614c50d8ae3SPaolo Bonzini 		if (write_fault)
2615c50d8ae3SPaolo Bonzini 			ret = RET_PF_EMULATE;
26168c8560b8SSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2617c50d8ae3SPaolo Bonzini 	}
2618c50d8ae3SPaolo Bonzini 
2619c50d8ae3SPaolo Bonzini 	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
2620c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
2621c50d8ae3SPaolo Bonzini 				KVM_PAGES_PER_HPAGE(level));
2622c50d8ae3SPaolo Bonzini 
262312703759SSean Christopherson 	/*
262412703759SSean Christopherson 	 * The fault is fully spurious if and only if the new SPTE and old SPTE
262512703759SSean Christopherson 	 * are identical, and emulation is not required.
262612703759SSean Christopherson 	 */
262712703759SSean Christopherson 	if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
262812703759SSean Christopherson 		WARN_ON_ONCE(!was_rmapped);
262912703759SSean Christopherson 		return RET_PF_SPURIOUS;
263012703759SSean Christopherson 	}
263112703759SSean Christopherson 
2632c50d8ae3SPaolo Bonzini 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2633c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_set_spte(level, gfn, sptep);
2634c50d8ae3SPaolo Bonzini 	if (!was_rmapped && is_large_pte(*sptep))
2635c50d8ae3SPaolo Bonzini 		++vcpu->kvm->stat.lpages;
2636c50d8ae3SPaolo Bonzini 
2637c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep)) {
2638c50d8ae3SPaolo Bonzini 		if (!was_rmapped) {
2639c50d8ae3SPaolo Bonzini 			rmap_count = rmap_add(vcpu, sptep, gfn);
2640c50d8ae3SPaolo Bonzini 			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2641c50d8ae3SPaolo Bonzini 				rmap_recycle(vcpu, sptep, gfn);
2642c50d8ae3SPaolo Bonzini 		}
2643c50d8ae3SPaolo Bonzini 	}
2644c50d8ae3SPaolo Bonzini 
2645c50d8ae3SPaolo Bonzini 	return ret;
2646c50d8ae3SPaolo Bonzini }
2647c50d8ae3SPaolo Bonzini 
2648c50d8ae3SPaolo Bonzini static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2649c50d8ae3SPaolo Bonzini 				     bool no_dirty_log)
2650c50d8ae3SPaolo Bonzini {
2651c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
2652c50d8ae3SPaolo Bonzini 
2653c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2654c50d8ae3SPaolo Bonzini 	if (!slot)
2655c50d8ae3SPaolo Bonzini 		return KVM_PFN_ERR_FAULT;
2656c50d8ae3SPaolo Bonzini 
2657c50d8ae3SPaolo Bonzini 	return gfn_to_pfn_memslot_atomic(slot, gfn);
2658c50d8ae3SPaolo Bonzini }
2659c50d8ae3SPaolo Bonzini 
2660c50d8ae3SPaolo Bonzini static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2661c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp,
2662c50d8ae3SPaolo Bonzini 				    u64 *start, u64 *end)
2663c50d8ae3SPaolo Bonzini {
2664c50d8ae3SPaolo Bonzini 	struct page *pages[PTE_PREFETCH_NUM];
2665c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
26660a2b64c5SBen Gardon 	unsigned int access = sp->role.access;
2667c50d8ae3SPaolo Bonzini 	int i, ret;
2668c50d8ae3SPaolo Bonzini 	gfn_t gfn;
2669c50d8ae3SPaolo Bonzini 
2670c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2671c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2672c50d8ae3SPaolo Bonzini 	if (!slot)
2673c50d8ae3SPaolo Bonzini 		return -1;
2674c50d8ae3SPaolo Bonzini 
2675c50d8ae3SPaolo Bonzini 	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2676c50d8ae3SPaolo Bonzini 	if (ret <= 0)
2677c50d8ae3SPaolo Bonzini 		return -1;
2678c50d8ae3SPaolo Bonzini 
2679c50d8ae3SPaolo Bonzini 	for (i = 0; i < ret; i++, gfn++, start++) {
2680e88b8093SSean Christopherson 		mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
2681c50d8ae3SPaolo Bonzini 			     page_to_pfn(pages[i]), true, true);
2682c50d8ae3SPaolo Bonzini 		put_page(pages[i]);
2683c50d8ae3SPaolo Bonzini 	}
2684c50d8ae3SPaolo Bonzini 
2685c50d8ae3SPaolo Bonzini 	return 0;
2686c50d8ae3SPaolo Bonzini }
2687c50d8ae3SPaolo Bonzini 
2688c50d8ae3SPaolo Bonzini static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2689c50d8ae3SPaolo Bonzini 				  struct kvm_mmu_page *sp, u64 *sptep)
2690c50d8ae3SPaolo Bonzini {
2691c50d8ae3SPaolo Bonzini 	u64 *spte, *start = NULL;
2692c50d8ae3SPaolo Bonzini 	int i;
2693c50d8ae3SPaolo Bonzini 
2694c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
2695c50d8ae3SPaolo Bonzini 
2696c50d8ae3SPaolo Bonzini 	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2697c50d8ae3SPaolo Bonzini 	spte = sp->spt + i;
2698c50d8ae3SPaolo Bonzini 
2699c50d8ae3SPaolo Bonzini 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2700c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*spte) || spte == sptep) {
2701c50d8ae3SPaolo Bonzini 			if (!start)
2702c50d8ae3SPaolo Bonzini 				continue;
2703c50d8ae3SPaolo Bonzini 			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2704c50d8ae3SPaolo Bonzini 				break;
2705c50d8ae3SPaolo Bonzini 			start = NULL;
2706c50d8ae3SPaolo Bonzini 		} else if (!start)
2707c50d8ae3SPaolo Bonzini 			start = spte;
2708c50d8ae3SPaolo Bonzini 	}
2709c50d8ae3SPaolo Bonzini }
2710c50d8ae3SPaolo Bonzini 
2711c50d8ae3SPaolo Bonzini static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2712c50d8ae3SPaolo Bonzini {
2713c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2714c50d8ae3SPaolo Bonzini 
271557354682SSean Christopherson 	sp = sptep_to_sp(sptep);
2716c50d8ae3SPaolo Bonzini 
2717c50d8ae3SPaolo Bonzini 	/*
2718c50d8ae3SPaolo Bonzini 	 * Without accessed bits, there's no way to distinguish between
2719c50d8ae3SPaolo Bonzini 	 * actually accessed translations and prefetched, so disable pte
2720c50d8ae3SPaolo Bonzini 	 * prefetch if accessed bits aren't available.
2721c50d8ae3SPaolo Bonzini 	 */
2722c50d8ae3SPaolo Bonzini 	if (sp_ad_disabled(sp))
2723c50d8ae3SPaolo Bonzini 		return;
2724c50d8ae3SPaolo Bonzini 
27253bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
2726c50d8ae3SPaolo Bonzini 		return;
2727c50d8ae3SPaolo Bonzini 
27284a42d848SDavid Stevens 	/*
27294a42d848SDavid Stevens 	 * If addresses are being invalidated, skip prefetching to avoid
27304a42d848SDavid Stevens 	 * accidentally prefetching those addresses.
27314a42d848SDavid Stevens 	 */
27324a42d848SDavid Stevens 	if (unlikely(vcpu->kvm->mmu_notifier_count))
27334a42d848SDavid Stevens 		return;
27344a42d848SDavid Stevens 
2735c50d8ae3SPaolo Bonzini 	__direct_pte_prefetch(vcpu, sp, sptep);
2736c50d8ae3SPaolo Bonzini }
2737c50d8ae3SPaolo Bonzini 
27381b6d9d9eSSean Christopherson static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
27391b6d9d9eSSean Christopherson 				  struct kvm_memory_slot *slot)
2740db543216SSean Christopherson {
2741db543216SSean Christopherson 	unsigned long hva;
2742db543216SSean Christopherson 	pte_t *pte;
2743db543216SSean Christopherson 	int level;
2744db543216SSean Christopherson 
2745e851265aSSean Christopherson 	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
27463bae0459SSean Christopherson 		return PG_LEVEL_4K;
2747db543216SSean Christopherson 
2748293e306eSSean Christopherson 	/*
2749293e306eSSean Christopherson 	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
2750293e306eSSean Christopherson 	 * is not solely for performance, it's also necessary to avoid the
2751293e306eSSean Christopherson 	 * "writable" check in __gfn_to_hva_many(), which will always fail on
2752293e306eSSean Christopherson 	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
2753293e306eSSean Christopherson 	 * page fault steps have already verified the guest isn't writing a
2754293e306eSSean Christopherson 	 * read-only memslot.
2755293e306eSSean Christopherson 	 */
2756db543216SSean Christopherson 	hva = __gfn_to_hva_memslot(slot, gfn);
2757db543216SSean Christopherson 
27581b6d9d9eSSean Christopherson 	pte = lookup_address_in_mm(kvm->mm, hva, &level);
2759db543216SSean Christopherson 	if (unlikely(!pte))
27603bae0459SSean Christopherson 		return PG_LEVEL_4K;
2761db543216SSean Christopherson 
2762db543216SSean Christopherson 	return level;
2763db543216SSean Christopherson }
2764db543216SSean Christopherson 
27651b6d9d9eSSean Christopherson int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
27661b6d9d9eSSean Christopherson 			      gfn_t gfn, kvm_pfn_t pfn, int max_level)
27671b6d9d9eSSean Christopherson {
27681b6d9d9eSSean Christopherson 	struct kvm_lpage_info *linfo;
27691b6d9d9eSSean Christopherson 
27701b6d9d9eSSean Christopherson 	max_level = min(max_level, max_huge_page_level);
27711b6d9d9eSSean Christopherson 	for ( ; max_level > PG_LEVEL_4K; max_level--) {
27721b6d9d9eSSean Christopherson 		linfo = lpage_info_slot(gfn, slot, max_level);
27731b6d9d9eSSean Christopherson 		if (!linfo->disallow_lpage)
27741b6d9d9eSSean Christopherson 			break;
27751b6d9d9eSSean Christopherson 	}
27761b6d9d9eSSean Christopherson 
27771b6d9d9eSSean Christopherson 	if (max_level == PG_LEVEL_4K)
27781b6d9d9eSSean Christopherson 		return PG_LEVEL_4K;
27791b6d9d9eSSean Christopherson 
27801b6d9d9eSSean Christopherson 	return host_pfn_mapping_level(kvm, gfn, pfn, slot);
27811b6d9d9eSSean Christopherson }
27821b6d9d9eSSean Christopherson 
2783bb18842eSBen Gardon int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
27843cf06612SSean Christopherson 			    int max_level, kvm_pfn_t *pfnp,
27853cf06612SSean Christopherson 			    bool huge_page_disallowed, int *req_level)
27860885904dSSean Christopherson {
2787293e306eSSean Christopherson 	struct kvm_memory_slot *slot;
27880885904dSSean Christopherson 	kvm_pfn_t pfn = *pfnp;
278917eff019SSean Christopherson 	kvm_pfn_t mask;
279083f06fa7SSean Christopherson 	int level;
27910885904dSSean Christopherson 
27923cf06612SSean Christopherson 	*req_level = PG_LEVEL_4K;
27933cf06612SSean Christopherson 
27943bae0459SSean Christopherson 	if (unlikely(max_level == PG_LEVEL_4K))
27953bae0459SSean Christopherson 		return PG_LEVEL_4K;
279617eff019SSean Christopherson 
2797e851265aSSean Christopherson 	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
27983bae0459SSean Christopherson 		return PG_LEVEL_4K;
279917eff019SSean Christopherson 
2800293e306eSSean Christopherson 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
2801293e306eSSean Christopherson 	if (!slot)
28023bae0459SSean Christopherson 		return PG_LEVEL_4K;
2803293e306eSSean Christopherson 
28041b6d9d9eSSean Christopherson 	level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
28053bae0459SSean Christopherson 	if (level == PG_LEVEL_4K)
280683f06fa7SSean Christopherson 		return level;
280717eff019SSean Christopherson 
28083cf06612SSean Christopherson 	*req_level = level = min(level, max_level);
28093cf06612SSean Christopherson 
28103cf06612SSean Christopherson 	/*
28113cf06612SSean Christopherson 	 * Enforce the iTLB multihit workaround after capturing the requested
28123cf06612SSean Christopherson 	 * level, which will be used to do precise, accurate accounting.
28133cf06612SSean Christopherson 	 */
28143cf06612SSean Christopherson 	if (huge_page_disallowed)
28153cf06612SSean Christopherson 		return PG_LEVEL_4K;
28164cd071d1SSean Christopherson 
28170885904dSSean Christopherson 	/*
28184cd071d1SSean Christopherson 	 * mmu_notifier_retry() was successful and mmu_lock is held, so
28194cd071d1SSean Christopherson 	 * the pmd can't be split from under us.
28200885904dSSean Christopherson 	 */
28210885904dSSean Christopherson 	mask = KVM_PAGES_PER_HPAGE(level) - 1;
28220885904dSSean Christopherson 	VM_BUG_ON((gfn & mask) != (pfn & mask));
28234cd071d1SSean Christopherson 	*pfnp = pfn & ~mask;
282483f06fa7SSean Christopherson 
282583f06fa7SSean Christopherson 	return level;
28260885904dSSean Christopherson }
28270885904dSSean Christopherson 
2828bb18842eSBen Gardon void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
2829bb18842eSBen Gardon 				kvm_pfn_t *pfnp, int *goal_levelp)
2830c50d8ae3SPaolo Bonzini {
2831bb18842eSBen Gardon 	int level = *goal_levelp;
2832c50d8ae3SPaolo Bonzini 
28337d945312SBen Gardon 	if (cur_level == level && level > PG_LEVEL_4K &&
2834c50d8ae3SPaolo Bonzini 	    is_shadow_present_pte(spte) &&
2835c50d8ae3SPaolo Bonzini 	    !is_large_pte(spte)) {
2836c50d8ae3SPaolo Bonzini 		/*
2837c50d8ae3SPaolo Bonzini 		 * A small SPTE exists for this pfn, but FNAME(fetch)
2838c50d8ae3SPaolo Bonzini 		 * and __direct_map would like to create a large PTE
2839c50d8ae3SPaolo Bonzini 		 * instead: just force them to go down another level,
2840c50d8ae3SPaolo Bonzini 		 * patching back for them into pfn the next 9 bits of
2841c50d8ae3SPaolo Bonzini 		 * the address.
2842c50d8ae3SPaolo Bonzini 		 */
28437d945312SBen Gardon 		u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
28447d945312SBen Gardon 				KVM_PAGES_PER_HPAGE(level - 1);
2845c50d8ae3SPaolo Bonzini 		*pfnp |= gfn & page_mask;
2846bb18842eSBen Gardon 		(*goal_levelp)--;
2847c50d8ae3SPaolo Bonzini 	}
2848c50d8ae3SPaolo Bonzini }
2849c50d8ae3SPaolo Bonzini 
28506c2fd34fSSean Christopherson static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
285183f06fa7SSean Christopherson 			int map_writable, int max_level, kvm_pfn_t pfn,
28526c2fd34fSSean Christopherson 			bool prefault, bool is_tdp)
2853c50d8ae3SPaolo Bonzini {
28546c2fd34fSSean Christopherson 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
28556c2fd34fSSean Christopherson 	bool write = error_code & PFERR_WRITE_MASK;
28566c2fd34fSSean Christopherson 	bool exec = error_code & PFERR_FETCH_MASK;
28576c2fd34fSSean Christopherson 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
2858c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator it;
2859c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
28603cf06612SSean Christopherson 	int level, req_level, ret;
2861c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
2862c50d8ae3SPaolo Bonzini 	gfn_t base_gfn = gfn;
2863c50d8ae3SPaolo Bonzini 
28640c7a98e3SSean Christopherson 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
2865c50d8ae3SPaolo Bonzini 		return RET_PF_RETRY;
2866c50d8ae3SPaolo Bonzini 
28673cf06612SSean Christopherson 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
28683cf06612SSean Christopherson 					huge_page_disallowed, &req_level);
28694cd071d1SSean Christopherson 
2870c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
2871c50d8ae3SPaolo Bonzini 	for_each_shadow_entry(vcpu, gpa, it) {
2872c50d8ae3SPaolo Bonzini 		/*
2873c50d8ae3SPaolo Bonzini 		 * We cannot overwrite existing page tables with an NX
2874c50d8ae3SPaolo Bonzini 		 * large page, as the leaf could be executable.
2875c50d8ae3SPaolo Bonzini 		 */
2876dcc70651SSean Christopherson 		if (nx_huge_page_workaround_enabled)
28777d945312SBen Gardon 			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
28787d945312SBen Gardon 						   &pfn, &level);
2879c50d8ae3SPaolo Bonzini 
2880c50d8ae3SPaolo Bonzini 		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
2881c50d8ae3SPaolo Bonzini 		if (it.level == level)
2882c50d8ae3SPaolo Bonzini 			break;
2883c50d8ae3SPaolo Bonzini 
2884c50d8ae3SPaolo Bonzini 		drop_large_spte(vcpu, it.sptep);
2885c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(*it.sptep)) {
2886c50d8ae3SPaolo Bonzini 			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
2887c50d8ae3SPaolo Bonzini 					      it.level - 1, true, ACC_ALL);
2888c50d8ae3SPaolo Bonzini 
2889c50d8ae3SPaolo Bonzini 			link_shadow_page(vcpu, it.sptep, sp);
28905bcaf3e1SSean Christopherson 			if (is_tdp && huge_page_disallowed &&
28915bcaf3e1SSean Christopherson 			    req_level >= it.level)
2892c50d8ae3SPaolo Bonzini 				account_huge_nx_page(vcpu->kvm, sp);
2893c50d8ae3SPaolo Bonzini 		}
2894c50d8ae3SPaolo Bonzini 	}
2895c50d8ae3SPaolo Bonzini 
2896c50d8ae3SPaolo Bonzini 	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
2897c50d8ae3SPaolo Bonzini 			   write, level, base_gfn, pfn, prefault,
2898c50d8ae3SPaolo Bonzini 			   map_writable);
289912703759SSean Christopherson 	if (ret == RET_PF_SPURIOUS)
290012703759SSean Christopherson 		return ret;
290112703759SSean Christopherson 
2902c50d8ae3SPaolo Bonzini 	direct_pte_prefetch(vcpu, it.sptep);
2903c50d8ae3SPaolo Bonzini 	++vcpu->stat.pf_fixed;
2904c50d8ae3SPaolo Bonzini 	return ret;
2905c50d8ae3SPaolo Bonzini }
2906c50d8ae3SPaolo Bonzini 
2907c50d8ae3SPaolo Bonzini static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2908c50d8ae3SPaolo Bonzini {
2909c50d8ae3SPaolo Bonzini 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
2910c50d8ae3SPaolo Bonzini }
2911c50d8ae3SPaolo Bonzini 
2912c50d8ae3SPaolo Bonzini static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2913c50d8ae3SPaolo Bonzini {
2914c50d8ae3SPaolo Bonzini 	/*
2915c50d8ae3SPaolo Bonzini 	 * Do not cache the mmio info caused by writing the readonly gfn
2916c50d8ae3SPaolo Bonzini 	 * into the spte otherwise read access on readonly gfn also can
2917c50d8ae3SPaolo Bonzini 	 * caused mmio page fault and treat it as mmio access.
2918c50d8ae3SPaolo Bonzini 	 */
2919c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_RO_FAULT)
2920c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
2921c50d8ae3SPaolo Bonzini 
2922c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_HWPOISON) {
2923c50d8ae3SPaolo Bonzini 		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
2924c50d8ae3SPaolo Bonzini 		return RET_PF_RETRY;
2925c50d8ae3SPaolo Bonzini 	}
2926c50d8ae3SPaolo Bonzini 
2927c50d8ae3SPaolo Bonzini 	return -EFAULT;
2928c50d8ae3SPaolo Bonzini }
2929c50d8ae3SPaolo Bonzini 
2930c50d8ae3SPaolo Bonzini static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
29310a2b64c5SBen Gardon 				kvm_pfn_t pfn, unsigned int access,
29320a2b64c5SBen Gardon 				int *ret_val)
2933c50d8ae3SPaolo Bonzini {
2934c50d8ae3SPaolo Bonzini 	/* The pfn is invalid, report the error! */
2935c50d8ae3SPaolo Bonzini 	if (unlikely(is_error_pfn(pfn))) {
2936c50d8ae3SPaolo Bonzini 		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
2937c50d8ae3SPaolo Bonzini 		return true;
2938c50d8ae3SPaolo Bonzini 	}
2939c50d8ae3SPaolo Bonzini 
294030ab5901SSean Christopherson 	if (unlikely(is_noslot_pfn(pfn))) {
2941c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, gva, gfn,
2942c50d8ae3SPaolo Bonzini 				     access & shadow_mmio_access_mask);
294330ab5901SSean Christopherson 		/*
294430ab5901SSean Christopherson 		 * If MMIO caching is disabled, emulate immediately without
294530ab5901SSean Christopherson 		 * touching the shadow page tables as attempting to install an
294630ab5901SSean Christopherson 		 * MMIO SPTE will just be an expensive nop.
294730ab5901SSean Christopherson 		 */
294830ab5901SSean Christopherson 		if (unlikely(!shadow_mmio_value)) {
294930ab5901SSean Christopherson 			*ret_val = RET_PF_EMULATE;
295030ab5901SSean Christopherson 			return true;
295130ab5901SSean Christopherson 		}
295230ab5901SSean Christopherson 	}
2953c50d8ae3SPaolo Bonzini 
2954c50d8ae3SPaolo Bonzini 	return false;
2955c50d8ae3SPaolo Bonzini }
2956c50d8ae3SPaolo Bonzini 
2957c50d8ae3SPaolo Bonzini static bool page_fault_can_be_fast(u32 error_code)
2958c50d8ae3SPaolo Bonzini {
2959c50d8ae3SPaolo Bonzini 	/*
2960c50d8ae3SPaolo Bonzini 	 * Do not fix the mmio spte with invalid generation number which
2961c50d8ae3SPaolo Bonzini 	 * need to be updated by slow page fault path.
2962c50d8ae3SPaolo Bonzini 	 */
2963c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK))
2964c50d8ae3SPaolo Bonzini 		return false;
2965c50d8ae3SPaolo Bonzini 
2966c50d8ae3SPaolo Bonzini 	/* See if the page fault is due to an NX violation */
2967c50d8ae3SPaolo Bonzini 	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
2968c50d8ae3SPaolo Bonzini 		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
2969c50d8ae3SPaolo Bonzini 		return false;
2970c50d8ae3SPaolo Bonzini 
2971c50d8ae3SPaolo Bonzini 	/*
2972c50d8ae3SPaolo Bonzini 	 * #PF can be fast if:
2973c50d8ae3SPaolo Bonzini 	 * 1. The shadow page table entry is not present, which could mean that
2974c50d8ae3SPaolo Bonzini 	 *    the fault is potentially caused by access tracking (if enabled).
2975c50d8ae3SPaolo Bonzini 	 * 2. The shadow page table entry is present and the fault
2976c50d8ae3SPaolo Bonzini 	 *    is caused by write-protect, that means we just need change the W
2977c50d8ae3SPaolo Bonzini 	 *    bit of the spte which can be done out of mmu-lock.
2978c50d8ae3SPaolo Bonzini 	 *
2979c50d8ae3SPaolo Bonzini 	 * However, if access tracking is disabled we know that a non-present
2980c50d8ae3SPaolo Bonzini 	 * page must be a genuine page fault where we have to create a new SPTE.
2981c50d8ae3SPaolo Bonzini 	 * So, if access tracking is disabled, we return true only for write
2982c50d8ae3SPaolo Bonzini 	 * accesses to a present page.
2983c50d8ae3SPaolo Bonzini 	 */
2984c50d8ae3SPaolo Bonzini 
2985c50d8ae3SPaolo Bonzini 	return shadow_acc_track_mask != 0 ||
2986c50d8ae3SPaolo Bonzini 	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
2987c50d8ae3SPaolo Bonzini 		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
2988c50d8ae3SPaolo Bonzini }
2989c50d8ae3SPaolo Bonzini 
2990c50d8ae3SPaolo Bonzini /*
2991c50d8ae3SPaolo Bonzini  * Returns true if the SPTE was fixed successfully. Otherwise,
2992c50d8ae3SPaolo Bonzini  * someone else modified the SPTE from its original value.
2993c50d8ae3SPaolo Bonzini  */
2994c50d8ae3SPaolo Bonzini static bool
2995c50d8ae3SPaolo Bonzini fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2996c50d8ae3SPaolo Bonzini 			u64 *sptep, u64 old_spte, u64 new_spte)
2997c50d8ae3SPaolo Bonzini {
2998c50d8ae3SPaolo Bonzini 	gfn_t gfn;
2999c50d8ae3SPaolo Bonzini 
3000c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
3001c50d8ae3SPaolo Bonzini 
3002c50d8ae3SPaolo Bonzini 	/*
3003c50d8ae3SPaolo Bonzini 	 * Theoretically we could also set dirty bit (and flush TLB) here in
3004c50d8ae3SPaolo Bonzini 	 * order to eliminate unnecessary PML logging. See comments in
3005c50d8ae3SPaolo Bonzini 	 * set_spte. But fast_page_fault is very unlikely to happen with PML
3006c50d8ae3SPaolo Bonzini 	 * enabled, so we do not do this. This might result in the same GPA
3007c50d8ae3SPaolo Bonzini 	 * to be logged in PML buffer again when the write really happens, and
3008c50d8ae3SPaolo Bonzini 	 * eventually to be called by mark_page_dirty twice. But it's also no
3009c50d8ae3SPaolo Bonzini 	 * harm. This also avoids the TLB flush needed after setting dirty bit
3010c50d8ae3SPaolo Bonzini 	 * so non-PML cases won't be impacted.
3011c50d8ae3SPaolo Bonzini 	 *
3012c50d8ae3SPaolo Bonzini 	 * Compare with set_spte where instead shadow_dirty_mask is set.
3013c50d8ae3SPaolo Bonzini 	 */
3014c50d8ae3SPaolo Bonzini 	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3015c50d8ae3SPaolo Bonzini 		return false;
3016c50d8ae3SPaolo Bonzini 
3017c50d8ae3SPaolo Bonzini 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3018c50d8ae3SPaolo Bonzini 		/*
3019c50d8ae3SPaolo Bonzini 		 * The gfn of direct spte is stable since it is
3020c50d8ae3SPaolo Bonzini 		 * calculated by sp->gfn.
3021c50d8ae3SPaolo Bonzini 		 */
3022c50d8ae3SPaolo Bonzini 		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
3023c50d8ae3SPaolo Bonzini 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
3024c50d8ae3SPaolo Bonzini 	}
3025c50d8ae3SPaolo Bonzini 
3026c50d8ae3SPaolo Bonzini 	return true;
3027c50d8ae3SPaolo Bonzini }
3028c50d8ae3SPaolo Bonzini 
3029c50d8ae3SPaolo Bonzini static bool is_access_allowed(u32 fault_err_code, u64 spte)
3030c50d8ae3SPaolo Bonzini {
3031c50d8ae3SPaolo Bonzini 	if (fault_err_code & PFERR_FETCH_MASK)
3032c50d8ae3SPaolo Bonzini 		return is_executable_pte(spte);
3033c50d8ae3SPaolo Bonzini 
3034c50d8ae3SPaolo Bonzini 	if (fault_err_code & PFERR_WRITE_MASK)
3035c50d8ae3SPaolo Bonzini 		return is_writable_pte(spte);
3036c50d8ae3SPaolo Bonzini 
3037c50d8ae3SPaolo Bonzini 	/* Fault was on Read access */
3038c50d8ae3SPaolo Bonzini 	return spte & PT_PRESENT_MASK;
3039c50d8ae3SPaolo Bonzini }
3040c50d8ae3SPaolo Bonzini 
3041c50d8ae3SPaolo Bonzini /*
3042c4371c2aSSean Christopherson  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3043c50d8ae3SPaolo Bonzini  */
3044c4371c2aSSean Christopherson static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3045c50d8ae3SPaolo Bonzini 			   u32 error_code)
3046c50d8ae3SPaolo Bonzini {
3047c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
3048c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3049c4371c2aSSean Christopherson 	int ret = RET_PF_INVALID;
3050c50d8ae3SPaolo Bonzini 	u64 spte = 0ull;
3051c50d8ae3SPaolo Bonzini 	uint retry_count = 0;
3052c50d8ae3SPaolo Bonzini 
3053c50d8ae3SPaolo Bonzini 	if (!page_fault_can_be_fast(error_code))
3054c4371c2aSSean Christopherson 		return ret;
3055c50d8ae3SPaolo Bonzini 
3056c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3057c50d8ae3SPaolo Bonzini 
3058c50d8ae3SPaolo Bonzini 	do {
3059c50d8ae3SPaolo Bonzini 		u64 new_spte;
3060c50d8ae3SPaolo Bonzini 
3061736c291cSSean Christopherson 		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
3062f9fa2509SSean Christopherson 			if (!is_shadow_present_pte(spte))
3063c50d8ae3SPaolo Bonzini 				break;
3064c50d8ae3SPaolo Bonzini 
3065ec89e643SSean Christopherson 		if (!is_shadow_present_pte(spte))
3066ec89e643SSean Christopherson 			break;
3067ec89e643SSean Christopherson 
306857354682SSean Christopherson 		sp = sptep_to_sp(iterator.sptep);
3069c50d8ae3SPaolo Bonzini 		if (!is_last_spte(spte, sp->role.level))
3070c50d8ae3SPaolo Bonzini 			break;
3071c50d8ae3SPaolo Bonzini 
3072c50d8ae3SPaolo Bonzini 		/*
3073c50d8ae3SPaolo Bonzini 		 * Check whether the memory access that caused the fault would
3074c50d8ae3SPaolo Bonzini 		 * still cause it if it were to be performed right now. If not,
3075c50d8ae3SPaolo Bonzini 		 * then this is a spurious fault caused by TLB lazily flushed,
3076c50d8ae3SPaolo Bonzini 		 * or some other CPU has already fixed the PTE after the
3077c50d8ae3SPaolo Bonzini 		 * current CPU took the fault.
3078c50d8ae3SPaolo Bonzini 		 *
3079c50d8ae3SPaolo Bonzini 		 * Need not check the access of upper level table entries since
3080c50d8ae3SPaolo Bonzini 		 * they are always ACC_ALL.
3081c50d8ae3SPaolo Bonzini 		 */
3082c50d8ae3SPaolo Bonzini 		if (is_access_allowed(error_code, spte)) {
3083c4371c2aSSean Christopherson 			ret = RET_PF_SPURIOUS;
3084c50d8ae3SPaolo Bonzini 			break;
3085c50d8ae3SPaolo Bonzini 		}
3086c50d8ae3SPaolo Bonzini 
3087c50d8ae3SPaolo Bonzini 		new_spte = spte;
3088c50d8ae3SPaolo Bonzini 
3089c50d8ae3SPaolo Bonzini 		if (is_access_track_spte(spte))
3090c50d8ae3SPaolo Bonzini 			new_spte = restore_acc_track_spte(new_spte);
3091c50d8ae3SPaolo Bonzini 
3092c50d8ae3SPaolo Bonzini 		/*
3093c50d8ae3SPaolo Bonzini 		 * Currently, to simplify the code, write-protection can
3094c50d8ae3SPaolo Bonzini 		 * be removed in the fast path only if the SPTE was
3095c50d8ae3SPaolo Bonzini 		 * write-protected for dirty-logging or access tracking.
3096c50d8ae3SPaolo Bonzini 		 */
3097c50d8ae3SPaolo Bonzini 		if ((error_code & PFERR_WRITE_MASK) &&
3098e6302698SMiaohe Lin 		    spte_can_locklessly_be_made_writable(spte)) {
3099c50d8ae3SPaolo Bonzini 			new_spte |= PT_WRITABLE_MASK;
3100c50d8ae3SPaolo Bonzini 
3101c50d8ae3SPaolo Bonzini 			/*
3102c50d8ae3SPaolo Bonzini 			 * Do not fix write-permission on the large spte.  Since
3103c50d8ae3SPaolo Bonzini 			 * we only dirty the first page into the dirty-bitmap in
3104c50d8ae3SPaolo Bonzini 			 * fast_pf_fix_direct_spte(), other pages are missed
3105c50d8ae3SPaolo Bonzini 			 * if its slot has dirty logging enabled.
3106c50d8ae3SPaolo Bonzini 			 *
3107c50d8ae3SPaolo Bonzini 			 * Instead, we let the slow page fault path create a
3108c50d8ae3SPaolo Bonzini 			 * normal spte to fix the access.
3109c50d8ae3SPaolo Bonzini 			 *
3110c50d8ae3SPaolo Bonzini 			 * See the comments in kvm_arch_commit_memory_region().
3111c50d8ae3SPaolo Bonzini 			 */
31123bae0459SSean Christopherson 			if (sp->role.level > PG_LEVEL_4K)
3113c50d8ae3SPaolo Bonzini 				break;
3114c50d8ae3SPaolo Bonzini 		}
3115c50d8ae3SPaolo Bonzini 
3116c50d8ae3SPaolo Bonzini 		/* Verify that the fault can be handled in the fast path */
3117c50d8ae3SPaolo Bonzini 		if (new_spte == spte ||
3118c50d8ae3SPaolo Bonzini 		    !is_access_allowed(error_code, new_spte))
3119c50d8ae3SPaolo Bonzini 			break;
3120c50d8ae3SPaolo Bonzini 
3121c50d8ae3SPaolo Bonzini 		/*
3122c50d8ae3SPaolo Bonzini 		 * Currently, fast page fault only works for direct mapping
3123c50d8ae3SPaolo Bonzini 		 * since the gfn is not stable for indirect shadow page. See
31243ecad8c2SMauro Carvalho Chehab 		 * Documentation/virt/kvm/locking.rst to get more detail.
3125c50d8ae3SPaolo Bonzini 		 */
3126c4371c2aSSean Christopherson 		if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
3127c4371c2aSSean Christopherson 					    new_spte)) {
3128c4371c2aSSean Christopherson 			ret = RET_PF_FIXED;
3129c50d8ae3SPaolo Bonzini 			break;
3130c4371c2aSSean Christopherson 		}
3131c50d8ae3SPaolo Bonzini 
3132c50d8ae3SPaolo Bonzini 		if (++retry_count > 4) {
3133c50d8ae3SPaolo Bonzini 			printk_once(KERN_WARNING
3134c50d8ae3SPaolo Bonzini 				"kvm: Fast #PF retrying more than 4 times.\n");
3135c50d8ae3SPaolo Bonzini 			break;
3136c50d8ae3SPaolo Bonzini 		}
3137c50d8ae3SPaolo Bonzini 
3138c50d8ae3SPaolo Bonzini 	} while (true);
3139c50d8ae3SPaolo Bonzini 
3140736c291cSSean Christopherson 	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
3141c4371c2aSSean Christopherson 			      spte, ret);
3142c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3143c50d8ae3SPaolo Bonzini 
3144c4371c2aSSean Christopherson 	return ret;
3145c50d8ae3SPaolo Bonzini }
3146c50d8ae3SPaolo Bonzini 
3147c50d8ae3SPaolo Bonzini static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3148c50d8ae3SPaolo Bonzini 			       struct list_head *invalid_list)
3149c50d8ae3SPaolo Bonzini {
3150c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3151c50d8ae3SPaolo Bonzini 
3152c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(*root_hpa))
3153c50d8ae3SPaolo Bonzini 		return;
3154c50d8ae3SPaolo Bonzini 
3155e47c4aeeSSean Christopherson 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
315602c00b3aSBen Gardon 
315702c00b3aSBen Gardon 	if (kvm_mmu_put_root(kvm, sp)) {
3158897218ffSPaolo Bonzini 		if (is_tdp_mmu_page(sp))
315902c00b3aSBen Gardon 			kvm_tdp_mmu_free_root(kvm, sp);
316002c00b3aSBen Gardon 		else if (sp->role.invalid)
3161c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
316202c00b3aSBen Gardon 	}
3163c50d8ae3SPaolo Bonzini 
3164c50d8ae3SPaolo Bonzini 	*root_hpa = INVALID_PAGE;
3165c50d8ae3SPaolo Bonzini }
3166c50d8ae3SPaolo Bonzini 
3167c50d8ae3SPaolo Bonzini /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3168c50d8ae3SPaolo Bonzini void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3169c50d8ae3SPaolo Bonzini 			ulong roots_to_free)
3170c50d8ae3SPaolo Bonzini {
31714d710de9SSean Christopherson 	struct kvm *kvm = vcpu->kvm;
3172c50d8ae3SPaolo Bonzini 	int i;
3173c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
3174c50d8ae3SPaolo Bonzini 	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3175c50d8ae3SPaolo Bonzini 
3176c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3177c50d8ae3SPaolo Bonzini 
3178c50d8ae3SPaolo Bonzini 	/* Before acquiring the MMU lock, see if we need to do any real work. */
3179c50d8ae3SPaolo Bonzini 	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
3180c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3181c50d8ae3SPaolo Bonzini 			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3182c50d8ae3SPaolo Bonzini 			    VALID_PAGE(mmu->prev_roots[i].hpa))
3183c50d8ae3SPaolo Bonzini 				break;
3184c50d8ae3SPaolo Bonzini 
3185c50d8ae3SPaolo Bonzini 		if (i == KVM_MMU_NUM_PREV_ROOTS)
3186c50d8ae3SPaolo Bonzini 			return;
3187c50d8ae3SPaolo Bonzini 	}
3188c50d8ae3SPaolo Bonzini 
3189531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
3190c50d8ae3SPaolo Bonzini 
3191c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3192c50d8ae3SPaolo Bonzini 		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
31934d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3194c50d8ae3SPaolo Bonzini 					   &invalid_list);
3195c50d8ae3SPaolo Bonzini 
3196c50d8ae3SPaolo Bonzini 	if (free_active_root) {
3197c50d8ae3SPaolo Bonzini 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3198c50d8ae3SPaolo Bonzini 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
31994d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
320004d45551SSean Christopherson 		} else if (mmu->pae_root) {
3201c834e5e4SSean Christopherson 			for (i = 0; i < 4; ++i) {
3202c834e5e4SSean Christopherson 				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3203c834e5e4SSean Christopherson 					continue;
3204c834e5e4SSean Christopherson 
3205c834e5e4SSean Christopherson 				mmu_free_root_page(kvm, &mmu->pae_root[i],
3206c50d8ae3SPaolo Bonzini 						   &invalid_list);
3207c834e5e4SSean Christopherson 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3208c834e5e4SSean Christopherson 			}
3209c50d8ae3SPaolo Bonzini 		}
321004d45551SSean Christopherson 		mmu->root_hpa = INVALID_PAGE;
3211be01e8e2SSean Christopherson 		mmu->root_pgd = 0;
3212c50d8ae3SPaolo Bonzini 	}
3213c50d8ae3SPaolo Bonzini 
32144d710de9SSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
3215531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
3216c50d8ae3SPaolo Bonzini }
3217c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3218c50d8ae3SPaolo Bonzini 
3219c50d8ae3SPaolo Bonzini static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3220c50d8ae3SPaolo Bonzini {
3221c50d8ae3SPaolo Bonzini 	int ret = 0;
3222c50d8ae3SPaolo Bonzini 
3223995decb6SVitaly Kuznetsov 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3224c50d8ae3SPaolo Bonzini 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3225c50d8ae3SPaolo Bonzini 		ret = 1;
3226c50d8ae3SPaolo Bonzini 	}
3227c50d8ae3SPaolo Bonzini 
3228c50d8ae3SPaolo Bonzini 	return ret;
3229c50d8ae3SPaolo Bonzini }
3230c50d8ae3SPaolo Bonzini 
32318123f265SSean Christopherson static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
32328123f265SSean Christopherson 			    u8 level, bool direct)
3233c50d8ae3SPaolo Bonzini {
3234c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
32358123f265SSean Christopherson 
32368123f265SSean Christopherson 	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
32378123f265SSean Christopherson 	++sp->root_count;
32388123f265SSean Christopherson 
32398123f265SSean Christopherson 	return __pa(sp->spt);
32408123f265SSean Christopherson }
32418123f265SSean Christopherson 
32428123f265SSean Christopherson static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
32438123f265SSean Christopherson {
3244b37233c9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3245b37233c9SSean Christopherson 	u8 shadow_root_level = mmu->shadow_root_level;
32468123f265SSean Christopherson 	hpa_t root;
3247c50d8ae3SPaolo Bonzini 	unsigned i;
3248c50d8ae3SPaolo Bonzini 
3249897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(vcpu->kvm)) {
325002c00b3aSBen Gardon 		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3251b37233c9SSean Christopherson 		mmu->root_hpa = root;
325202c00b3aSBen Gardon 	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
32536e6ec584SSean Christopherson 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3254b37233c9SSean Christopherson 		mmu->root_hpa = root;
32558123f265SSean Christopherson 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
325673ad1606SSean Christopherson 		if (WARN_ON_ONCE(!mmu->pae_root))
325773ad1606SSean Christopherson 			return -EIO;
325873ad1606SSean Christopherson 
3259c50d8ae3SPaolo Bonzini 		for (i = 0; i < 4; ++i) {
3260c834e5e4SSean Christopherson 			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3261c50d8ae3SPaolo Bonzini 
32628123f265SSean Christopherson 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
32638123f265SSean Christopherson 					      i << 30, PT32_ROOT_LEVEL, true);
326417e368d9SSean Christopherson 			mmu->pae_root[i] = root | PT_PRESENT_MASK |
326517e368d9SSean Christopherson 					   shadow_me_mask;
3266c50d8ae3SPaolo Bonzini 		}
3267b37233c9SSean Christopherson 		mmu->root_hpa = __pa(mmu->pae_root);
326873ad1606SSean Christopherson 	} else {
326973ad1606SSean Christopherson 		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
327073ad1606SSean Christopherson 		return -EIO;
327173ad1606SSean Christopherson 	}
32723651c7fcSSean Christopherson 
3273be01e8e2SSean Christopherson 	/* root_pgd is ignored for direct MMUs. */
3274b37233c9SSean Christopherson 	mmu->root_pgd = 0;
3275c50d8ae3SPaolo Bonzini 
3276c50d8ae3SPaolo Bonzini 	return 0;
3277c50d8ae3SPaolo Bonzini }
3278c50d8ae3SPaolo Bonzini 
3279c50d8ae3SPaolo Bonzini static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3280c50d8ae3SPaolo Bonzini {
3281b37233c9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
32826e0918aeSSean Christopherson 	u64 pdptrs[4], pm_mask;
3283be01e8e2SSean Christopherson 	gfn_t root_gfn, root_pgd;
32848123f265SSean Christopherson 	hpa_t root;
3285c50d8ae3SPaolo Bonzini 	int i;
3286c50d8ae3SPaolo Bonzini 
3287b37233c9SSean Christopherson 	root_pgd = mmu->get_guest_pgd(vcpu);
3288be01e8e2SSean Christopherson 	root_gfn = root_pgd >> PAGE_SHIFT;
3289c50d8ae3SPaolo Bonzini 
3290c50d8ae3SPaolo Bonzini 	if (mmu_check_root(vcpu, root_gfn))
3291c50d8ae3SPaolo Bonzini 		return 1;
3292c50d8ae3SPaolo Bonzini 
32936e0918aeSSean Christopherson 	if (mmu->root_level == PT32E_ROOT_LEVEL) {
32946e0918aeSSean Christopherson 		for (i = 0; i < 4; ++i) {
32956e0918aeSSean Christopherson 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
32966e0918aeSSean Christopherson 			if (!(pdptrs[i] & PT_PRESENT_MASK))
32976e0918aeSSean Christopherson 				continue;
32986e0918aeSSean Christopherson 
32996e0918aeSSean Christopherson 			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
33006e0918aeSSean Christopherson 				return 1;
33016e0918aeSSean Christopherson 		}
33026e0918aeSSean Christopherson 	}
33036e0918aeSSean Christopherson 
3304c50d8ae3SPaolo Bonzini 	/*
3305c50d8ae3SPaolo Bonzini 	 * Do we shadow a long mode page table? If so we need to
3306c50d8ae3SPaolo Bonzini 	 * write-protect the guests page table root.
3307c50d8ae3SPaolo Bonzini 	 */
3308b37233c9SSean Christopherson 	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
33098123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, 0,
3310b37233c9SSean Christopherson 				      mmu->shadow_root_level, false);
3311b37233c9SSean Christopherson 		mmu->root_hpa = root;
3312be01e8e2SSean Christopherson 		goto set_root_pgd;
3313c50d8ae3SPaolo Bonzini 	}
3314c50d8ae3SPaolo Bonzini 
331573ad1606SSean Christopherson 	if (WARN_ON_ONCE(!mmu->pae_root))
331673ad1606SSean Christopherson 		return -EIO;
331773ad1606SSean Christopherson 
3318c50d8ae3SPaolo Bonzini 	/*
3319c50d8ae3SPaolo Bonzini 	 * We shadow a 32 bit page table. This may be a legacy 2-level
3320c50d8ae3SPaolo Bonzini 	 * or a PAE 3-level page table. In either case we need to be aware that
3321c50d8ae3SPaolo Bonzini 	 * the shadow page table may be a PAE or a long mode page table.
3322c50d8ae3SPaolo Bonzini 	 */
332317e368d9SSean Christopherson 	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3324748e52b9SSean Christopherson 	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
3325c50d8ae3SPaolo Bonzini 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3326c50d8ae3SPaolo Bonzini 
332773ad1606SSean Christopherson 		if (WARN_ON_ONCE(!mmu->lm_root))
332873ad1606SSean Christopherson 			return -EIO;
332973ad1606SSean Christopherson 
3330748e52b9SSean Christopherson 		mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask;
333104d45551SSean Christopherson 	}
333204d45551SSean Christopherson 
3333c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3334c834e5e4SSean Christopherson 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
33356e6ec584SSean Christopherson 
3336b37233c9SSean Christopherson 		if (mmu->root_level == PT32E_ROOT_LEVEL) {
33376e0918aeSSean Christopherson 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3338c834e5e4SSean Christopherson 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3339c50d8ae3SPaolo Bonzini 				continue;
3340c50d8ae3SPaolo Bonzini 			}
33416e0918aeSSean Christopherson 			root_gfn = pdptrs[i] >> PAGE_SHIFT;
3342c50d8ae3SPaolo Bonzini 		}
3343c50d8ae3SPaolo Bonzini 
33448123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
33458123f265SSean Christopherson 				      PT32_ROOT_LEVEL, false);
3346b37233c9SSean Christopherson 		mmu->pae_root[i] = root | pm_mask;
3347c50d8ae3SPaolo Bonzini 	}
3348c50d8ae3SPaolo Bonzini 
3349ba0a194fSSean Christopherson 	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
3350b37233c9SSean Christopherson 		mmu->root_hpa = __pa(mmu->lm_root);
3351ba0a194fSSean Christopherson 	else
3352ba0a194fSSean Christopherson 		mmu->root_hpa = __pa(mmu->pae_root);
3353c50d8ae3SPaolo Bonzini 
3354be01e8e2SSean Christopherson set_root_pgd:
3355b37233c9SSean Christopherson 	mmu->root_pgd = root_pgd;
3356c50d8ae3SPaolo Bonzini 
3357c50d8ae3SPaolo Bonzini 	return 0;
3358c50d8ae3SPaolo Bonzini }
3359c50d8ae3SPaolo Bonzini 
3360748e52b9SSean Christopherson static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3361748e52b9SSean Christopherson {
3362748e52b9SSean Christopherson 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3363748e52b9SSean Christopherson 	u64 *lm_root, *pae_root;
3364748e52b9SSean Christopherson 
3365748e52b9SSean Christopherson 	/*
3366748e52b9SSean Christopherson 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3367748e52b9SSean Christopherson 	 * tables are allocated and initialized at root creation as there is no
3368748e52b9SSean Christopherson 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
3369748e52b9SSean Christopherson 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3370748e52b9SSean Christopherson 	 */
3371748e52b9SSean Christopherson 	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
3372748e52b9SSean Christopherson 	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
3373748e52b9SSean Christopherson 		return 0;
3374748e52b9SSean Christopherson 
3375748e52b9SSean Christopherson 	/*
3376748e52b9SSean Christopherson 	 * This mess only works with 4-level paging and needs to be updated to
3377748e52b9SSean Christopherson 	 * work with 5-level paging.
3378748e52b9SSean Christopherson 	 */
3379748e52b9SSean Christopherson 	if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
3380748e52b9SSean Christopherson 		return -EIO;
3381748e52b9SSean Christopherson 
3382748e52b9SSean Christopherson 	if (mmu->pae_root && mmu->lm_root)
3383748e52b9SSean Christopherson 		return 0;
3384748e52b9SSean Christopherson 
3385748e52b9SSean Christopherson 	/*
3386748e52b9SSean Christopherson 	 * The special roots should always be allocated in concert.  Yell and
3387748e52b9SSean Christopherson 	 * bail if KVM ends up in a state where only one of the roots is valid.
3388748e52b9SSean Christopherson 	 */
3389748e52b9SSean Christopherson 	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->lm_root))
3390748e52b9SSean Christopherson 		return -EIO;
3391748e52b9SSean Christopherson 
3392*4a98623dSSean Christopherson 	/*
3393*4a98623dSSean Christopherson 	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
3394*4a98623dSSean Christopherson 	 * doesn't need to be decrypted.
3395*4a98623dSSean Christopherson 	 */
3396748e52b9SSean Christopherson 	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3397748e52b9SSean Christopherson 	if (!pae_root)
3398748e52b9SSean Christopherson 		return -ENOMEM;
3399748e52b9SSean Christopherson 
3400748e52b9SSean Christopherson 	lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3401748e52b9SSean Christopherson 	if (!lm_root) {
3402748e52b9SSean Christopherson 		free_page((unsigned long)pae_root);
3403748e52b9SSean Christopherson 		return -ENOMEM;
3404748e52b9SSean Christopherson 	}
3405748e52b9SSean Christopherson 
3406748e52b9SSean Christopherson 	mmu->pae_root = pae_root;
3407748e52b9SSean Christopherson 	mmu->lm_root = lm_root;
3408748e52b9SSean Christopherson 
3409748e52b9SSean Christopherson 	return 0;
3410748e52b9SSean Christopherson }
3411748e52b9SSean Christopherson 
3412c50d8ae3SPaolo Bonzini void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3413c50d8ae3SPaolo Bonzini {
3414c50d8ae3SPaolo Bonzini 	int i;
3415c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3416c50d8ae3SPaolo Bonzini 
3417c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
3418c50d8ae3SPaolo Bonzini 		return;
3419c50d8ae3SPaolo Bonzini 
3420c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3421c50d8ae3SPaolo Bonzini 		return;
3422c50d8ae3SPaolo Bonzini 
3423c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3424c50d8ae3SPaolo Bonzini 
3425c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3426c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->root_hpa;
3427e47c4aeeSSean Christopherson 		sp = to_shadow_page(root);
3428c50d8ae3SPaolo Bonzini 
3429c50d8ae3SPaolo Bonzini 		/*
3430c50d8ae3SPaolo Bonzini 		 * Even if another CPU was marking the SP as unsync-ed
3431c50d8ae3SPaolo Bonzini 		 * simultaneously, any guest page table changes are not
3432c50d8ae3SPaolo Bonzini 		 * guaranteed to be visible anyway until this VCPU issues a TLB
3433c50d8ae3SPaolo Bonzini 		 * flush strictly after those changes are made. We only need to
3434c50d8ae3SPaolo Bonzini 		 * ensure that the other CPU sets these flags before any actual
3435c50d8ae3SPaolo Bonzini 		 * changes to the page tables are made. The comments in
3436c50d8ae3SPaolo Bonzini 		 * mmu_need_write_protect() describe what could go wrong if this
3437c50d8ae3SPaolo Bonzini 		 * requirement isn't satisfied.
3438c50d8ae3SPaolo Bonzini 		 */
3439c50d8ae3SPaolo Bonzini 		if (!smp_load_acquire(&sp->unsync) &&
3440c50d8ae3SPaolo Bonzini 		    !smp_load_acquire(&sp->unsync_children))
3441c50d8ae3SPaolo Bonzini 			return;
3442c50d8ae3SPaolo Bonzini 
3443531810caSBen Gardon 		write_lock(&vcpu->kvm->mmu_lock);
3444c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3445c50d8ae3SPaolo Bonzini 
3446c50d8ae3SPaolo Bonzini 		mmu_sync_children(vcpu, sp);
3447c50d8ae3SPaolo Bonzini 
3448c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3449531810caSBen Gardon 		write_unlock(&vcpu->kvm->mmu_lock);
3450c50d8ae3SPaolo Bonzini 		return;
3451c50d8ae3SPaolo Bonzini 	}
3452c50d8ae3SPaolo Bonzini 
3453531810caSBen Gardon 	write_lock(&vcpu->kvm->mmu_lock);
3454c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3455c50d8ae3SPaolo Bonzini 
3456c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3457c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->pae_root[i];
3458c50d8ae3SPaolo Bonzini 
3459c834e5e4SSean Christopherson 		if (IS_VALID_PAE_ROOT(root)) {
3460c50d8ae3SPaolo Bonzini 			root &= PT64_BASE_ADDR_MASK;
3461e47c4aeeSSean Christopherson 			sp = to_shadow_page(root);
3462c50d8ae3SPaolo Bonzini 			mmu_sync_children(vcpu, sp);
3463c50d8ae3SPaolo Bonzini 		}
3464c50d8ae3SPaolo Bonzini 	}
3465c50d8ae3SPaolo Bonzini 
3466c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3467531810caSBen Gardon 	write_unlock(&vcpu->kvm->mmu_lock);
3468c50d8ae3SPaolo Bonzini }
3469c50d8ae3SPaolo Bonzini 
3470736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3471c50d8ae3SPaolo Bonzini 				  u32 access, struct x86_exception *exception)
3472c50d8ae3SPaolo Bonzini {
3473c50d8ae3SPaolo Bonzini 	if (exception)
3474c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3475c50d8ae3SPaolo Bonzini 	return vaddr;
3476c50d8ae3SPaolo Bonzini }
3477c50d8ae3SPaolo Bonzini 
3478736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3479c50d8ae3SPaolo Bonzini 					 u32 access,
3480c50d8ae3SPaolo Bonzini 					 struct x86_exception *exception)
3481c50d8ae3SPaolo Bonzini {
3482c50d8ae3SPaolo Bonzini 	if (exception)
3483c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3484c50d8ae3SPaolo Bonzini 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3485c50d8ae3SPaolo Bonzini }
3486c50d8ae3SPaolo Bonzini 
3487c50d8ae3SPaolo Bonzini static bool
3488c50d8ae3SPaolo Bonzini __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
3489c50d8ae3SPaolo Bonzini {
3490b5c3c1b3SSean Christopherson 	int bit7 = (pte >> 7) & 1;
3491c50d8ae3SPaolo Bonzini 
3492b5c3c1b3SSean Christopherson 	return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
3493c50d8ae3SPaolo Bonzini }
3494c50d8ae3SPaolo Bonzini 
3495b5c3c1b3SSean Christopherson static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
3496c50d8ae3SPaolo Bonzini {
3497b5c3c1b3SSean Christopherson 	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
3498c50d8ae3SPaolo Bonzini }
3499c50d8ae3SPaolo Bonzini 
3500c50d8ae3SPaolo Bonzini static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3501c50d8ae3SPaolo Bonzini {
3502c50d8ae3SPaolo Bonzini 	/*
3503c50d8ae3SPaolo Bonzini 	 * A nested guest cannot use the MMIO cache if it is using nested
3504c50d8ae3SPaolo Bonzini 	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
3505c50d8ae3SPaolo Bonzini 	 */
3506c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
3507c50d8ae3SPaolo Bonzini 		return false;
3508c50d8ae3SPaolo Bonzini 
3509c50d8ae3SPaolo Bonzini 	if (direct)
3510c50d8ae3SPaolo Bonzini 		return vcpu_match_mmio_gpa(vcpu, addr);
3511c50d8ae3SPaolo Bonzini 
3512c50d8ae3SPaolo Bonzini 	return vcpu_match_mmio_gva(vcpu, addr);
3513c50d8ae3SPaolo Bonzini }
3514c50d8ae3SPaolo Bonzini 
351595fb5b02SBen Gardon /*
351695fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
351795fb5b02SBen Gardon  * That SPTE may be non-present.
351895fb5b02SBen Gardon  */
351939b4d43eSSean Christopherson static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3520c50d8ae3SPaolo Bonzini {
3521c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
35222aa07893SSean Christopherson 	int leaf = -1;
352395fb5b02SBen Gardon 	u64 spte;
3524c50d8ae3SPaolo Bonzini 
3525c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3526c50d8ae3SPaolo Bonzini 
352739b4d43eSSean Christopherson 	for (shadow_walk_init(&iterator, vcpu, addr),
352839b4d43eSSean Christopherson 	     *root_level = iterator.level;
3529c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&iterator);
3530c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&iterator, spte)) {
353195fb5b02SBen Gardon 		leaf = iterator.level;
3532c50d8ae3SPaolo Bonzini 		spte = mmu_spte_get_lockless(iterator.sptep);
3533c50d8ae3SPaolo Bonzini 
3534dde81f94SSean Christopherson 		sptes[leaf] = spte;
3535c50d8ae3SPaolo Bonzini 
3536c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(spte))
3537c50d8ae3SPaolo Bonzini 			break;
353895fb5b02SBen Gardon 	}
353995fb5b02SBen Gardon 
354095fb5b02SBen Gardon 	walk_shadow_page_lockless_end(vcpu);
354195fb5b02SBen Gardon 
354295fb5b02SBen Gardon 	return leaf;
354395fb5b02SBen Gardon }
354495fb5b02SBen Gardon 
35459aa41879SSean Christopherson /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
354695fb5b02SBen Gardon static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
354795fb5b02SBen Gardon {
3548dde81f94SSean Christopherson 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
354995fb5b02SBen Gardon 	struct rsvd_bits_validate *rsvd_check;
355039b4d43eSSean Christopherson 	int root, leaf, level;
355195fb5b02SBen Gardon 	bool reserved = false;
355295fb5b02SBen Gardon 
355395fb5b02SBen Gardon 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
355495fb5b02SBen Gardon 		*sptep = 0ull;
355595fb5b02SBen Gardon 		return reserved;
355695fb5b02SBen Gardon 	}
355795fb5b02SBen Gardon 
355895fb5b02SBen Gardon 	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
355939b4d43eSSean Christopherson 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
356095fb5b02SBen Gardon 	else
356139b4d43eSSean Christopherson 		leaf = get_walk(vcpu, addr, sptes, &root);
356295fb5b02SBen Gardon 
35632aa07893SSean Christopherson 	if (unlikely(leaf < 0)) {
35642aa07893SSean Christopherson 		*sptep = 0ull;
35652aa07893SSean Christopherson 		return reserved;
35662aa07893SSean Christopherson 	}
35672aa07893SSean Christopherson 
35689aa41879SSean Christopherson 	*sptep = sptes[leaf];
35699aa41879SSean Christopherson 
35709aa41879SSean Christopherson 	/*
35719aa41879SSean Christopherson 	 * Skip reserved bits checks on the terminal leaf if it's not a valid
35729aa41879SSean Christopherson 	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
35739aa41879SSean Christopherson 	 * design, always have reserved bits set.  The purpose of the checks is
35749aa41879SSean Christopherson 	 * to detect reserved bits on non-MMIO SPTEs. i.e. buggy SPTEs.
35759aa41879SSean Christopherson 	 */
35769aa41879SSean Christopherson 	if (!is_shadow_present_pte(sptes[leaf]))
35779aa41879SSean Christopherson 		leaf++;
357895fb5b02SBen Gardon 
357995fb5b02SBen Gardon 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
358095fb5b02SBen Gardon 
35819aa41879SSean Christopherson 	for (level = root; level >= leaf; level--)
3582b5c3c1b3SSean Christopherson 		/*
3583b5c3c1b3SSean Christopherson 		 * Use a bitwise-OR instead of a logical-OR to aggregate the
3584b5c3c1b3SSean Christopherson 		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
3585b5c3c1b3SSean Christopherson 		 * adding a Jcc in the loop.
3586b5c3c1b3SSean Christopherson 		 */
3587dde81f94SSean Christopherson 		reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
3588dde81f94SSean Christopherson 			    __is_rsvd_bits_set(rsvd_check, sptes[level], level);
3589c50d8ae3SPaolo Bonzini 
3590c50d8ae3SPaolo Bonzini 	if (reserved) {
3591bb4cdf3aSSean Christopherson 		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3592c50d8ae3SPaolo Bonzini 		       __func__, addr);
359395fb5b02SBen Gardon 		for (level = root; level >= leaf; level--)
3594bb4cdf3aSSean Christopherson 			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
3595bb4cdf3aSSean Christopherson 			       sptes[level], level,
3596bb4cdf3aSSean Christopherson 			       rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]);
3597c50d8ae3SPaolo Bonzini 	}
3598ddce6208SSean Christopherson 
3599c50d8ae3SPaolo Bonzini 	return reserved;
3600c50d8ae3SPaolo Bonzini }
3601c50d8ae3SPaolo Bonzini 
3602c50d8ae3SPaolo Bonzini static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3603c50d8ae3SPaolo Bonzini {
3604c50d8ae3SPaolo Bonzini 	u64 spte;
3605c50d8ae3SPaolo Bonzini 	bool reserved;
3606c50d8ae3SPaolo Bonzini 
3607c50d8ae3SPaolo Bonzini 	if (mmio_info_in_cache(vcpu, addr, direct))
3608c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3609c50d8ae3SPaolo Bonzini 
361095fb5b02SBen Gardon 	reserved = get_mmio_spte(vcpu, addr, &spte);
3611c50d8ae3SPaolo Bonzini 	if (WARN_ON(reserved))
3612c50d8ae3SPaolo Bonzini 		return -EINVAL;
3613c50d8ae3SPaolo Bonzini 
3614c50d8ae3SPaolo Bonzini 	if (is_mmio_spte(spte)) {
3615c50d8ae3SPaolo Bonzini 		gfn_t gfn = get_mmio_spte_gfn(spte);
36160a2b64c5SBen Gardon 		unsigned int access = get_mmio_spte_access(spte);
3617c50d8ae3SPaolo Bonzini 
3618c50d8ae3SPaolo Bonzini 		if (!check_mmio_spte(vcpu, spte))
3619c50d8ae3SPaolo Bonzini 			return RET_PF_INVALID;
3620c50d8ae3SPaolo Bonzini 
3621c50d8ae3SPaolo Bonzini 		if (direct)
3622c50d8ae3SPaolo Bonzini 			addr = 0;
3623c50d8ae3SPaolo Bonzini 
3624c50d8ae3SPaolo Bonzini 		trace_handle_mmio_page_fault(addr, gfn, access);
3625c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3626c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3627c50d8ae3SPaolo Bonzini 	}
3628c50d8ae3SPaolo Bonzini 
3629c50d8ae3SPaolo Bonzini 	/*
3630c50d8ae3SPaolo Bonzini 	 * If the page table is zapped by other cpus, let CPU fault again on
3631c50d8ae3SPaolo Bonzini 	 * the address.
3632c50d8ae3SPaolo Bonzini 	 */
3633c50d8ae3SPaolo Bonzini 	return RET_PF_RETRY;
3634c50d8ae3SPaolo Bonzini }
3635c50d8ae3SPaolo Bonzini 
3636c50d8ae3SPaolo Bonzini static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3637c50d8ae3SPaolo Bonzini 					 u32 error_code, gfn_t gfn)
3638c50d8ae3SPaolo Bonzini {
3639c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK))
3640c50d8ae3SPaolo Bonzini 		return false;
3641c50d8ae3SPaolo Bonzini 
3642c50d8ae3SPaolo Bonzini 	if (!(error_code & PFERR_PRESENT_MASK) ||
3643c50d8ae3SPaolo Bonzini 	      !(error_code & PFERR_WRITE_MASK))
3644c50d8ae3SPaolo Bonzini 		return false;
3645c50d8ae3SPaolo Bonzini 
3646c50d8ae3SPaolo Bonzini 	/*
3647c50d8ae3SPaolo Bonzini 	 * guest is writing the page which is write tracked which can
3648c50d8ae3SPaolo Bonzini 	 * not be fixed by page fault handler.
3649c50d8ae3SPaolo Bonzini 	 */
3650c50d8ae3SPaolo Bonzini 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
3651c50d8ae3SPaolo Bonzini 		return true;
3652c50d8ae3SPaolo Bonzini 
3653c50d8ae3SPaolo Bonzini 	return false;
3654c50d8ae3SPaolo Bonzini }
3655c50d8ae3SPaolo Bonzini 
3656c50d8ae3SPaolo Bonzini static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3657c50d8ae3SPaolo Bonzini {
3658c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
3659c50d8ae3SPaolo Bonzini 	u64 spte;
3660c50d8ae3SPaolo Bonzini 
3661c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3662c50d8ae3SPaolo Bonzini 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3663c50d8ae3SPaolo Bonzini 		clear_sp_write_flooding_count(iterator.sptep);
3664c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(spte))
3665c50d8ae3SPaolo Bonzini 			break;
3666c50d8ae3SPaolo Bonzini 	}
3667c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3668c50d8ae3SPaolo Bonzini }
3669c50d8ae3SPaolo Bonzini 
3670e8c22266SVitaly Kuznetsov static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
36719f1a8526SSean Christopherson 				    gfn_t gfn)
3672c50d8ae3SPaolo Bonzini {
3673c50d8ae3SPaolo Bonzini 	struct kvm_arch_async_pf arch;
3674c50d8ae3SPaolo Bonzini 
3675c50d8ae3SPaolo Bonzini 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3676c50d8ae3SPaolo Bonzini 	arch.gfn = gfn;
3677c50d8ae3SPaolo Bonzini 	arch.direct_map = vcpu->arch.mmu->direct_map;
3678d8dd54e0SSean Christopherson 	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3679c50d8ae3SPaolo Bonzini 
36809f1a8526SSean Christopherson 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
36819f1a8526SSean Christopherson 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3682c50d8ae3SPaolo Bonzini }
3683c50d8ae3SPaolo Bonzini 
3684c50d8ae3SPaolo Bonzini static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
36854a42d848SDavid Stevens 			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
36864a42d848SDavid Stevens 			 bool write, bool *writable)
3687c50d8ae3SPaolo Bonzini {
3688c36b7150SPaolo Bonzini 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3689c50d8ae3SPaolo Bonzini 	bool async;
3690c50d8ae3SPaolo Bonzini 
3691e0c37868SSean Christopherson 	/*
3692e0c37868SSean Christopherson 	 * Retry the page fault if the gfn hit a memslot that is being deleted
3693e0c37868SSean Christopherson 	 * or moved.  This ensures any existing SPTEs for the old memslot will
3694e0c37868SSean Christopherson 	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3695e0c37868SSean Christopherson 	 */
3696e0c37868SSean Christopherson 	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
3697e0c37868SSean Christopherson 		return true;
3698e0c37868SSean Christopherson 
3699c36b7150SPaolo Bonzini 	/* Don't expose private memslots to L2. */
3700c36b7150SPaolo Bonzini 	if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
3701c50d8ae3SPaolo Bonzini 		*pfn = KVM_PFN_NOSLOT;
3702c583eed6SSean Christopherson 		*writable = false;
3703c50d8ae3SPaolo Bonzini 		return false;
3704c50d8ae3SPaolo Bonzini 	}
3705c50d8ae3SPaolo Bonzini 
3706c50d8ae3SPaolo Bonzini 	async = false;
37074a42d848SDavid Stevens 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async,
37084a42d848SDavid Stevens 				    write, writable, hva);
3709c50d8ae3SPaolo Bonzini 	if (!async)
3710c50d8ae3SPaolo Bonzini 		return false; /* *pfn has correct page already */
3711c50d8ae3SPaolo Bonzini 
3712c50d8ae3SPaolo Bonzini 	if (!prefault && kvm_can_do_async_pf(vcpu)) {
37139f1a8526SSean Christopherson 		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
3714c50d8ae3SPaolo Bonzini 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
37159f1a8526SSean Christopherson 			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
3716c50d8ae3SPaolo Bonzini 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3717c50d8ae3SPaolo Bonzini 			return true;
37189f1a8526SSean Christopherson 		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
3719c50d8ae3SPaolo Bonzini 			return true;
3720c50d8ae3SPaolo Bonzini 	}
3721c50d8ae3SPaolo Bonzini 
37224a42d848SDavid Stevens 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
37234a42d848SDavid Stevens 				    write, writable, hva);
3724c50d8ae3SPaolo Bonzini 	return false;
3725c50d8ae3SPaolo Bonzini }
3726c50d8ae3SPaolo Bonzini 
37270f90e1c1SSean Christopherson static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
37280f90e1c1SSean Christopherson 			     bool prefault, int max_level, bool is_tdp)
3729c50d8ae3SPaolo Bonzini {
3730367fd790SSean Christopherson 	bool write = error_code & PFERR_WRITE_MASK;
37310f90e1c1SSean Christopherson 	bool map_writable;
3732c50d8ae3SPaolo Bonzini 
37330f90e1c1SSean Christopherson 	gfn_t gfn = gpa >> PAGE_SHIFT;
37340f90e1c1SSean Christopherson 	unsigned long mmu_seq;
37350f90e1c1SSean Christopherson 	kvm_pfn_t pfn;
37364a42d848SDavid Stevens 	hva_t hva;
373783f06fa7SSean Christopherson 	int r;
3738c50d8ae3SPaolo Bonzini 
3739c50d8ae3SPaolo Bonzini 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
3740c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3741c50d8ae3SPaolo Bonzini 
3742bb18842eSBen Gardon 	if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
3743c4371c2aSSean Christopherson 		r = fast_page_fault(vcpu, gpa, error_code);
3744c4371c2aSSean Christopherson 		if (r != RET_PF_INVALID)
3745c4371c2aSSean Christopherson 			return r;
3746bb18842eSBen Gardon 	}
374783291445SSean Christopherson 
3748378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, false);
3749c50d8ae3SPaolo Bonzini 	if (r)
3750c50d8ae3SPaolo Bonzini 		return r;
3751c50d8ae3SPaolo Bonzini 
3752367fd790SSean Christopherson 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
3753367fd790SSean Christopherson 	smp_rmb();
3754367fd790SSean Christopherson 
37554a42d848SDavid Stevens 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva,
37564a42d848SDavid Stevens 			 write, &map_writable))
3757367fd790SSean Christopherson 		return RET_PF_RETRY;
3758367fd790SSean Christopherson 
37590f90e1c1SSean Christopherson 	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
3760367fd790SSean Christopherson 		return r;
3761367fd790SSean Christopherson 
3762367fd790SSean Christopherson 	r = RET_PF_RETRY;
3763a2855afcSBen Gardon 
3764a2855afcSBen Gardon 	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
3765a2855afcSBen Gardon 		read_lock(&vcpu->kvm->mmu_lock);
3766a2855afcSBen Gardon 	else
3767531810caSBen Gardon 		write_lock(&vcpu->kvm->mmu_lock);
3768a2855afcSBen Gardon 
37694a42d848SDavid Stevens 	if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
3770367fd790SSean Christopherson 		goto out_unlock;
37717bd7ded6SSean Christopherson 	r = make_mmu_pages_available(vcpu);
37727bd7ded6SSean Christopherson 	if (r)
3773367fd790SSean Christopherson 		goto out_unlock;
3774bb18842eSBen Gardon 
3775bb18842eSBen Gardon 	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
3776bb18842eSBen Gardon 		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
3777bb18842eSBen Gardon 				    pfn, prefault);
3778bb18842eSBen Gardon 	else
37796c2fd34fSSean Christopherson 		r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
37806c2fd34fSSean Christopherson 				 prefault, is_tdp);
37810f90e1c1SSean Christopherson 
3782367fd790SSean Christopherson out_unlock:
3783a2855afcSBen Gardon 	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
3784a2855afcSBen Gardon 		read_unlock(&vcpu->kvm->mmu_lock);
3785a2855afcSBen Gardon 	else
3786531810caSBen Gardon 		write_unlock(&vcpu->kvm->mmu_lock);
3787367fd790SSean Christopherson 	kvm_release_pfn_clean(pfn);
3788367fd790SSean Christopherson 	return r;
3789c50d8ae3SPaolo Bonzini }
3790c50d8ae3SPaolo Bonzini 
37910f90e1c1SSean Christopherson static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
37920f90e1c1SSean Christopherson 				u32 error_code, bool prefault)
37930f90e1c1SSean Christopherson {
37940f90e1c1SSean Christopherson 	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
37950f90e1c1SSean Christopherson 
37960f90e1c1SSean Christopherson 	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
37970f90e1c1SSean Christopherson 	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
37983bae0459SSean Christopherson 				 PG_LEVEL_2M, false);
37990f90e1c1SSean Christopherson }
38000f90e1c1SSean Christopherson 
3801c50d8ae3SPaolo Bonzini int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3802c50d8ae3SPaolo Bonzini 				u64 fault_address, char *insn, int insn_len)
3803c50d8ae3SPaolo Bonzini {
3804c50d8ae3SPaolo Bonzini 	int r = 1;
38059ce372b3SVitaly Kuznetsov 	u32 flags = vcpu->arch.apf.host_apf_flags;
3806c50d8ae3SPaolo Bonzini 
3807736c291cSSean Christopherson #ifndef CONFIG_X86_64
3808736c291cSSean Christopherson 	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
3809736c291cSSean Christopherson 	if (WARN_ON_ONCE(fault_address >> 32))
3810736c291cSSean Christopherson 		return -EFAULT;
3811736c291cSSean Christopherson #endif
3812736c291cSSean Christopherson 
3813c50d8ae3SPaolo Bonzini 	vcpu->arch.l1tf_flush_l1d = true;
38149ce372b3SVitaly Kuznetsov 	if (!flags) {
3815c50d8ae3SPaolo Bonzini 		trace_kvm_page_fault(fault_address, error_code);
3816c50d8ae3SPaolo Bonzini 
3817c50d8ae3SPaolo Bonzini 		if (kvm_event_needs_reinjection(vcpu))
3818c50d8ae3SPaolo Bonzini 			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
3819c50d8ae3SPaolo Bonzini 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
3820c50d8ae3SPaolo Bonzini 				insn_len);
38219ce372b3SVitaly Kuznetsov 	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
382268fd66f1SVitaly Kuznetsov 		vcpu->arch.apf.host_apf_flags = 0;
3823c50d8ae3SPaolo Bonzini 		local_irq_disable();
38246bca69adSThomas Gleixner 		kvm_async_pf_task_wait_schedule(fault_address);
3825c50d8ae3SPaolo Bonzini 		local_irq_enable();
38269ce372b3SVitaly Kuznetsov 	} else {
38279ce372b3SVitaly Kuznetsov 		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
3828c50d8ae3SPaolo Bonzini 	}
38299ce372b3SVitaly Kuznetsov 
3830c50d8ae3SPaolo Bonzini 	return r;
3831c50d8ae3SPaolo Bonzini }
3832c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
3833c50d8ae3SPaolo Bonzini 
38347a02674dSSean Christopherson int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
3835c50d8ae3SPaolo Bonzini 		       bool prefault)
3836c50d8ae3SPaolo Bonzini {
3837cb9b88c6SSean Christopherson 	int max_level;
3838c50d8ae3SPaolo Bonzini 
3839e662ec3eSSean Christopherson 	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
38403bae0459SSean Christopherson 	     max_level > PG_LEVEL_4K;
3841cb9b88c6SSean Christopherson 	     max_level--) {
3842cb9b88c6SSean Christopherson 		int page_num = KVM_PAGES_PER_HPAGE(max_level);
38430f90e1c1SSean Christopherson 		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
3844c50d8ae3SPaolo Bonzini 
3845cb9b88c6SSean Christopherson 		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
3846cb9b88c6SSean Christopherson 			break;
3847c50d8ae3SPaolo Bonzini 	}
3848c50d8ae3SPaolo Bonzini 
38490f90e1c1SSean Christopherson 	return direct_page_fault(vcpu, gpa, error_code, prefault,
38500f90e1c1SSean Christopherson 				 max_level, true);
3851c50d8ae3SPaolo Bonzini }
3852c50d8ae3SPaolo Bonzini 
3853c50d8ae3SPaolo Bonzini static void nonpaging_init_context(struct kvm_vcpu *vcpu,
3854c50d8ae3SPaolo Bonzini 				   struct kvm_mmu *context)
3855c50d8ae3SPaolo Bonzini {
3856c50d8ae3SPaolo Bonzini 	context->page_fault = nonpaging_page_fault;
3857c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = nonpaging_gva_to_gpa;
3858c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
38595efac074SPaolo Bonzini 	context->invlpg = NULL;
3860c50d8ae3SPaolo Bonzini 	context->root_level = 0;
3861c50d8ae3SPaolo Bonzini 	context->shadow_root_level = PT32E_ROOT_LEVEL;
3862c50d8ae3SPaolo Bonzini 	context->direct_map = true;
3863c50d8ae3SPaolo Bonzini 	context->nx = false;
3864c50d8ae3SPaolo Bonzini }
3865c50d8ae3SPaolo Bonzini 
3866be01e8e2SSean Christopherson static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
38670be44352SSean Christopherson 				  union kvm_mmu_page_role role)
38680be44352SSean Christopherson {
3869be01e8e2SSean Christopherson 	return (role.direct || pgd == root->pgd) &&
3870e47c4aeeSSean Christopherson 	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
3871e47c4aeeSSean Christopherson 	       role.word == to_shadow_page(root->hpa)->role.word;
38720be44352SSean Christopherson }
38730be44352SSean Christopherson 
3874c50d8ae3SPaolo Bonzini /*
3875be01e8e2SSean Christopherson  * Find out if a previously cached root matching the new pgd/role is available.
3876c50d8ae3SPaolo Bonzini  * The current root is also inserted into the cache.
3877c50d8ae3SPaolo Bonzini  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
3878c50d8ae3SPaolo Bonzini  * returned.
3879c50d8ae3SPaolo Bonzini  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
3880c50d8ae3SPaolo Bonzini  * false is returned. This root should now be freed by the caller.
3881c50d8ae3SPaolo Bonzini  */
3882be01e8e2SSean Christopherson static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3883c50d8ae3SPaolo Bonzini 				  union kvm_mmu_page_role new_role)
3884c50d8ae3SPaolo Bonzini {
3885c50d8ae3SPaolo Bonzini 	uint i;
3886c50d8ae3SPaolo Bonzini 	struct kvm_mmu_root_info root;
3887c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3888c50d8ae3SPaolo Bonzini 
3889be01e8e2SSean Christopherson 	root.pgd = mmu->root_pgd;
3890c50d8ae3SPaolo Bonzini 	root.hpa = mmu->root_hpa;
3891c50d8ae3SPaolo Bonzini 
3892be01e8e2SSean Christopherson 	if (is_root_usable(&root, new_pgd, new_role))
38930be44352SSean Christopherson 		return true;
38940be44352SSean Christopherson 
3895c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3896c50d8ae3SPaolo Bonzini 		swap(root, mmu->prev_roots[i]);
3897c50d8ae3SPaolo Bonzini 
3898be01e8e2SSean Christopherson 		if (is_root_usable(&root, new_pgd, new_role))
3899c50d8ae3SPaolo Bonzini 			break;
3900c50d8ae3SPaolo Bonzini 	}
3901c50d8ae3SPaolo Bonzini 
3902c50d8ae3SPaolo Bonzini 	mmu->root_hpa = root.hpa;
3903be01e8e2SSean Christopherson 	mmu->root_pgd = root.pgd;
3904c50d8ae3SPaolo Bonzini 
3905c50d8ae3SPaolo Bonzini 	return i < KVM_MMU_NUM_PREV_ROOTS;
3906c50d8ae3SPaolo Bonzini }
3907c50d8ae3SPaolo Bonzini 
3908be01e8e2SSean Christopherson static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3909b869855bSSean Christopherson 			    union kvm_mmu_page_role new_role)
3910c50d8ae3SPaolo Bonzini {
3911c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3912c50d8ae3SPaolo Bonzini 
3913c50d8ae3SPaolo Bonzini 	/*
3914c50d8ae3SPaolo Bonzini 	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
3915c50d8ae3SPaolo Bonzini 	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
3916c50d8ae3SPaolo Bonzini 	 * later if necessary.
3917c50d8ae3SPaolo Bonzini 	 */
3918c50d8ae3SPaolo Bonzini 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3919b869855bSSean Christopherson 	    mmu->root_level >= PT64_ROOT_4LEVEL)
3920fe9304d3SVitaly Kuznetsov 		return cached_root_available(vcpu, new_pgd, new_role);
3921c50d8ae3SPaolo Bonzini 
3922c50d8ae3SPaolo Bonzini 	return false;
3923c50d8ae3SPaolo Bonzini }
3924c50d8ae3SPaolo Bonzini 
3925be01e8e2SSean Christopherson static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
3926c50d8ae3SPaolo Bonzini 			      union kvm_mmu_page_role new_role,
39274a632ac6SSean Christopherson 			      bool skip_tlb_flush, bool skip_mmu_sync)
3928c50d8ae3SPaolo Bonzini {
3929be01e8e2SSean Christopherson 	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
3930b869855bSSean Christopherson 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
3931b869855bSSean Christopherson 		return;
3932c50d8ae3SPaolo Bonzini 	}
3933c50d8ae3SPaolo Bonzini 
3934c50d8ae3SPaolo Bonzini 	/*
3935b869855bSSean Christopherson 	 * It's possible that the cached previous root page is obsolete because
3936b869855bSSean Christopherson 	 * of a change in the MMU generation number. However, changing the
3937b869855bSSean Christopherson 	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
3938b869855bSSean Christopherson 	 * free the root set here and allocate a new one.
3939b869855bSSean Christopherson 	 */
3940b869855bSSean Christopherson 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
3941b869855bSSean Christopherson 
394271fe7013SSean Christopherson 	if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
3943b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
394471fe7013SSean Christopherson 	if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
3945b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
3946b869855bSSean Christopherson 
3947b869855bSSean Christopherson 	/*
3948b869855bSSean Christopherson 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
3949b869855bSSean Christopherson 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
3950b869855bSSean Christopherson 	 * valid. So clear any cached MMIO info even when we don't need to sync
3951b869855bSSean Christopherson 	 * the shadow page tables.
3952c50d8ae3SPaolo Bonzini 	 */
3953c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3954c50d8ae3SPaolo Bonzini 
3955daa5b6c1SBen Gardon 	/*
3956daa5b6c1SBen Gardon 	 * If this is a direct root page, it doesn't have a write flooding
3957daa5b6c1SBen Gardon 	 * count. Otherwise, clear the write flooding count.
3958daa5b6c1SBen Gardon 	 */
3959daa5b6c1SBen Gardon 	if (!new_role.direct)
3960daa5b6c1SBen Gardon 		__clear_sp_write_flooding_count(
3961daa5b6c1SBen Gardon 				to_shadow_page(vcpu->arch.mmu->root_hpa));
3962c50d8ae3SPaolo Bonzini }
3963c50d8ae3SPaolo Bonzini 
3964be01e8e2SSean Christopherson void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
39654a632ac6SSean Christopherson 		     bool skip_mmu_sync)
3966c50d8ae3SPaolo Bonzini {
3967be01e8e2SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
39684a632ac6SSean Christopherson 			  skip_tlb_flush, skip_mmu_sync);
3969c50d8ae3SPaolo Bonzini }
3970be01e8e2SSean Christopherson EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
3971c50d8ae3SPaolo Bonzini 
3972c50d8ae3SPaolo Bonzini static unsigned long get_cr3(struct kvm_vcpu *vcpu)
3973c50d8ae3SPaolo Bonzini {
3974c50d8ae3SPaolo Bonzini 	return kvm_read_cr3(vcpu);
3975c50d8ae3SPaolo Bonzini }
3976c50d8ae3SPaolo Bonzini 
3977c50d8ae3SPaolo Bonzini static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
39780a2b64c5SBen Gardon 			   unsigned int access, int *nr_present)
3979c50d8ae3SPaolo Bonzini {
3980c50d8ae3SPaolo Bonzini 	if (unlikely(is_mmio_spte(*sptep))) {
3981c50d8ae3SPaolo Bonzini 		if (gfn != get_mmio_spte_gfn(*sptep)) {
3982c50d8ae3SPaolo Bonzini 			mmu_spte_clear_no_track(sptep);
3983c50d8ae3SPaolo Bonzini 			return true;
3984c50d8ae3SPaolo Bonzini 		}
3985c50d8ae3SPaolo Bonzini 
3986c50d8ae3SPaolo Bonzini 		(*nr_present)++;
3987c50d8ae3SPaolo Bonzini 		mark_mmio_spte(vcpu, sptep, gfn, access);
3988c50d8ae3SPaolo Bonzini 		return true;
3989c50d8ae3SPaolo Bonzini 	}
3990c50d8ae3SPaolo Bonzini 
3991c50d8ae3SPaolo Bonzini 	return false;
3992c50d8ae3SPaolo Bonzini }
3993c50d8ae3SPaolo Bonzini 
3994c50d8ae3SPaolo Bonzini static inline bool is_last_gpte(struct kvm_mmu *mmu,
3995c50d8ae3SPaolo Bonzini 				unsigned level, unsigned gpte)
3996c50d8ae3SPaolo Bonzini {
3997c50d8ae3SPaolo Bonzini 	/*
3998c50d8ae3SPaolo Bonzini 	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
3999c50d8ae3SPaolo Bonzini 	 * If it is clear, there are no large pages at this level, so clear
4000c50d8ae3SPaolo Bonzini 	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
4001c50d8ae3SPaolo Bonzini 	 */
4002c50d8ae3SPaolo Bonzini 	gpte &= level - mmu->last_nonleaf_level;
4003c50d8ae3SPaolo Bonzini 
4004c50d8ae3SPaolo Bonzini 	/*
40053bae0459SSean Christopherson 	 * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
40063bae0459SSean Christopherson 	 * iff level <= PG_LEVEL_4K, which for our purpose means
40073bae0459SSean Christopherson 	 * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
4008c50d8ae3SPaolo Bonzini 	 */
40093bae0459SSean Christopherson 	gpte |= level - PG_LEVEL_4K - 1;
4010c50d8ae3SPaolo Bonzini 
4011c50d8ae3SPaolo Bonzini 	return gpte & PT_PAGE_SIZE_MASK;
4012c50d8ae3SPaolo Bonzini }
4013c50d8ae3SPaolo Bonzini 
4014c50d8ae3SPaolo Bonzini #define PTTYPE_EPT 18 /* arbitrary */
4015c50d8ae3SPaolo Bonzini #define PTTYPE PTTYPE_EPT
4016c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4017c50d8ae3SPaolo Bonzini #undef PTTYPE
4018c50d8ae3SPaolo Bonzini 
4019c50d8ae3SPaolo Bonzini #define PTTYPE 64
4020c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4021c50d8ae3SPaolo Bonzini #undef PTTYPE
4022c50d8ae3SPaolo Bonzini 
4023c50d8ae3SPaolo Bonzini #define PTTYPE 32
4024c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4025c50d8ae3SPaolo Bonzini #undef PTTYPE
4026c50d8ae3SPaolo Bonzini 
4027c50d8ae3SPaolo Bonzini static void
4028c50d8ae3SPaolo Bonzini __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4029c50d8ae3SPaolo Bonzini 			struct rsvd_bits_validate *rsvd_check,
40305b7f575cSSean Christopherson 			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4031c50d8ae3SPaolo Bonzini 			bool pse, bool amd)
4032c50d8ae3SPaolo Bonzini {
4033c50d8ae3SPaolo Bonzini 	u64 gbpages_bit_rsvd = 0;
4034c50d8ae3SPaolo Bonzini 	u64 nonleaf_bit8_rsvd = 0;
40355b7f575cSSean Christopherson 	u64 high_bits_rsvd;
4036c50d8ae3SPaolo Bonzini 
4037c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = 0;
4038c50d8ae3SPaolo Bonzini 
4039c50d8ae3SPaolo Bonzini 	if (!gbpages)
4040c50d8ae3SPaolo Bonzini 		gbpages_bit_rsvd = rsvd_bits(7, 7);
4041c50d8ae3SPaolo Bonzini 
40425b7f575cSSean Christopherson 	if (level == PT32E_ROOT_LEVEL)
40435b7f575cSSean Christopherson 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
40445b7f575cSSean Christopherson 	else
40455b7f575cSSean Christopherson 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
40465b7f575cSSean Christopherson 
40475b7f575cSSean Christopherson 	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
40485b7f575cSSean Christopherson 	if (!nx)
40495b7f575cSSean Christopherson 		high_bits_rsvd |= rsvd_bits(63, 63);
40505b7f575cSSean Christopherson 
4051c50d8ae3SPaolo Bonzini 	/*
4052c50d8ae3SPaolo Bonzini 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4053c50d8ae3SPaolo Bonzini 	 * leaf entries) on AMD CPUs only.
4054c50d8ae3SPaolo Bonzini 	 */
4055c50d8ae3SPaolo Bonzini 	if (amd)
4056c50d8ae3SPaolo Bonzini 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4057c50d8ae3SPaolo Bonzini 
4058c50d8ae3SPaolo Bonzini 	switch (level) {
4059c50d8ae3SPaolo Bonzini 	case PT32_ROOT_LEVEL:
4060c50d8ae3SPaolo Bonzini 		/* no rsvd bits for 2 level 4K page table entries */
4061c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][1] = 0;
4062c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][0] = 0;
4063c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4064c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4065c50d8ae3SPaolo Bonzini 
4066c50d8ae3SPaolo Bonzini 		if (!pse) {
4067c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = 0;
4068c50d8ae3SPaolo Bonzini 			break;
4069c50d8ae3SPaolo Bonzini 		}
4070c50d8ae3SPaolo Bonzini 
4071c50d8ae3SPaolo Bonzini 		if (is_cpuid_PSE36())
4072c50d8ae3SPaolo Bonzini 			/* 36bits PSE 4MB page */
4073c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4074c50d8ae3SPaolo Bonzini 		else
4075c50d8ae3SPaolo Bonzini 			/* 32 bits PSE 4MB page */
4076c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4077c50d8ae3SPaolo Bonzini 		break;
4078c50d8ae3SPaolo Bonzini 	case PT32E_ROOT_LEVEL:
40795b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
40805b7f575cSSean Christopherson 						   high_bits_rsvd |
40815b7f575cSSean Christopherson 						   rsvd_bits(5, 8) |
40825b7f575cSSean Christopherson 						   rsvd_bits(1, 2);	/* PDPTE */
40835b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
40845b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
40855b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4086c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 20);	/* large page */
4087c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4088c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4089c50d8ae3SPaolo Bonzini 		break;
4090c50d8ae3SPaolo Bonzini 	case PT64_ROOT_5LEVEL:
40915b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
40925b7f575cSSean Christopherson 						   nonleaf_bit8_rsvd |
40935b7f575cSSean Christopherson 						   rsvd_bits(7, 7);
4094c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][4] =
4095c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][4];
4096df561f66SGustavo A. R. Silva 		fallthrough;
4097c50d8ae3SPaolo Bonzini 	case PT64_ROOT_4LEVEL:
40985b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
40995b7f575cSSean Christopherson 						   nonleaf_bit8_rsvd |
41005b7f575cSSean Christopherson 						   rsvd_bits(7, 7);
41015b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
41025b7f575cSSean Christopherson 						   gbpages_bit_rsvd;
41035b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
41045b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4105c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][3] =
4106c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][3];
41075b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
41085b7f575cSSean Christopherson 						   gbpages_bit_rsvd |
4109c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 29);
41105b7f575cSSean Christopherson 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4111c50d8ae3SPaolo Bonzini 						   rsvd_bits(13, 20); /* large page */
4112c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4113c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4114c50d8ae3SPaolo Bonzini 		break;
4115c50d8ae3SPaolo Bonzini 	}
4116c50d8ae3SPaolo Bonzini }
4117c50d8ae3SPaolo Bonzini 
4118c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4119c50d8ae3SPaolo Bonzini 				  struct kvm_mmu *context)
4120c50d8ae3SPaolo Bonzini {
4121c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
41225b7f575cSSean Christopherson 				vcpu->arch.reserved_gpa_bits,
41235b7f575cSSean Christopherson 				context->root_level, context->nx,
4124c50d8ae3SPaolo Bonzini 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
412523493d0aSSean Christopherson 				is_pse(vcpu),
412623493d0aSSean Christopherson 				guest_cpuid_is_amd_or_hygon(vcpu));
4127c50d8ae3SPaolo Bonzini }
4128c50d8ae3SPaolo Bonzini 
4129c50d8ae3SPaolo Bonzini static void
4130c50d8ae3SPaolo Bonzini __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
41315b7f575cSSean Christopherson 			    u64 pa_bits_rsvd, bool execonly)
4132c50d8ae3SPaolo Bonzini {
41335b7f575cSSean Christopherson 	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4134c50d8ae3SPaolo Bonzini 	u64 bad_mt_xwr;
4135c50d8ae3SPaolo Bonzini 
41365b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
41375b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
41385b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
41395b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
41405b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4141c50d8ae3SPaolo Bonzini 
4142c50d8ae3SPaolo Bonzini 	/* large page */
4143c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4144c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
41455b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
41465b7f575cSSean Christopherson 	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
4147c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4148c50d8ae3SPaolo Bonzini 
4149c50d8ae3SPaolo Bonzini 	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
4150c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
4151c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
4152c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
4153c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
4154c50d8ae3SPaolo Bonzini 	if (!execonly) {
4155c50d8ae3SPaolo Bonzini 		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
4156c50d8ae3SPaolo Bonzini 		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4157c50d8ae3SPaolo Bonzini 	}
4158c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4159c50d8ae3SPaolo Bonzini }
4160c50d8ae3SPaolo Bonzini 
4161c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4162c50d8ae3SPaolo Bonzini 		struct kvm_mmu *context, bool execonly)
4163c50d8ae3SPaolo Bonzini {
4164c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
41655b7f575cSSean Christopherson 				    vcpu->arch.reserved_gpa_bits, execonly);
4166c50d8ae3SPaolo Bonzini }
4167c50d8ae3SPaolo Bonzini 
41686f8e65a6SSean Christopherson static inline u64 reserved_hpa_bits(void)
41696f8e65a6SSean Christopherson {
41706f8e65a6SSean Christopherson 	return rsvd_bits(shadow_phys_bits, 63);
41716f8e65a6SSean Christopherson }
41726f8e65a6SSean Christopherson 
4173c50d8ae3SPaolo Bonzini /*
4174c50d8ae3SPaolo Bonzini  * the page table on host is the shadow page table for the page
4175c50d8ae3SPaolo Bonzini  * table in guest or amd nested guest, its mmu features completely
4176c50d8ae3SPaolo Bonzini  * follow the features in guest.
4177c50d8ae3SPaolo Bonzini  */
4178c50d8ae3SPaolo Bonzini void
4179c50d8ae3SPaolo Bonzini reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
4180c50d8ae3SPaolo Bonzini {
4181c50d8ae3SPaolo Bonzini 	bool uses_nx = context->nx ||
4182c50d8ae3SPaolo Bonzini 		context->mmu_role.base.smep_andnot_wp;
4183c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4184c50d8ae3SPaolo Bonzini 	int i;
4185c50d8ae3SPaolo Bonzini 
4186c50d8ae3SPaolo Bonzini 	/*
4187c50d8ae3SPaolo Bonzini 	 * Passing "true" to the last argument is okay; it adds a check
4188c50d8ae3SPaolo Bonzini 	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
4189c50d8ae3SPaolo Bonzini 	 */
4190c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4191c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
41926f8e65a6SSean Christopherson 				reserved_hpa_bits(),
4193c50d8ae3SPaolo Bonzini 				context->shadow_root_level, uses_nx,
4194c50d8ae3SPaolo Bonzini 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4195c50d8ae3SPaolo Bonzini 				is_pse(vcpu), true);
4196c50d8ae3SPaolo Bonzini 
4197c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4198c50d8ae3SPaolo Bonzini 		return;
4199c50d8ae3SPaolo Bonzini 
4200c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4201c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4202c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4203c50d8ae3SPaolo Bonzini 	}
4204c50d8ae3SPaolo Bonzini 
4205c50d8ae3SPaolo Bonzini }
4206c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
4207c50d8ae3SPaolo Bonzini 
4208c50d8ae3SPaolo Bonzini static inline bool boot_cpu_is_amd(void)
4209c50d8ae3SPaolo Bonzini {
4210c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!tdp_enabled);
4211c50d8ae3SPaolo Bonzini 	return shadow_x_mask == 0;
4212c50d8ae3SPaolo Bonzini }
4213c50d8ae3SPaolo Bonzini 
4214c50d8ae3SPaolo Bonzini /*
4215c50d8ae3SPaolo Bonzini  * the direct page table on host, use as much mmu features as
4216c50d8ae3SPaolo Bonzini  * possible, however, kvm currently does not do execution-protection.
4217c50d8ae3SPaolo Bonzini  */
4218c50d8ae3SPaolo Bonzini static void
4219c50d8ae3SPaolo Bonzini reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4220c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context)
4221c50d8ae3SPaolo Bonzini {
4222c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4223c50d8ae3SPaolo Bonzini 	int i;
4224c50d8ae3SPaolo Bonzini 
4225c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4226c50d8ae3SPaolo Bonzini 
4227c50d8ae3SPaolo Bonzini 	if (boot_cpu_is_amd())
4228c50d8ae3SPaolo Bonzini 		__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
42296f8e65a6SSean Christopherson 					reserved_hpa_bits(),
4230c50d8ae3SPaolo Bonzini 					context->shadow_root_level, false,
4231c50d8ae3SPaolo Bonzini 					boot_cpu_has(X86_FEATURE_GBPAGES),
4232c50d8ae3SPaolo Bonzini 					true, true);
4233c50d8ae3SPaolo Bonzini 	else
4234c50d8ae3SPaolo Bonzini 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
42356f8e65a6SSean Christopherson 					    reserved_hpa_bits(), false);
4236c50d8ae3SPaolo Bonzini 
4237c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4238c50d8ae3SPaolo Bonzini 		return;
4239c50d8ae3SPaolo Bonzini 
4240c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4241c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4242c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4243c50d8ae3SPaolo Bonzini 	}
4244c50d8ae3SPaolo Bonzini }
4245c50d8ae3SPaolo Bonzini 
4246c50d8ae3SPaolo Bonzini /*
4247c50d8ae3SPaolo Bonzini  * as the comments in reset_shadow_zero_bits_mask() except it
4248c50d8ae3SPaolo Bonzini  * is the shadow page table for intel nested guest.
4249c50d8ae3SPaolo Bonzini  */
4250c50d8ae3SPaolo Bonzini static void
4251c50d8ae3SPaolo Bonzini reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4252c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context, bool execonly)
4253c50d8ae3SPaolo Bonzini {
4254c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
42556f8e65a6SSean Christopherson 				    reserved_hpa_bits(), execonly);
4256c50d8ae3SPaolo Bonzini }
4257c50d8ae3SPaolo Bonzini 
4258c50d8ae3SPaolo Bonzini #define BYTE_MASK(access) \
4259c50d8ae3SPaolo Bonzini 	((1 & (access) ? 2 : 0) | \
4260c50d8ae3SPaolo Bonzini 	 (2 & (access) ? 4 : 0) | \
4261c50d8ae3SPaolo Bonzini 	 (3 & (access) ? 8 : 0) | \
4262c50d8ae3SPaolo Bonzini 	 (4 & (access) ? 16 : 0) | \
4263c50d8ae3SPaolo Bonzini 	 (5 & (access) ? 32 : 0) | \
4264c50d8ae3SPaolo Bonzini 	 (6 & (access) ? 64 : 0) | \
4265c50d8ae3SPaolo Bonzini 	 (7 & (access) ? 128 : 0))
4266c50d8ae3SPaolo Bonzini 
4267c50d8ae3SPaolo Bonzini 
4268c50d8ae3SPaolo Bonzini static void update_permission_bitmask(struct kvm_vcpu *vcpu,
4269c50d8ae3SPaolo Bonzini 				      struct kvm_mmu *mmu, bool ept)
4270c50d8ae3SPaolo Bonzini {
4271c50d8ae3SPaolo Bonzini 	unsigned byte;
4272c50d8ae3SPaolo Bonzini 
4273c50d8ae3SPaolo Bonzini 	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4274c50d8ae3SPaolo Bonzini 	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4275c50d8ae3SPaolo Bonzini 	const u8 u = BYTE_MASK(ACC_USER_MASK);
4276c50d8ae3SPaolo Bonzini 
4277c50d8ae3SPaolo Bonzini 	bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
4278c50d8ae3SPaolo Bonzini 	bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
4279c50d8ae3SPaolo Bonzini 	bool cr0_wp = is_write_protection(vcpu);
4280c50d8ae3SPaolo Bonzini 
4281c50d8ae3SPaolo Bonzini 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4282c50d8ae3SPaolo Bonzini 		unsigned pfec = byte << 1;
4283c50d8ae3SPaolo Bonzini 
4284c50d8ae3SPaolo Bonzini 		/*
4285c50d8ae3SPaolo Bonzini 		 * Each "*f" variable has a 1 bit for each UWX value
4286c50d8ae3SPaolo Bonzini 		 * that causes a fault with the given PFEC.
4287c50d8ae3SPaolo Bonzini 		 */
4288c50d8ae3SPaolo Bonzini 
4289c50d8ae3SPaolo Bonzini 		/* Faults from writes to non-writable pages */
4290c50d8ae3SPaolo Bonzini 		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4291c50d8ae3SPaolo Bonzini 		/* Faults from user mode accesses to supervisor pages */
4292c50d8ae3SPaolo Bonzini 		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4293c50d8ae3SPaolo Bonzini 		/* Faults from fetches of non-executable pages*/
4294c50d8ae3SPaolo Bonzini 		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4295c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode fetches of user pages */
4296c50d8ae3SPaolo Bonzini 		u8 smepf = 0;
4297c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode accesses of user pages */
4298c50d8ae3SPaolo Bonzini 		u8 smapf = 0;
4299c50d8ae3SPaolo Bonzini 
4300c50d8ae3SPaolo Bonzini 		if (!ept) {
4301c50d8ae3SPaolo Bonzini 			/* Faults from kernel mode accesses to user pages */
4302c50d8ae3SPaolo Bonzini 			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4303c50d8ae3SPaolo Bonzini 
4304c50d8ae3SPaolo Bonzini 			/* Not really needed: !nx will cause pte.nx to fault */
4305c50d8ae3SPaolo Bonzini 			if (!mmu->nx)
4306c50d8ae3SPaolo Bonzini 				ff = 0;
4307c50d8ae3SPaolo Bonzini 
4308c50d8ae3SPaolo Bonzini 			/* Allow supervisor writes if !cr0.wp */
4309c50d8ae3SPaolo Bonzini 			if (!cr0_wp)
4310c50d8ae3SPaolo Bonzini 				wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4311c50d8ae3SPaolo Bonzini 
4312c50d8ae3SPaolo Bonzini 			/* Disallow supervisor fetches of user code if cr4.smep */
4313c50d8ae3SPaolo Bonzini 			if (cr4_smep)
4314c50d8ae3SPaolo Bonzini 				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4315c50d8ae3SPaolo Bonzini 
4316c50d8ae3SPaolo Bonzini 			/*
4317c50d8ae3SPaolo Bonzini 			 * SMAP:kernel-mode data accesses from user-mode
4318c50d8ae3SPaolo Bonzini 			 * mappings should fault. A fault is considered
4319c50d8ae3SPaolo Bonzini 			 * as a SMAP violation if all of the following
4320c50d8ae3SPaolo Bonzini 			 * conditions are true:
4321c50d8ae3SPaolo Bonzini 			 *   - X86_CR4_SMAP is set in CR4
4322c50d8ae3SPaolo Bonzini 			 *   - A user page is accessed
4323c50d8ae3SPaolo Bonzini 			 *   - The access is not a fetch
4324c50d8ae3SPaolo Bonzini 			 *   - Page fault in kernel mode
4325c50d8ae3SPaolo Bonzini 			 *   - if CPL = 3 or X86_EFLAGS_AC is clear
4326c50d8ae3SPaolo Bonzini 			 *
4327c50d8ae3SPaolo Bonzini 			 * Here, we cover the first three conditions.
4328c50d8ae3SPaolo Bonzini 			 * The fourth is computed dynamically in permission_fault();
4329c50d8ae3SPaolo Bonzini 			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4330c50d8ae3SPaolo Bonzini 			 * *not* subject to SMAP restrictions.
4331c50d8ae3SPaolo Bonzini 			 */
4332c50d8ae3SPaolo Bonzini 			if (cr4_smap)
4333c50d8ae3SPaolo Bonzini 				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4334c50d8ae3SPaolo Bonzini 		}
4335c50d8ae3SPaolo Bonzini 
4336c50d8ae3SPaolo Bonzini 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4337c50d8ae3SPaolo Bonzini 	}
4338c50d8ae3SPaolo Bonzini }
4339c50d8ae3SPaolo Bonzini 
4340c50d8ae3SPaolo Bonzini /*
4341c50d8ae3SPaolo Bonzini * PKU is an additional mechanism by which the paging controls access to
4342c50d8ae3SPaolo Bonzini * user-mode addresses based on the value in the PKRU register.  Protection
4343c50d8ae3SPaolo Bonzini * key violations are reported through a bit in the page fault error code.
4344c50d8ae3SPaolo Bonzini * Unlike other bits of the error code, the PK bit is not known at the
4345c50d8ae3SPaolo Bonzini * call site of e.g. gva_to_gpa; it must be computed directly in
4346c50d8ae3SPaolo Bonzini * permission_fault based on two bits of PKRU, on some machine state (CR4,
4347c50d8ae3SPaolo Bonzini * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4348c50d8ae3SPaolo Bonzini *
4349c50d8ae3SPaolo Bonzini * In particular the following conditions come from the error code, the
4350c50d8ae3SPaolo Bonzini * page tables and the machine state:
4351c50d8ae3SPaolo Bonzini * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4352c50d8ae3SPaolo Bonzini * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4353c50d8ae3SPaolo Bonzini * - PK is always zero if U=0 in the page tables
4354c50d8ae3SPaolo Bonzini * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4355c50d8ae3SPaolo Bonzini *
4356c50d8ae3SPaolo Bonzini * The PKRU bitmask caches the result of these four conditions.  The error
4357c50d8ae3SPaolo Bonzini * code (minus the P bit) and the page table's U bit form an index into the
4358c50d8ae3SPaolo Bonzini * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4359c50d8ae3SPaolo Bonzini * with the two bits of the PKRU register corresponding to the protection key.
4360c50d8ae3SPaolo Bonzini * For the first three conditions above the bits will be 00, thus masking
4361c50d8ae3SPaolo Bonzini * away both AD and WD.  For all reads or if the last condition holds, WD
4362c50d8ae3SPaolo Bonzini * only will be masked away.
4363c50d8ae3SPaolo Bonzini */
4364c50d8ae3SPaolo Bonzini static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4365c50d8ae3SPaolo Bonzini 				bool ept)
4366c50d8ae3SPaolo Bonzini {
4367c50d8ae3SPaolo Bonzini 	unsigned bit;
4368c50d8ae3SPaolo Bonzini 	bool wp;
4369c50d8ae3SPaolo Bonzini 
4370c50d8ae3SPaolo Bonzini 	if (ept) {
4371c50d8ae3SPaolo Bonzini 		mmu->pkru_mask = 0;
4372c50d8ae3SPaolo Bonzini 		return;
4373c50d8ae3SPaolo Bonzini 	}
4374c50d8ae3SPaolo Bonzini 
4375c50d8ae3SPaolo Bonzini 	/* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
4376c50d8ae3SPaolo Bonzini 	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
4377c50d8ae3SPaolo Bonzini 		mmu->pkru_mask = 0;
4378c50d8ae3SPaolo Bonzini 		return;
4379c50d8ae3SPaolo Bonzini 	}
4380c50d8ae3SPaolo Bonzini 
4381c50d8ae3SPaolo Bonzini 	wp = is_write_protection(vcpu);
4382c50d8ae3SPaolo Bonzini 
4383c50d8ae3SPaolo Bonzini 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4384c50d8ae3SPaolo Bonzini 		unsigned pfec, pkey_bits;
4385c50d8ae3SPaolo Bonzini 		bool check_pkey, check_write, ff, uf, wf, pte_user;
4386c50d8ae3SPaolo Bonzini 
4387c50d8ae3SPaolo Bonzini 		pfec = bit << 1;
4388c50d8ae3SPaolo Bonzini 		ff = pfec & PFERR_FETCH_MASK;
4389c50d8ae3SPaolo Bonzini 		uf = pfec & PFERR_USER_MASK;
4390c50d8ae3SPaolo Bonzini 		wf = pfec & PFERR_WRITE_MASK;
4391c50d8ae3SPaolo Bonzini 
4392c50d8ae3SPaolo Bonzini 		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
4393c50d8ae3SPaolo Bonzini 		pte_user = pfec & PFERR_RSVD_MASK;
4394c50d8ae3SPaolo Bonzini 
4395c50d8ae3SPaolo Bonzini 		/*
4396c50d8ae3SPaolo Bonzini 		 * Only need to check the access which is not an
4397c50d8ae3SPaolo Bonzini 		 * instruction fetch and is to a user page.
4398c50d8ae3SPaolo Bonzini 		 */
4399c50d8ae3SPaolo Bonzini 		check_pkey = (!ff && pte_user);
4400c50d8ae3SPaolo Bonzini 		/*
4401c50d8ae3SPaolo Bonzini 		 * write access is controlled by PKRU if it is a
4402c50d8ae3SPaolo Bonzini 		 * user access or CR0.WP = 1.
4403c50d8ae3SPaolo Bonzini 		 */
4404c50d8ae3SPaolo Bonzini 		check_write = check_pkey && wf && (uf || wp);
4405c50d8ae3SPaolo Bonzini 
4406c50d8ae3SPaolo Bonzini 		/* PKRU.AD stops both read and write access. */
4407c50d8ae3SPaolo Bonzini 		pkey_bits = !!check_pkey;
4408c50d8ae3SPaolo Bonzini 		/* PKRU.WD stops write access. */
4409c50d8ae3SPaolo Bonzini 		pkey_bits |= (!!check_write) << 1;
4410c50d8ae3SPaolo Bonzini 
4411c50d8ae3SPaolo Bonzini 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4412c50d8ae3SPaolo Bonzini 	}
4413c50d8ae3SPaolo Bonzini }
4414c50d8ae3SPaolo Bonzini 
4415c50d8ae3SPaolo Bonzini static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
4416c50d8ae3SPaolo Bonzini {
4417c50d8ae3SPaolo Bonzini 	unsigned root_level = mmu->root_level;
4418c50d8ae3SPaolo Bonzini 
4419c50d8ae3SPaolo Bonzini 	mmu->last_nonleaf_level = root_level;
4420c50d8ae3SPaolo Bonzini 	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
4421c50d8ae3SPaolo Bonzini 		mmu->last_nonleaf_level++;
4422c50d8ae3SPaolo Bonzini }
4423c50d8ae3SPaolo Bonzini 
4424c50d8ae3SPaolo Bonzini static void paging64_init_context_common(struct kvm_vcpu *vcpu,
4425c50d8ae3SPaolo Bonzini 					 struct kvm_mmu *context,
4426c50d8ae3SPaolo Bonzini 					 int level)
4427c50d8ae3SPaolo Bonzini {
4428c50d8ae3SPaolo Bonzini 	context->nx = is_nx(vcpu);
4429c50d8ae3SPaolo Bonzini 	context->root_level = level;
4430c50d8ae3SPaolo Bonzini 
4431c50d8ae3SPaolo Bonzini 	reset_rsvds_bits_mask(vcpu, context);
4432c50d8ae3SPaolo Bonzini 	update_permission_bitmask(vcpu, context, false);
4433c50d8ae3SPaolo Bonzini 	update_pkru_bitmask(vcpu, context, false);
4434c50d8ae3SPaolo Bonzini 	update_last_nonleaf_level(vcpu, context);
4435c50d8ae3SPaolo Bonzini 
4436c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!is_pae(vcpu));
4437c50d8ae3SPaolo Bonzini 	context->page_fault = paging64_page_fault;
4438c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging64_gva_to_gpa;
4439c50d8ae3SPaolo Bonzini 	context->sync_page = paging64_sync_page;
4440c50d8ae3SPaolo Bonzini 	context->invlpg = paging64_invlpg;
4441c50d8ae3SPaolo Bonzini 	context->shadow_root_level = level;
4442c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4443c50d8ae3SPaolo Bonzini }
4444c50d8ae3SPaolo Bonzini 
4445c50d8ae3SPaolo Bonzini static void paging64_init_context(struct kvm_vcpu *vcpu,
4446c50d8ae3SPaolo Bonzini 				  struct kvm_mmu *context)
4447c50d8ae3SPaolo Bonzini {
4448c50d8ae3SPaolo Bonzini 	int root_level = is_la57_mode(vcpu) ?
4449c50d8ae3SPaolo Bonzini 			 PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4450c50d8ae3SPaolo Bonzini 
4451c50d8ae3SPaolo Bonzini 	paging64_init_context_common(vcpu, context, root_level);
4452c50d8ae3SPaolo Bonzini }
4453c50d8ae3SPaolo Bonzini 
4454c50d8ae3SPaolo Bonzini static void paging32_init_context(struct kvm_vcpu *vcpu,
4455c50d8ae3SPaolo Bonzini 				  struct kvm_mmu *context)
4456c50d8ae3SPaolo Bonzini {
4457c50d8ae3SPaolo Bonzini 	context->nx = false;
4458c50d8ae3SPaolo Bonzini 	context->root_level = PT32_ROOT_LEVEL;
4459c50d8ae3SPaolo Bonzini 
4460c50d8ae3SPaolo Bonzini 	reset_rsvds_bits_mask(vcpu, context);
4461c50d8ae3SPaolo Bonzini 	update_permission_bitmask(vcpu, context, false);
4462c50d8ae3SPaolo Bonzini 	update_pkru_bitmask(vcpu, context, false);
4463c50d8ae3SPaolo Bonzini 	update_last_nonleaf_level(vcpu, context);
4464c50d8ae3SPaolo Bonzini 
4465c50d8ae3SPaolo Bonzini 	context->page_fault = paging32_page_fault;
4466c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging32_gva_to_gpa;
4467c50d8ae3SPaolo Bonzini 	context->sync_page = paging32_sync_page;
4468c50d8ae3SPaolo Bonzini 	context->invlpg = paging32_invlpg;
4469c50d8ae3SPaolo Bonzini 	context->shadow_root_level = PT32E_ROOT_LEVEL;
4470c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4471c50d8ae3SPaolo Bonzini }
4472c50d8ae3SPaolo Bonzini 
4473c50d8ae3SPaolo Bonzini static void paging32E_init_context(struct kvm_vcpu *vcpu,
4474c50d8ae3SPaolo Bonzini 				   struct kvm_mmu *context)
4475c50d8ae3SPaolo Bonzini {
4476c50d8ae3SPaolo Bonzini 	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
4477c50d8ae3SPaolo Bonzini }
4478c50d8ae3SPaolo Bonzini 
4479c50d8ae3SPaolo Bonzini static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
4480c50d8ae3SPaolo Bonzini {
4481c50d8ae3SPaolo Bonzini 	union kvm_mmu_extended_role ext = {0};
4482c50d8ae3SPaolo Bonzini 
4483c50d8ae3SPaolo Bonzini 	ext.cr0_pg = !!is_paging(vcpu);
4484c50d8ae3SPaolo Bonzini 	ext.cr4_pae = !!is_pae(vcpu);
4485c50d8ae3SPaolo Bonzini 	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
4486c50d8ae3SPaolo Bonzini 	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
4487c50d8ae3SPaolo Bonzini 	ext.cr4_pse = !!is_pse(vcpu);
4488c50d8ae3SPaolo Bonzini 	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
4489c50d8ae3SPaolo Bonzini 	ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
4490c50d8ae3SPaolo Bonzini 
4491c50d8ae3SPaolo Bonzini 	ext.valid = 1;
4492c50d8ae3SPaolo Bonzini 
4493c50d8ae3SPaolo Bonzini 	return ext;
4494c50d8ae3SPaolo Bonzini }
4495c50d8ae3SPaolo Bonzini 
4496c50d8ae3SPaolo Bonzini static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
4497c50d8ae3SPaolo Bonzini 						   bool base_only)
4498c50d8ae3SPaolo Bonzini {
4499c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4500c50d8ae3SPaolo Bonzini 
4501c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4502c50d8ae3SPaolo Bonzini 	role.base.nxe = !!is_nx(vcpu);
4503c50d8ae3SPaolo Bonzini 	role.base.cr0_wp = is_write_protection(vcpu);
4504c50d8ae3SPaolo Bonzini 	role.base.smm = is_smm(vcpu);
4505c50d8ae3SPaolo Bonzini 	role.base.guest_mode = is_guest_mode(vcpu);
4506c50d8ae3SPaolo Bonzini 
4507c50d8ae3SPaolo Bonzini 	if (base_only)
4508c50d8ae3SPaolo Bonzini 		return role;
4509c50d8ae3SPaolo Bonzini 
4510c50d8ae3SPaolo Bonzini 	role.ext = kvm_calc_mmu_role_ext(vcpu);
4511c50d8ae3SPaolo Bonzini 
4512c50d8ae3SPaolo Bonzini 	return role;
4513c50d8ae3SPaolo Bonzini }
4514c50d8ae3SPaolo Bonzini 
4515d468d94bSSean Christopherson static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4516d468d94bSSean Christopherson {
4517d468d94bSSean Christopherson 	/* Use 5-level TDP if and only if it's useful/necessary. */
451883013059SSean Christopherson 	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4519d468d94bSSean Christopherson 		return 4;
4520d468d94bSSean Christopherson 
452183013059SSean Christopherson 	return max_tdp_level;
4522d468d94bSSean Christopherson }
4523d468d94bSSean Christopherson 
4524c50d8ae3SPaolo Bonzini static union kvm_mmu_role
4525c50d8ae3SPaolo Bonzini kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
4526c50d8ae3SPaolo Bonzini {
4527c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
4528c50d8ae3SPaolo Bonzini 
4529c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = (shadow_accessed_mask == 0);
4530d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4531c50d8ae3SPaolo Bonzini 	role.base.direct = true;
4532c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4533c50d8ae3SPaolo Bonzini 
4534c50d8ae3SPaolo Bonzini 	return role;
4535c50d8ae3SPaolo Bonzini }
4536c50d8ae3SPaolo Bonzini 
4537c50d8ae3SPaolo Bonzini static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4538c50d8ae3SPaolo Bonzini {
45398c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4540c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
4541c50d8ae3SPaolo Bonzini 		kvm_calc_tdp_mmu_root_page_role(vcpu, false);
4542c50d8ae3SPaolo Bonzini 
4543c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4544c50d8ae3SPaolo Bonzini 		return;
4545c50d8ae3SPaolo Bonzini 
4546c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
45477a02674dSSean Christopherson 	context->page_fault = kvm_tdp_page_fault;
4548c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
45495efac074SPaolo Bonzini 	context->invlpg = NULL;
4550d468d94bSSean Christopherson 	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4551c50d8ae3SPaolo Bonzini 	context->direct_map = true;
4552d8dd54e0SSean Christopherson 	context->get_guest_pgd = get_cr3;
4553c50d8ae3SPaolo Bonzini 	context->get_pdptr = kvm_pdptr_read;
4554c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4555c50d8ae3SPaolo Bonzini 
4556c50d8ae3SPaolo Bonzini 	if (!is_paging(vcpu)) {
4557c50d8ae3SPaolo Bonzini 		context->nx = false;
4558c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = nonpaging_gva_to_gpa;
4559c50d8ae3SPaolo Bonzini 		context->root_level = 0;
4560c50d8ae3SPaolo Bonzini 	} else if (is_long_mode(vcpu)) {
4561c50d8ae3SPaolo Bonzini 		context->nx = is_nx(vcpu);
4562c50d8ae3SPaolo Bonzini 		context->root_level = is_la57_mode(vcpu) ?
4563c50d8ae3SPaolo Bonzini 				PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4564c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, context);
4565c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging64_gva_to_gpa;
4566c50d8ae3SPaolo Bonzini 	} else if (is_pae(vcpu)) {
4567c50d8ae3SPaolo Bonzini 		context->nx = is_nx(vcpu);
4568c50d8ae3SPaolo Bonzini 		context->root_level = PT32E_ROOT_LEVEL;
4569c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, context);
4570c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging64_gva_to_gpa;
4571c50d8ae3SPaolo Bonzini 	} else {
4572c50d8ae3SPaolo Bonzini 		context->nx = false;
4573c50d8ae3SPaolo Bonzini 		context->root_level = PT32_ROOT_LEVEL;
4574c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, context);
4575c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging32_gva_to_gpa;
4576c50d8ae3SPaolo Bonzini 	}
4577c50d8ae3SPaolo Bonzini 
4578c50d8ae3SPaolo Bonzini 	update_permission_bitmask(vcpu, context, false);
4579c50d8ae3SPaolo Bonzini 	update_pkru_bitmask(vcpu, context, false);
4580c50d8ae3SPaolo Bonzini 	update_last_nonleaf_level(vcpu, context);
4581c50d8ae3SPaolo Bonzini 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
4582c50d8ae3SPaolo Bonzini }
4583c50d8ae3SPaolo Bonzini 
4584c50d8ae3SPaolo Bonzini static union kvm_mmu_role
458559505b55SSean Christopherson kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
4586c50d8ae3SPaolo Bonzini {
4587c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
4588c50d8ae3SPaolo Bonzini 
4589c50d8ae3SPaolo Bonzini 	role.base.smep_andnot_wp = role.ext.cr4_smep &&
4590c50d8ae3SPaolo Bonzini 		!is_write_protection(vcpu);
4591c50d8ae3SPaolo Bonzini 	role.base.smap_andnot_wp = role.ext.cr4_smap &&
4592c50d8ae3SPaolo Bonzini 		!is_write_protection(vcpu);
4593c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = !!is_pae(vcpu);
4594c50d8ae3SPaolo Bonzini 
459559505b55SSean Christopherson 	return role;
459659505b55SSean Christopherson }
459759505b55SSean Christopherson 
459859505b55SSean Christopherson static union kvm_mmu_role
459959505b55SSean Christopherson kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
460059505b55SSean Christopherson {
460159505b55SSean Christopherson 	union kvm_mmu_role role =
460259505b55SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, base_only);
460359505b55SSean Christopherson 
460459505b55SSean Christopherson 	role.base.direct = !is_paging(vcpu);
460559505b55SSean Christopherson 
4606c50d8ae3SPaolo Bonzini 	if (!is_long_mode(vcpu))
4607c50d8ae3SPaolo Bonzini 		role.base.level = PT32E_ROOT_LEVEL;
4608c50d8ae3SPaolo Bonzini 	else if (is_la57_mode(vcpu))
4609c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_5LEVEL;
4610c50d8ae3SPaolo Bonzini 	else
4611c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_4LEVEL;
4612c50d8ae3SPaolo Bonzini 
4613c50d8ae3SPaolo Bonzini 	return role;
4614c50d8ae3SPaolo Bonzini }
4615c50d8ae3SPaolo Bonzini 
46168c008659SPaolo Bonzini static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
46178c008659SPaolo Bonzini 				    u32 cr0, u32 cr4, u32 efer,
46188c008659SPaolo Bonzini 				    union kvm_mmu_role new_role)
4619c50d8ae3SPaolo Bonzini {
4620929d1cfaSPaolo Bonzini 	if (!(cr0 & X86_CR0_PG))
4621c50d8ae3SPaolo Bonzini 		nonpaging_init_context(vcpu, context);
4622929d1cfaSPaolo Bonzini 	else if (efer & EFER_LMA)
4623c50d8ae3SPaolo Bonzini 		paging64_init_context(vcpu, context);
4624929d1cfaSPaolo Bonzini 	else if (cr4 & X86_CR4_PAE)
4625c50d8ae3SPaolo Bonzini 		paging32E_init_context(vcpu, context);
4626c50d8ae3SPaolo Bonzini 	else
4627c50d8ae3SPaolo Bonzini 		paging32_init_context(vcpu, context);
4628c50d8ae3SPaolo Bonzini 
4629c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
4630c50d8ae3SPaolo Bonzini 	reset_shadow_zero_bits_mask(vcpu, context);
4631c50d8ae3SPaolo Bonzini }
46320f04a2acSVitaly Kuznetsov 
46330f04a2acSVitaly Kuznetsov static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
46340f04a2acSVitaly Kuznetsov {
46358c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
46360f04a2acSVitaly Kuznetsov 	union kvm_mmu_role new_role =
46370f04a2acSVitaly Kuznetsov 		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
46380f04a2acSVitaly Kuznetsov 
46390f04a2acSVitaly Kuznetsov 	if (new_role.as_u64 != context->mmu_role.as_u64)
46408c008659SPaolo Bonzini 		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
46410f04a2acSVitaly Kuznetsov }
46420f04a2acSVitaly Kuznetsov 
464359505b55SSean Christopherson static union kvm_mmu_role
464459505b55SSean Christopherson kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
464559505b55SSean Christopherson {
464659505b55SSean Christopherson 	union kvm_mmu_role role =
464759505b55SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, false);
464859505b55SSean Christopherson 
464959505b55SSean Christopherson 	role.base.direct = false;
4650d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
465159505b55SSean Christopherson 
465259505b55SSean Christopherson 	return role;
465359505b55SSean Christopherson }
465459505b55SSean Christopherson 
46550f04a2acSVitaly Kuznetsov void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
46560f04a2acSVitaly Kuznetsov 			     gpa_t nested_cr3)
46570f04a2acSVitaly Kuznetsov {
46588c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
465959505b55SSean Christopherson 	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
46600f04a2acSVitaly Kuznetsov 
4661a506fdd2SVitaly Kuznetsov 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
4662a506fdd2SVitaly Kuznetsov 
4663a3322d5cSSean Christopherson 	if (new_role.as_u64 != context->mmu_role.as_u64) {
46648c008659SPaolo Bonzini 		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
4665a3322d5cSSean Christopherson 
4666a3322d5cSSean Christopherson 		/*
4667a3322d5cSSean Christopherson 		 * Override the level set by the common init helper, nested TDP
4668a3322d5cSSean Christopherson 		 * always uses the host's TDP configuration.
4669a3322d5cSSean Christopherson 		 */
4670a3322d5cSSean Christopherson 		context->shadow_root_level = new_role.base.level;
4671a3322d5cSSean Christopherson 	}
46720f04a2acSVitaly Kuznetsov }
46730f04a2acSVitaly Kuznetsov EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4674c50d8ae3SPaolo Bonzini 
4675c50d8ae3SPaolo Bonzini static union kvm_mmu_role
4676c50d8ae3SPaolo Bonzini kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4677bb1fcc70SSean Christopherson 				   bool execonly, u8 level)
4678c50d8ae3SPaolo Bonzini {
4679c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4680c50d8ae3SPaolo Bonzini 
4681c50d8ae3SPaolo Bonzini 	/* SMM flag is inherited from root_mmu */
4682c50d8ae3SPaolo Bonzini 	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4683c50d8ae3SPaolo Bonzini 
4684bb1fcc70SSean Christopherson 	role.base.level = level;
4685c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4686c50d8ae3SPaolo Bonzini 	role.base.direct = false;
4687c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = !accessed_dirty;
4688c50d8ae3SPaolo Bonzini 	role.base.guest_mode = true;
4689c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4690c50d8ae3SPaolo Bonzini 
4691c50d8ae3SPaolo Bonzini 	/*
4692c50d8ae3SPaolo Bonzini 	 * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
4693c50d8ae3SPaolo Bonzini 	 * SMAP variation to denote shadow EPT entries.
4694c50d8ae3SPaolo Bonzini 	 */
4695c50d8ae3SPaolo Bonzini 	role.base.cr0_wp = true;
4696c50d8ae3SPaolo Bonzini 	role.base.smap_andnot_wp = true;
4697c50d8ae3SPaolo Bonzini 
4698c50d8ae3SPaolo Bonzini 	role.ext = kvm_calc_mmu_role_ext(vcpu);
4699c50d8ae3SPaolo Bonzini 	role.ext.execonly = execonly;
4700c50d8ae3SPaolo Bonzini 
4701c50d8ae3SPaolo Bonzini 	return role;
4702c50d8ae3SPaolo Bonzini }
4703c50d8ae3SPaolo Bonzini 
4704c50d8ae3SPaolo Bonzini void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4705c50d8ae3SPaolo Bonzini 			     bool accessed_dirty, gpa_t new_eptp)
4706c50d8ae3SPaolo Bonzini {
47078c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4708bb1fcc70SSean Christopherson 	u8 level = vmx_eptp_page_walk_level(new_eptp);
4709c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
4710c50d8ae3SPaolo Bonzini 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4711bb1fcc70SSean Christopherson 						   execonly, level);
4712c50d8ae3SPaolo Bonzini 
4713be01e8e2SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
4714c50d8ae3SPaolo Bonzini 
4715c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4716c50d8ae3SPaolo Bonzini 		return;
4717c50d8ae3SPaolo Bonzini 
4718bb1fcc70SSean Christopherson 	context->shadow_root_level = level;
4719c50d8ae3SPaolo Bonzini 
4720c50d8ae3SPaolo Bonzini 	context->nx = true;
4721c50d8ae3SPaolo Bonzini 	context->ept_ad = accessed_dirty;
4722c50d8ae3SPaolo Bonzini 	context->page_fault = ept_page_fault;
4723c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = ept_gva_to_gpa;
4724c50d8ae3SPaolo Bonzini 	context->sync_page = ept_sync_page;
4725c50d8ae3SPaolo Bonzini 	context->invlpg = ept_invlpg;
4726bb1fcc70SSean Christopherson 	context->root_level = level;
4727c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4728c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
4729c50d8ae3SPaolo Bonzini 
4730c50d8ae3SPaolo Bonzini 	update_permission_bitmask(vcpu, context, true);
4731c50d8ae3SPaolo Bonzini 	update_pkru_bitmask(vcpu, context, true);
4732c50d8ae3SPaolo Bonzini 	update_last_nonleaf_level(vcpu, context);
4733c50d8ae3SPaolo Bonzini 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4734c50d8ae3SPaolo Bonzini 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4735c50d8ae3SPaolo Bonzini }
4736c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4737c50d8ae3SPaolo Bonzini 
4738c50d8ae3SPaolo Bonzini static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4739c50d8ae3SPaolo Bonzini {
47408c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4741c50d8ae3SPaolo Bonzini 
4742929d1cfaSPaolo Bonzini 	kvm_init_shadow_mmu(vcpu,
4743929d1cfaSPaolo Bonzini 			    kvm_read_cr0_bits(vcpu, X86_CR0_PG),
4744929d1cfaSPaolo Bonzini 			    kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
4745929d1cfaSPaolo Bonzini 			    vcpu->arch.efer);
4746929d1cfaSPaolo Bonzini 
4747d8dd54e0SSean Christopherson 	context->get_guest_pgd     = get_cr3;
4748c50d8ae3SPaolo Bonzini 	context->get_pdptr         = kvm_pdptr_read;
4749c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4750c50d8ae3SPaolo Bonzini }
4751c50d8ae3SPaolo Bonzini 
4752c50d8ae3SPaolo Bonzini static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4753c50d8ae3SPaolo Bonzini {
4754c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
4755c50d8ae3SPaolo Bonzini 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4756c50d8ae3SPaolo Bonzini 
4757c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
4758c50d8ae3SPaolo Bonzini 		return;
4759c50d8ae3SPaolo Bonzini 
4760c50d8ae3SPaolo Bonzini 	g_context->mmu_role.as_u64 = new_role.as_u64;
4761d8dd54e0SSean Christopherson 	g_context->get_guest_pgd     = get_cr3;
4762c50d8ae3SPaolo Bonzini 	g_context->get_pdptr         = kvm_pdptr_read;
4763c50d8ae3SPaolo Bonzini 	g_context->inject_page_fault = kvm_inject_page_fault;
4764c50d8ae3SPaolo Bonzini 
4765c50d8ae3SPaolo Bonzini 	/*
47665efac074SPaolo Bonzini 	 * L2 page tables are never shadowed, so there is no need to sync
47675efac074SPaolo Bonzini 	 * SPTEs.
47685efac074SPaolo Bonzini 	 */
47695efac074SPaolo Bonzini 	g_context->invlpg            = NULL;
47705efac074SPaolo Bonzini 
47715efac074SPaolo Bonzini 	/*
4772c50d8ae3SPaolo Bonzini 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4773c50d8ae3SPaolo Bonzini 	 * L1's nested page tables (e.g. EPT12). The nested translation
4774c50d8ae3SPaolo Bonzini 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4775c50d8ae3SPaolo Bonzini 	 * L2's page tables as the first level of translation and L1's
4776c50d8ae3SPaolo Bonzini 	 * nested page tables as the second level of translation. Basically
4777c50d8ae3SPaolo Bonzini 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4778c50d8ae3SPaolo Bonzini 	 */
4779c50d8ae3SPaolo Bonzini 	if (!is_paging(vcpu)) {
4780c50d8ae3SPaolo Bonzini 		g_context->nx = false;
4781c50d8ae3SPaolo Bonzini 		g_context->root_level = 0;
4782c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4783c50d8ae3SPaolo Bonzini 	} else if (is_long_mode(vcpu)) {
4784c50d8ae3SPaolo Bonzini 		g_context->nx = is_nx(vcpu);
4785c50d8ae3SPaolo Bonzini 		g_context->root_level = is_la57_mode(vcpu) ?
4786c50d8ae3SPaolo Bonzini 					PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4787c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, g_context);
4788c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4789c50d8ae3SPaolo Bonzini 	} else if (is_pae(vcpu)) {
4790c50d8ae3SPaolo Bonzini 		g_context->nx = is_nx(vcpu);
4791c50d8ae3SPaolo Bonzini 		g_context->root_level = PT32E_ROOT_LEVEL;
4792c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, g_context);
4793c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4794c50d8ae3SPaolo Bonzini 	} else {
4795c50d8ae3SPaolo Bonzini 		g_context->nx = false;
4796c50d8ae3SPaolo Bonzini 		g_context->root_level = PT32_ROOT_LEVEL;
4797c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, g_context);
4798c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4799c50d8ae3SPaolo Bonzini 	}
4800c50d8ae3SPaolo Bonzini 
4801c50d8ae3SPaolo Bonzini 	update_permission_bitmask(vcpu, g_context, false);
4802c50d8ae3SPaolo Bonzini 	update_pkru_bitmask(vcpu, g_context, false);
4803c50d8ae3SPaolo Bonzini 	update_last_nonleaf_level(vcpu, g_context);
4804c50d8ae3SPaolo Bonzini }
4805c50d8ae3SPaolo Bonzini 
4806c50d8ae3SPaolo Bonzini void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
4807c50d8ae3SPaolo Bonzini {
4808c50d8ae3SPaolo Bonzini 	if (reset_roots) {
4809c50d8ae3SPaolo Bonzini 		uint i;
4810c50d8ae3SPaolo Bonzini 
4811c50d8ae3SPaolo Bonzini 		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
4812c50d8ae3SPaolo Bonzini 
4813c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4814c50d8ae3SPaolo Bonzini 			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
4815c50d8ae3SPaolo Bonzini 	}
4816c50d8ae3SPaolo Bonzini 
4817c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
4818c50d8ae3SPaolo Bonzini 		init_kvm_nested_mmu(vcpu);
4819c50d8ae3SPaolo Bonzini 	else if (tdp_enabled)
4820c50d8ae3SPaolo Bonzini 		init_kvm_tdp_mmu(vcpu);
4821c50d8ae3SPaolo Bonzini 	else
4822c50d8ae3SPaolo Bonzini 		init_kvm_softmmu(vcpu);
4823c50d8ae3SPaolo Bonzini }
4824c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_mmu);
4825c50d8ae3SPaolo Bonzini 
4826c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
4827c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
4828c50d8ae3SPaolo Bonzini {
4829c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role;
4830c50d8ae3SPaolo Bonzini 
4831c50d8ae3SPaolo Bonzini 	if (tdp_enabled)
4832c50d8ae3SPaolo Bonzini 		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
4833c50d8ae3SPaolo Bonzini 	else
4834c50d8ae3SPaolo Bonzini 		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
4835c50d8ae3SPaolo Bonzini 
4836c50d8ae3SPaolo Bonzini 	return role.base;
4837c50d8ae3SPaolo Bonzini }
4838c50d8ae3SPaolo Bonzini 
4839c50d8ae3SPaolo Bonzini void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
4840c50d8ae3SPaolo Bonzini {
4841c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
4842c50d8ae3SPaolo Bonzini 	kvm_init_mmu(vcpu, true);
4843c50d8ae3SPaolo Bonzini }
4844c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
4845c50d8ae3SPaolo Bonzini 
4846c50d8ae3SPaolo Bonzini int kvm_mmu_load(struct kvm_vcpu *vcpu)
4847c50d8ae3SPaolo Bonzini {
4848c50d8ae3SPaolo Bonzini 	int r;
4849c50d8ae3SPaolo Bonzini 
4850378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
4851c50d8ae3SPaolo Bonzini 	if (r)
4852c50d8ae3SPaolo Bonzini 		goto out;
4853748e52b9SSean Christopherson 	r = mmu_alloc_special_roots(vcpu);
4854748e52b9SSean Christopherson 	if (r)
4855748e52b9SSean Christopherson 		goto out;
48566e6ec584SSean Christopherson 	write_lock(&vcpu->kvm->mmu_lock);
48576e6ec584SSean Christopherson 	if (make_mmu_pages_available(vcpu))
48586e6ec584SSean Christopherson 		r = -ENOSPC;
48596e6ec584SSean Christopherson 	else if (vcpu->arch.mmu->direct_map)
48606e6ec584SSean Christopherson 		r = mmu_alloc_direct_roots(vcpu);
48616e6ec584SSean Christopherson 	else
48626e6ec584SSean Christopherson 		r = mmu_alloc_shadow_roots(vcpu);
48636e6ec584SSean Christopherson 	write_unlock(&vcpu->kvm->mmu_lock);
4864c50d8ae3SPaolo Bonzini 	if (r)
4865c50d8ae3SPaolo Bonzini 		goto out;
4866a91f387bSSean Christopherson 
4867a91f387bSSean Christopherson 	kvm_mmu_sync_roots(vcpu);
4868a91f387bSSean Christopherson 
4869727a7e27SPaolo Bonzini 	kvm_mmu_load_pgd(vcpu);
4870b3646477SJason Baron 	static_call(kvm_x86_tlb_flush_current)(vcpu);
4871c50d8ae3SPaolo Bonzini out:
4872c50d8ae3SPaolo Bonzini 	return r;
4873c50d8ae3SPaolo Bonzini }
4874c50d8ae3SPaolo Bonzini 
4875c50d8ae3SPaolo Bonzini void kvm_mmu_unload(struct kvm_vcpu *vcpu)
4876c50d8ae3SPaolo Bonzini {
4877c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
4878c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
4879c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4880c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
4881c50d8ae3SPaolo Bonzini }
4882c50d8ae3SPaolo Bonzini 
4883c50d8ae3SPaolo Bonzini static bool need_remote_flush(u64 old, u64 new)
4884c50d8ae3SPaolo Bonzini {
4885c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old))
4886c50d8ae3SPaolo Bonzini 		return false;
4887c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(new))
4888c50d8ae3SPaolo Bonzini 		return true;
4889c50d8ae3SPaolo Bonzini 	if ((old ^ new) & PT64_BASE_ADDR_MASK)
4890c50d8ae3SPaolo Bonzini 		return true;
4891c50d8ae3SPaolo Bonzini 	old ^= shadow_nx_mask;
4892c50d8ae3SPaolo Bonzini 	new ^= shadow_nx_mask;
4893c50d8ae3SPaolo Bonzini 	return (old & ~new & PT64_PERM_MASK) != 0;
4894c50d8ae3SPaolo Bonzini }
4895c50d8ae3SPaolo Bonzini 
4896c50d8ae3SPaolo Bonzini static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
4897c50d8ae3SPaolo Bonzini 				    int *bytes)
4898c50d8ae3SPaolo Bonzini {
4899c50d8ae3SPaolo Bonzini 	u64 gentry = 0;
4900c50d8ae3SPaolo Bonzini 	int r;
4901c50d8ae3SPaolo Bonzini 
4902c50d8ae3SPaolo Bonzini 	/*
4903c50d8ae3SPaolo Bonzini 	 * Assume that the pte write on a page table of the same type
4904c50d8ae3SPaolo Bonzini 	 * as the current vcpu paging mode since we update the sptes only
4905c50d8ae3SPaolo Bonzini 	 * when they have the same mode.
4906c50d8ae3SPaolo Bonzini 	 */
4907c50d8ae3SPaolo Bonzini 	if (is_pae(vcpu) && *bytes == 4) {
4908c50d8ae3SPaolo Bonzini 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
4909c50d8ae3SPaolo Bonzini 		*gpa &= ~(gpa_t)7;
4910c50d8ae3SPaolo Bonzini 		*bytes = 8;
4911c50d8ae3SPaolo Bonzini 	}
4912c50d8ae3SPaolo Bonzini 
4913c50d8ae3SPaolo Bonzini 	if (*bytes == 4 || *bytes == 8) {
4914c50d8ae3SPaolo Bonzini 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
4915c50d8ae3SPaolo Bonzini 		if (r)
4916c50d8ae3SPaolo Bonzini 			gentry = 0;
4917c50d8ae3SPaolo Bonzini 	}
4918c50d8ae3SPaolo Bonzini 
4919c50d8ae3SPaolo Bonzini 	return gentry;
4920c50d8ae3SPaolo Bonzini }
4921c50d8ae3SPaolo Bonzini 
4922c50d8ae3SPaolo Bonzini /*
4923c50d8ae3SPaolo Bonzini  * If we're seeing too many writes to a page, it may no longer be a page table,
4924c50d8ae3SPaolo Bonzini  * or we may be forking, in which case it is better to unmap the page.
4925c50d8ae3SPaolo Bonzini  */
4926c50d8ae3SPaolo Bonzini static bool detect_write_flooding(struct kvm_mmu_page *sp)
4927c50d8ae3SPaolo Bonzini {
4928c50d8ae3SPaolo Bonzini 	/*
4929c50d8ae3SPaolo Bonzini 	 * Skip write-flooding detected for the sp whose level is 1, because
4930c50d8ae3SPaolo Bonzini 	 * it can become unsync, then the guest page is not write-protected.
4931c50d8ae3SPaolo Bonzini 	 */
49323bae0459SSean Christopherson 	if (sp->role.level == PG_LEVEL_4K)
4933c50d8ae3SPaolo Bonzini 		return false;
4934c50d8ae3SPaolo Bonzini 
4935c50d8ae3SPaolo Bonzini 	atomic_inc(&sp->write_flooding_count);
4936c50d8ae3SPaolo Bonzini 	return atomic_read(&sp->write_flooding_count) >= 3;
4937c50d8ae3SPaolo Bonzini }
4938c50d8ae3SPaolo Bonzini 
4939c50d8ae3SPaolo Bonzini /*
4940c50d8ae3SPaolo Bonzini  * Misaligned accesses are too much trouble to fix up; also, they usually
4941c50d8ae3SPaolo Bonzini  * indicate a page is not used as a page table.
4942c50d8ae3SPaolo Bonzini  */
4943c50d8ae3SPaolo Bonzini static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
4944c50d8ae3SPaolo Bonzini 				    int bytes)
4945c50d8ae3SPaolo Bonzini {
4946c50d8ae3SPaolo Bonzini 	unsigned offset, pte_size, misaligned;
4947c50d8ae3SPaolo Bonzini 
4948c50d8ae3SPaolo Bonzini 	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4949c50d8ae3SPaolo Bonzini 		 gpa, bytes, sp->role.word);
4950c50d8ae3SPaolo Bonzini 
4951c50d8ae3SPaolo Bonzini 	offset = offset_in_page(gpa);
4952c50d8ae3SPaolo Bonzini 	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
4953c50d8ae3SPaolo Bonzini 
4954c50d8ae3SPaolo Bonzini 	/*
4955c50d8ae3SPaolo Bonzini 	 * Sometimes, the OS only writes the last one bytes to update status
4956c50d8ae3SPaolo Bonzini 	 * bits, for example, in linux, andb instruction is used in clear_bit().
4957c50d8ae3SPaolo Bonzini 	 */
4958c50d8ae3SPaolo Bonzini 	if (!(offset & (pte_size - 1)) && bytes == 1)
4959c50d8ae3SPaolo Bonzini 		return false;
4960c50d8ae3SPaolo Bonzini 
4961c50d8ae3SPaolo Bonzini 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
4962c50d8ae3SPaolo Bonzini 	misaligned |= bytes < 4;
4963c50d8ae3SPaolo Bonzini 
4964c50d8ae3SPaolo Bonzini 	return misaligned;
4965c50d8ae3SPaolo Bonzini }
4966c50d8ae3SPaolo Bonzini 
4967c50d8ae3SPaolo Bonzini static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
4968c50d8ae3SPaolo Bonzini {
4969c50d8ae3SPaolo Bonzini 	unsigned page_offset, quadrant;
4970c50d8ae3SPaolo Bonzini 	u64 *spte;
4971c50d8ae3SPaolo Bonzini 	int level;
4972c50d8ae3SPaolo Bonzini 
4973c50d8ae3SPaolo Bonzini 	page_offset = offset_in_page(gpa);
4974c50d8ae3SPaolo Bonzini 	level = sp->role.level;
4975c50d8ae3SPaolo Bonzini 	*nspte = 1;
4976c50d8ae3SPaolo Bonzini 	if (!sp->role.gpte_is_8_bytes) {
4977c50d8ae3SPaolo Bonzini 		page_offset <<= 1;	/* 32->64 */
4978c50d8ae3SPaolo Bonzini 		/*
4979c50d8ae3SPaolo Bonzini 		 * A 32-bit pde maps 4MB while the shadow pdes map
4980c50d8ae3SPaolo Bonzini 		 * only 2MB.  So we need to double the offset again
4981c50d8ae3SPaolo Bonzini 		 * and zap two pdes instead of one.
4982c50d8ae3SPaolo Bonzini 		 */
4983c50d8ae3SPaolo Bonzini 		if (level == PT32_ROOT_LEVEL) {
4984c50d8ae3SPaolo Bonzini 			page_offset &= ~7; /* kill rounding error */
4985c50d8ae3SPaolo Bonzini 			page_offset <<= 1;
4986c50d8ae3SPaolo Bonzini 			*nspte = 2;
4987c50d8ae3SPaolo Bonzini 		}
4988c50d8ae3SPaolo Bonzini 		quadrant = page_offset >> PAGE_SHIFT;
4989c50d8ae3SPaolo Bonzini 		page_offset &= ~PAGE_MASK;
4990c50d8ae3SPaolo Bonzini 		if (quadrant != sp->role.quadrant)
4991c50d8ae3SPaolo Bonzini 			return NULL;
4992c50d8ae3SPaolo Bonzini 	}
4993c50d8ae3SPaolo Bonzini 
4994c50d8ae3SPaolo Bonzini 	spte = &sp->spt[page_offset / sizeof(*spte)];
4995c50d8ae3SPaolo Bonzini 	return spte;
4996c50d8ae3SPaolo Bonzini }
4997c50d8ae3SPaolo Bonzini 
4998c50d8ae3SPaolo Bonzini static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
4999c50d8ae3SPaolo Bonzini 			      const u8 *new, int bytes,
5000c50d8ae3SPaolo Bonzini 			      struct kvm_page_track_notifier_node *node)
5001c50d8ae3SPaolo Bonzini {
5002c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
5003c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5004c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5005c50d8ae3SPaolo Bonzini 	u64 entry, gentry, *spte;
5006c50d8ae3SPaolo Bonzini 	int npte;
5007c50d8ae3SPaolo Bonzini 	bool remote_flush, local_flush;
5008c50d8ae3SPaolo Bonzini 
5009c50d8ae3SPaolo Bonzini 	/*
5010c50d8ae3SPaolo Bonzini 	 * If we don't have indirect shadow pages, it means no page is
5011c50d8ae3SPaolo Bonzini 	 * write-protected, so we can exit simply.
5012c50d8ae3SPaolo Bonzini 	 */
5013c50d8ae3SPaolo Bonzini 	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5014c50d8ae3SPaolo Bonzini 		return;
5015c50d8ae3SPaolo Bonzini 
5016c50d8ae3SPaolo Bonzini 	remote_flush = local_flush = false;
5017c50d8ae3SPaolo Bonzini 
5018c50d8ae3SPaolo Bonzini 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5019c50d8ae3SPaolo Bonzini 
5020c50d8ae3SPaolo Bonzini 	/*
5021c50d8ae3SPaolo Bonzini 	 * No need to care whether allocation memory is successful
5022c50d8ae3SPaolo Bonzini 	 * or not since pte prefetch is skiped if it does not have
5023c50d8ae3SPaolo Bonzini 	 * enough objects in the cache.
5024c50d8ae3SPaolo Bonzini 	 */
5025378f5cd6SSean Christopherson 	mmu_topup_memory_caches(vcpu, true);
5026c50d8ae3SPaolo Bonzini 
5027531810caSBen Gardon 	write_lock(&vcpu->kvm->mmu_lock);
5028c50d8ae3SPaolo Bonzini 
5029c50d8ae3SPaolo Bonzini 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5030c50d8ae3SPaolo Bonzini 
5031c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_pte_write;
5032c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5033c50d8ae3SPaolo Bonzini 
5034c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5035c50d8ae3SPaolo Bonzini 		if (detect_write_misaligned(sp, gpa, bytes) ||
5036c50d8ae3SPaolo Bonzini 		      detect_write_flooding(sp)) {
5037c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5038c50d8ae3SPaolo Bonzini 			++vcpu->kvm->stat.mmu_flooded;
5039c50d8ae3SPaolo Bonzini 			continue;
5040c50d8ae3SPaolo Bonzini 		}
5041c50d8ae3SPaolo Bonzini 
5042c50d8ae3SPaolo Bonzini 		spte = get_written_sptes(sp, gpa, &npte);
5043c50d8ae3SPaolo Bonzini 		if (!spte)
5044c50d8ae3SPaolo Bonzini 			continue;
5045c50d8ae3SPaolo Bonzini 
5046c50d8ae3SPaolo Bonzini 		local_flush = true;
5047c50d8ae3SPaolo Bonzini 		while (npte--) {
5048c50d8ae3SPaolo Bonzini 			entry = *spte;
50492de4085cSBen Gardon 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5050c5e2184dSSean Christopherson 			if (gentry && sp->role.level != PG_LEVEL_4K)
5051c5e2184dSSean Christopherson 				++vcpu->kvm->stat.mmu_pde_zapped;
5052c50d8ae3SPaolo Bonzini 			if (need_remote_flush(entry, *spte))
5053c50d8ae3SPaolo Bonzini 				remote_flush = true;
5054c50d8ae3SPaolo Bonzini 			++spte;
5055c50d8ae3SPaolo Bonzini 		}
5056c50d8ae3SPaolo Bonzini 	}
5057c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5058c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5059531810caSBen Gardon 	write_unlock(&vcpu->kvm->mmu_lock);
5060c50d8ae3SPaolo Bonzini }
5061c50d8ae3SPaolo Bonzini 
5062736c291cSSean Christopherson int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5063c50d8ae3SPaolo Bonzini 		       void *insn, int insn_len)
5064c50d8ae3SPaolo Bonzini {
506592daa48bSSean Christopherson 	int r, emulation_type = EMULTYPE_PF;
5066c50d8ae3SPaolo Bonzini 	bool direct = vcpu->arch.mmu->direct_map;
5067c50d8ae3SPaolo Bonzini 
50686948199aSSean Christopherson 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5069ddce6208SSean Christopherson 		return RET_PF_RETRY;
5070ddce6208SSean Christopherson 
5071c50d8ae3SPaolo Bonzini 	r = RET_PF_INVALID;
5072c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
5073736c291cSSean Christopherson 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5074c50d8ae3SPaolo Bonzini 		if (r == RET_PF_EMULATE)
5075c50d8ae3SPaolo Bonzini 			goto emulate;
5076c50d8ae3SPaolo Bonzini 	}
5077c50d8ae3SPaolo Bonzini 
5078c50d8ae3SPaolo Bonzini 	if (r == RET_PF_INVALID) {
50797a02674dSSean Christopherson 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
50807a02674dSSean Christopherson 					  lower_32_bits(error_code), false);
50817b367bc9SSean Christopherson 		if (WARN_ON_ONCE(r == RET_PF_INVALID))
50827b367bc9SSean Christopherson 			return -EIO;
5083c50d8ae3SPaolo Bonzini 	}
5084c50d8ae3SPaolo Bonzini 
5085c50d8ae3SPaolo Bonzini 	if (r < 0)
5086c50d8ae3SPaolo Bonzini 		return r;
508783a2ba4cSSean Christopherson 	if (r != RET_PF_EMULATE)
508883a2ba4cSSean Christopherson 		return 1;
5089c50d8ae3SPaolo Bonzini 
5090c50d8ae3SPaolo Bonzini 	/*
5091c50d8ae3SPaolo Bonzini 	 * Before emulating the instruction, check if the error code
5092c50d8ae3SPaolo Bonzini 	 * was due to a RO violation while translating the guest page.
5093c50d8ae3SPaolo Bonzini 	 * This can occur when using nested virtualization with nested
5094c50d8ae3SPaolo Bonzini 	 * paging in both guests. If true, we simply unprotect the page
5095c50d8ae3SPaolo Bonzini 	 * and resume the guest.
5096c50d8ae3SPaolo Bonzini 	 */
5097c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map &&
5098c50d8ae3SPaolo Bonzini 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5099736c291cSSean Christopherson 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5100c50d8ae3SPaolo Bonzini 		return 1;
5101c50d8ae3SPaolo Bonzini 	}
5102c50d8ae3SPaolo Bonzini 
5103c50d8ae3SPaolo Bonzini 	/*
5104c50d8ae3SPaolo Bonzini 	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5105c50d8ae3SPaolo Bonzini 	 * optimistically try to just unprotect the page and let the processor
5106c50d8ae3SPaolo Bonzini 	 * re-execute the instruction that caused the page fault.  Do not allow
5107c50d8ae3SPaolo Bonzini 	 * retrying MMIO emulation, as it's not only pointless but could also
5108c50d8ae3SPaolo Bonzini 	 * cause us to enter an infinite loop because the processor will keep
5109c50d8ae3SPaolo Bonzini 	 * faulting on the non-existent MMIO address.  Retrying an instruction
5110c50d8ae3SPaolo Bonzini 	 * from a nested guest is also pointless and dangerous as we are only
5111c50d8ae3SPaolo Bonzini 	 * explicitly shadowing L1's page tables, i.e. unprotecting something
5112c50d8ae3SPaolo Bonzini 	 * for L1 isn't going to magically fix whatever issue cause L2 to fail.
5113c50d8ae3SPaolo Bonzini 	 */
5114736c291cSSean Christopherson 	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
511592daa48bSSean Christopherson 		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5116c50d8ae3SPaolo Bonzini emulate:
5117736c291cSSean Christopherson 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5118c50d8ae3SPaolo Bonzini 				       insn_len);
5119c50d8ae3SPaolo Bonzini }
5120c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5121c50d8ae3SPaolo Bonzini 
51225efac074SPaolo Bonzini void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
51235efac074SPaolo Bonzini 			    gva_t gva, hpa_t root_hpa)
5124c50d8ae3SPaolo Bonzini {
5125c50d8ae3SPaolo Bonzini 	int i;
5126c50d8ae3SPaolo Bonzini 
51275efac074SPaolo Bonzini 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
51285efac074SPaolo Bonzini 	if (mmu != &vcpu->arch.guest_mmu) {
51295efac074SPaolo Bonzini 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5130c50d8ae3SPaolo Bonzini 		if (is_noncanonical_address(gva, vcpu))
5131c50d8ae3SPaolo Bonzini 			return;
5132c50d8ae3SPaolo Bonzini 
5133b3646477SJason Baron 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
51345efac074SPaolo Bonzini 	}
51355efac074SPaolo Bonzini 
51365efac074SPaolo Bonzini 	if (!mmu->invlpg)
51375efac074SPaolo Bonzini 		return;
51385efac074SPaolo Bonzini 
51395efac074SPaolo Bonzini 	if (root_hpa == INVALID_PAGE) {
5140c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5141c50d8ae3SPaolo Bonzini 
5142c50d8ae3SPaolo Bonzini 		/*
5143c50d8ae3SPaolo Bonzini 		 * INVLPG is required to invalidate any global mappings for the VA,
5144c50d8ae3SPaolo Bonzini 		 * irrespective of PCID. Since it would take us roughly similar amount
5145c50d8ae3SPaolo Bonzini 		 * of work to determine whether any of the prev_root mappings of the VA
5146c50d8ae3SPaolo Bonzini 		 * is marked global, or to just sync it blindly, so we might as well
5147c50d8ae3SPaolo Bonzini 		 * just always sync it.
5148c50d8ae3SPaolo Bonzini 		 *
5149c50d8ae3SPaolo Bonzini 		 * Mappings not reachable via the current cr3 or the prev_roots will be
5150c50d8ae3SPaolo Bonzini 		 * synced when switching to that cr3, so nothing needs to be done here
5151c50d8ae3SPaolo Bonzini 		 * for them.
5152c50d8ae3SPaolo Bonzini 		 */
5153c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5154c50d8ae3SPaolo Bonzini 			if (VALID_PAGE(mmu->prev_roots[i].hpa))
5155c50d8ae3SPaolo Bonzini 				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
51565efac074SPaolo Bonzini 	} else {
51575efac074SPaolo Bonzini 		mmu->invlpg(vcpu, gva, root_hpa);
51585efac074SPaolo Bonzini 	}
51595efac074SPaolo Bonzini }
5160c50d8ae3SPaolo Bonzini 
51615efac074SPaolo Bonzini void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
51625efac074SPaolo Bonzini {
51635efac074SPaolo Bonzini 	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
5164c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5165c50d8ae3SPaolo Bonzini }
5166c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5167c50d8ae3SPaolo Bonzini 
51685efac074SPaolo Bonzini 
5169c50d8ae3SPaolo Bonzini void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5170c50d8ae3SPaolo Bonzini {
5171c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5172c50d8ae3SPaolo Bonzini 	bool tlb_flush = false;
5173c50d8ae3SPaolo Bonzini 	uint i;
5174c50d8ae3SPaolo Bonzini 
5175c50d8ae3SPaolo Bonzini 	if (pcid == kvm_get_active_pcid(vcpu)) {
5176c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5177c50d8ae3SPaolo Bonzini 		tlb_flush = true;
5178c50d8ae3SPaolo Bonzini 	}
5179c50d8ae3SPaolo Bonzini 
5180c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5181c50d8ae3SPaolo Bonzini 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5182be01e8e2SSean Christopherson 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5183c50d8ae3SPaolo Bonzini 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5184c50d8ae3SPaolo Bonzini 			tlb_flush = true;
5185c50d8ae3SPaolo Bonzini 		}
5186c50d8ae3SPaolo Bonzini 	}
5187c50d8ae3SPaolo Bonzini 
5188c50d8ae3SPaolo Bonzini 	if (tlb_flush)
5189b3646477SJason Baron 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5190c50d8ae3SPaolo Bonzini 
5191c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5192c50d8ae3SPaolo Bonzini 
5193c50d8ae3SPaolo Bonzini 	/*
5194c50d8ae3SPaolo Bonzini 	 * Mappings not reachable via the current cr3 or the prev_roots will be
5195c50d8ae3SPaolo Bonzini 	 * synced when switching to that cr3, so nothing needs to be done here
5196c50d8ae3SPaolo Bonzini 	 * for them.
5197c50d8ae3SPaolo Bonzini 	 */
5198c50d8ae3SPaolo Bonzini }
5199c50d8ae3SPaolo Bonzini 
520083013059SSean Christopherson void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
520183013059SSean Christopherson 		       int tdp_huge_page_level)
5202c50d8ae3SPaolo Bonzini {
5203bde77235SSean Christopherson 	tdp_enabled = enable_tdp;
520483013059SSean Christopherson 	max_tdp_level = tdp_max_root_level;
5205703c335dSSean Christopherson 
5206703c335dSSean Christopherson 	/*
52071d92d2e8SSean Christopherson 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
5208703c335dSSean Christopherson 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
5209703c335dSSean Christopherson 	 * the kernel is not.  But, KVM never creates a page size greater than
5210703c335dSSean Christopherson 	 * what is used by the kernel for any given HVA, i.e. the kernel's
5211703c335dSSean Christopherson 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5212703c335dSSean Christopherson 	 */
5213703c335dSSean Christopherson 	if (tdp_enabled)
52141d92d2e8SSean Christopherson 		max_huge_page_level = tdp_huge_page_level;
5215703c335dSSean Christopherson 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
52161d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_1G;
5217703c335dSSean Christopherson 	else
52181d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_2M;
5219c50d8ae3SPaolo Bonzini }
5220bde77235SSean Christopherson EXPORT_SYMBOL_GPL(kvm_configure_mmu);
5221c50d8ae3SPaolo Bonzini 
5222c50d8ae3SPaolo Bonzini /* The return value indicates if tlb flush on all vcpus is needed. */
52230a234f5dSSean Christopherson typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head,
52240a234f5dSSean Christopherson 				    struct kvm_memory_slot *slot);
5225c50d8ae3SPaolo Bonzini 
5226c50d8ae3SPaolo Bonzini /* The caller should hold mmu-lock before calling this function. */
5227c50d8ae3SPaolo Bonzini static __always_inline bool
5228c50d8ae3SPaolo Bonzini slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5229c50d8ae3SPaolo Bonzini 			slot_level_handler fn, int start_level, int end_level,
5230c50d8ae3SPaolo Bonzini 			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
5231c50d8ae3SPaolo Bonzini {
5232c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
5233c50d8ae3SPaolo Bonzini 	bool flush = false;
5234c50d8ae3SPaolo Bonzini 
5235c50d8ae3SPaolo Bonzini 	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5236c50d8ae3SPaolo Bonzini 			end_gfn, &iterator) {
5237c50d8ae3SPaolo Bonzini 		if (iterator.rmap)
52380a234f5dSSean Christopherson 			flush |= fn(kvm, iterator.rmap, memslot);
5239c50d8ae3SPaolo Bonzini 
5240531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5241c50d8ae3SPaolo Bonzini 			if (flush && lock_flush_tlb) {
5242c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm,
5243c50d8ae3SPaolo Bonzini 						start_gfn,
5244c50d8ae3SPaolo Bonzini 						iterator.gfn - start_gfn + 1);
5245c50d8ae3SPaolo Bonzini 				flush = false;
5246c50d8ae3SPaolo Bonzini 			}
5247531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
5248c50d8ae3SPaolo Bonzini 		}
5249c50d8ae3SPaolo Bonzini 	}
5250c50d8ae3SPaolo Bonzini 
5251c50d8ae3SPaolo Bonzini 	if (flush && lock_flush_tlb) {
5252c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
5253c50d8ae3SPaolo Bonzini 						   end_gfn - start_gfn + 1);
5254c50d8ae3SPaolo Bonzini 		flush = false;
5255c50d8ae3SPaolo Bonzini 	}
5256c50d8ae3SPaolo Bonzini 
5257c50d8ae3SPaolo Bonzini 	return flush;
5258c50d8ae3SPaolo Bonzini }
5259c50d8ae3SPaolo Bonzini 
5260c50d8ae3SPaolo Bonzini static __always_inline bool
5261c50d8ae3SPaolo Bonzini slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5262c50d8ae3SPaolo Bonzini 		  slot_level_handler fn, int start_level, int end_level,
5263c50d8ae3SPaolo Bonzini 		  bool lock_flush_tlb)
5264c50d8ae3SPaolo Bonzini {
5265c50d8ae3SPaolo Bonzini 	return slot_handle_level_range(kvm, memslot, fn, start_level,
5266c50d8ae3SPaolo Bonzini 			end_level, memslot->base_gfn,
5267c50d8ae3SPaolo Bonzini 			memslot->base_gfn + memslot->npages - 1,
5268c50d8ae3SPaolo Bonzini 			lock_flush_tlb);
5269c50d8ae3SPaolo Bonzini }
5270c50d8ae3SPaolo Bonzini 
5271c50d8ae3SPaolo Bonzini static __always_inline bool
5272c50d8ae3SPaolo Bonzini slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
5273c50d8ae3SPaolo Bonzini 		 slot_level_handler fn, bool lock_flush_tlb)
5274c50d8ae3SPaolo Bonzini {
52753bae0459SSean Christopherson 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
52763bae0459SSean Christopherson 				 PG_LEVEL_4K, lock_flush_tlb);
5277c50d8ae3SPaolo Bonzini }
5278c50d8ae3SPaolo Bonzini 
5279c50d8ae3SPaolo Bonzini static void free_mmu_pages(struct kvm_mmu *mmu)
5280c50d8ae3SPaolo Bonzini {
5281*4a98623dSSean Christopherson 	if (!tdp_enabled && mmu->pae_root)
5282*4a98623dSSean Christopherson 		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
5283c50d8ae3SPaolo Bonzini 	free_page((unsigned long)mmu->pae_root);
5284c50d8ae3SPaolo Bonzini 	free_page((unsigned long)mmu->lm_root);
5285c50d8ae3SPaolo Bonzini }
5286c50d8ae3SPaolo Bonzini 
528704d28e37SSean Christopherson static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5288c50d8ae3SPaolo Bonzini {
5289c50d8ae3SPaolo Bonzini 	struct page *page;
5290c50d8ae3SPaolo Bonzini 	int i;
5291c50d8ae3SPaolo Bonzini 
529204d28e37SSean Christopherson 	mmu->root_hpa = INVALID_PAGE;
529304d28e37SSean Christopherson 	mmu->root_pgd = 0;
529404d28e37SSean Christopherson 	mmu->translate_gpa = translate_gpa;
529504d28e37SSean Christopherson 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
529604d28e37SSean Christopherson 		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
529704d28e37SSean Christopherson 
5298c50d8ae3SPaolo Bonzini 	/*
5299c50d8ae3SPaolo Bonzini 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5300c50d8ae3SPaolo Bonzini 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
5301c50d8ae3SPaolo Bonzini 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5302c50d8ae3SPaolo Bonzini 	 * x86_64.  Therefore we need to allocate the PDP table in the first
530304d45551SSean Christopherson 	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
530404d45551SSean Christopherson 	 * generally doesn't use PAE paging and can skip allocating the PDP
530504d45551SSean Christopherson 	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
530604d45551SSean Christopherson 	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
530704d45551SSean Christopherson 	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
5308c50d8ae3SPaolo Bonzini 	 */
5309d468d94bSSean Christopherson 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5310c50d8ae3SPaolo Bonzini 		return 0;
5311c50d8ae3SPaolo Bonzini 
5312c50d8ae3SPaolo Bonzini 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5313c50d8ae3SPaolo Bonzini 	if (!page)
5314c50d8ae3SPaolo Bonzini 		return -ENOMEM;
5315c50d8ae3SPaolo Bonzini 
5316c50d8ae3SPaolo Bonzini 	mmu->pae_root = page_address(page);
5317*4a98623dSSean Christopherson 
5318*4a98623dSSean Christopherson 	/*
5319*4a98623dSSean Christopherson 	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
5320*4a98623dSSean Christopherson 	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
5321*4a98623dSSean Christopherson 	 * that KVM's writes and the CPU's reads get along.  Note, this is
5322*4a98623dSSean Christopherson 	 * only necessary when using shadow paging, as 64-bit NPT can get at
5323*4a98623dSSean Christopherson 	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
5324*4a98623dSSean Christopherson 	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
5325*4a98623dSSean Christopherson 	 */
5326*4a98623dSSean Christopherson 	if (!tdp_enabled)
5327*4a98623dSSean Christopherson 		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
5328*4a98623dSSean Christopherson 	else
5329*4a98623dSSean Christopherson 		WARN_ON_ONCE(shadow_me_mask);
5330*4a98623dSSean Christopherson 
5331c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i)
5332c834e5e4SSean Christopherson 		mmu->pae_root[i] = INVALID_PAE_ROOT;
5333c50d8ae3SPaolo Bonzini 
5334c50d8ae3SPaolo Bonzini 	return 0;
5335c50d8ae3SPaolo Bonzini }
5336c50d8ae3SPaolo Bonzini 
5337c50d8ae3SPaolo Bonzini int kvm_mmu_create(struct kvm_vcpu *vcpu)
5338c50d8ae3SPaolo Bonzini {
5339c50d8ae3SPaolo Bonzini 	int ret;
5340c50d8ae3SPaolo Bonzini 
53415962bfb7SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
53425f6078f9SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
53435f6078f9SSean Christopherson 
53445962bfb7SSean Christopherson 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
53455f6078f9SSean Christopherson 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
53465962bfb7SSean Christopherson 
534796880883SSean Christopherson 	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
534896880883SSean Christopherson 
5349c50d8ae3SPaolo Bonzini 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
5350c50d8ae3SPaolo Bonzini 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5351c50d8ae3SPaolo Bonzini 
5352c50d8ae3SPaolo Bonzini 	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5353c50d8ae3SPaolo Bonzini 
535404d28e37SSean Christopherson 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5355c50d8ae3SPaolo Bonzini 	if (ret)
5356c50d8ae3SPaolo Bonzini 		return ret;
5357c50d8ae3SPaolo Bonzini 
535804d28e37SSean Christopherson 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5359c50d8ae3SPaolo Bonzini 	if (ret)
5360c50d8ae3SPaolo Bonzini 		goto fail_allocate_root;
5361c50d8ae3SPaolo Bonzini 
5362c50d8ae3SPaolo Bonzini 	return ret;
5363c50d8ae3SPaolo Bonzini  fail_allocate_root:
5364c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
5365c50d8ae3SPaolo Bonzini 	return ret;
5366c50d8ae3SPaolo Bonzini }
5367c50d8ae3SPaolo Bonzini 
5368c50d8ae3SPaolo Bonzini #define BATCH_ZAP_PAGES	10
5369c50d8ae3SPaolo Bonzini static void kvm_zap_obsolete_pages(struct kvm *kvm)
5370c50d8ae3SPaolo Bonzini {
5371c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5372c50d8ae3SPaolo Bonzini 	int nr_zapped, batch = 0;
5373c50d8ae3SPaolo Bonzini 
5374c50d8ae3SPaolo Bonzini restart:
5375c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe_reverse(sp, node,
5376c50d8ae3SPaolo Bonzini 	      &kvm->arch.active_mmu_pages, link) {
5377c50d8ae3SPaolo Bonzini 		/*
5378c50d8ae3SPaolo Bonzini 		 * No obsolete valid page exists before a newly created page
5379c50d8ae3SPaolo Bonzini 		 * since active_mmu_pages is a FIFO list.
5380c50d8ae3SPaolo Bonzini 		 */
5381c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
5382c50d8ae3SPaolo Bonzini 			break;
5383c50d8ae3SPaolo Bonzini 
5384c50d8ae3SPaolo Bonzini 		/*
5385f95eec9bSSean Christopherson 		 * Invalid pages should never land back on the list of active
5386f95eec9bSSean Christopherson 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
5387f95eec9bSSean Christopherson 		 * infinite loop if the page gets put back on the list (again).
5388c50d8ae3SPaolo Bonzini 		 */
5389f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5390c50d8ae3SPaolo Bonzini 			continue;
5391c50d8ae3SPaolo Bonzini 
5392c50d8ae3SPaolo Bonzini 		/*
5393c50d8ae3SPaolo Bonzini 		 * No need to flush the TLB since we're only zapping shadow
5394c50d8ae3SPaolo Bonzini 		 * pages with an obsolete generation number and all vCPUS have
5395c50d8ae3SPaolo Bonzini 		 * loaded a new root, i.e. the shadow pages being zapped cannot
5396c50d8ae3SPaolo Bonzini 		 * be in active use by the guest.
5397c50d8ae3SPaolo Bonzini 		 */
5398c50d8ae3SPaolo Bonzini 		if (batch >= BATCH_ZAP_PAGES &&
5399531810caSBen Gardon 		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
5400c50d8ae3SPaolo Bonzini 			batch = 0;
5401c50d8ae3SPaolo Bonzini 			goto restart;
5402c50d8ae3SPaolo Bonzini 		}
5403c50d8ae3SPaolo Bonzini 
5404c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp,
5405c50d8ae3SPaolo Bonzini 				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5406c50d8ae3SPaolo Bonzini 			batch += nr_zapped;
5407c50d8ae3SPaolo Bonzini 			goto restart;
5408c50d8ae3SPaolo Bonzini 		}
5409c50d8ae3SPaolo Bonzini 	}
5410c50d8ae3SPaolo Bonzini 
5411c50d8ae3SPaolo Bonzini 	/*
5412c50d8ae3SPaolo Bonzini 	 * Trigger a remote TLB flush before freeing the page tables to ensure
5413c50d8ae3SPaolo Bonzini 	 * KVM is not in the middle of a lockless shadow page table walk, which
5414c50d8ae3SPaolo Bonzini 	 * may reference the pages.
5415c50d8ae3SPaolo Bonzini 	 */
5416c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5417c50d8ae3SPaolo Bonzini }
5418c50d8ae3SPaolo Bonzini 
5419c50d8ae3SPaolo Bonzini /*
5420c50d8ae3SPaolo Bonzini  * Fast invalidate all shadow pages and use lock-break technique
5421c50d8ae3SPaolo Bonzini  * to zap obsolete pages.
5422c50d8ae3SPaolo Bonzini  *
5423c50d8ae3SPaolo Bonzini  * It's required when memslot is being deleted or VM is being
5424c50d8ae3SPaolo Bonzini  * destroyed, in these cases, we should ensure that KVM MMU does
5425c50d8ae3SPaolo Bonzini  * not use any resource of the being-deleted slot or all slots
5426c50d8ae3SPaolo Bonzini  * after calling the function.
5427c50d8ae3SPaolo Bonzini  */
5428c50d8ae3SPaolo Bonzini static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5429c50d8ae3SPaolo Bonzini {
5430c50d8ae3SPaolo Bonzini 	lockdep_assert_held(&kvm->slots_lock);
5431c50d8ae3SPaolo Bonzini 
5432531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5433c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_zap_all_fast(kvm);
5434c50d8ae3SPaolo Bonzini 
5435c50d8ae3SPaolo Bonzini 	/*
5436c50d8ae3SPaolo Bonzini 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5437c50d8ae3SPaolo Bonzini 	 * held for the entire duration of zapping obsolete pages, it's
5438c50d8ae3SPaolo Bonzini 	 * impossible for there to be multiple invalid generations associated
5439c50d8ae3SPaolo Bonzini 	 * with *valid* shadow pages at any given time, i.e. there is exactly
5440c50d8ae3SPaolo Bonzini 	 * one valid generation and (at most) one invalid generation.
5441c50d8ae3SPaolo Bonzini 	 */
5442c50d8ae3SPaolo Bonzini 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5443c50d8ae3SPaolo Bonzini 
5444c50d8ae3SPaolo Bonzini 	/*
5445c50d8ae3SPaolo Bonzini 	 * Notify all vcpus to reload its shadow page table and flush TLB.
5446c50d8ae3SPaolo Bonzini 	 * Then all vcpus will switch to new shadow page table with the new
5447c50d8ae3SPaolo Bonzini 	 * mmu_valid_gen.
5448c50d8ae3SPaolo Bonzini 	 *
5449c50d8ae3SPaolo Bonzini 	 * Note: we need to do this under the protection of mmu_lock,
5450c50d8ae3SPaolo Bonzini 	 * otherwise, vcpu would purge shadow page but miss tlb flush.
5451c50d8ae3SPaolo Bonzini 	 */
5452c50d8ae3SPaolo Bonzini 	kvm_reload_remote_mmus(kvm);
5453c50d8ae3SPaolo Bonzini 
5454c50d8ae3SPaolo Bonzini 	kvm_zap_obsolete_pages(kvm);
5455faaf05b0SBen Gardon 
5456897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
5457faaf05b0SBen Gardon 		kvm_tdp_mmu_zap_all(kvm);
5458faaf05b0SBen Gardon 
5459531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5460c50d8ae3SPaolo Bonzini }
5461c50d8ae3SPaolo Bonzini 
5462c50d8ae3SPaolo Bonzini static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5463c50d8ae3SPaolo Bonzini {
5464c50d8ae3SPaolo Bonzini 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5465c50d8ae3SPaolo Bonzini }
5466c50d8ae3SPaolo Bonzini 
5467c50d8ae3SPaolo Bonzini static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5468c50d8ae3SPaolo Bonzini 			struct kvm_memory_slot *slot,
5469c50d8ae3SPaolo Bonzini 			struct kvm_page_track_notifier_node *node)
5470c50d8ae3SPaolo Bonzini {
5471c50d8ae3SPaolo Bonzini 	kvm_mmu_zap_all_fast(kvm);
5472c50d8ae3SPaolo Bonzini }
5473c50d8ae3SPaolo Bonzini 
5474c50d8ae3SPaolo Bonzini void kvm_mmu_init_vm(struct kvm *kvm)
5475c50d8ae3SPaolo Bonzini {
5476c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5477c50d8ae3SPaolo Bonzini 
5478fe5db27dSBen Gardon 	kvm_mmu_init_tdp_mmu(kvm);
5479fe5db27dSBen Gardon 
5480c50d8ae3SPaolo Bonzini 	node->track_write = kvm_mmu_pte_write;
5481c50d8ae3SPaolo Bonzini 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5482c50d8ae3SPaolo Bonzini 	kvm_page_track_register_notifier(kvm, node);
5483c50d8ae3SPaolo Bonzini }
5484c50d8ae3SPaolo Bonzini 
5485c50d8ae3SPaolo Bonzini void kvm_mmu_uninit_vm(struct kvm *kvm)
5486c50d8ae3SPaolo Bonzini {
5487c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5488c50d8ae3SPaolo Bonzini 
5489c50d8ae3SPaolo Bonzini 	kvm_page_track_unregister_notifier(kvm, node);
5490fe5db27dSBen Gardon 
5491fe5db27dSBen Gardon 	kvm_mmu_uninit_tdp_mmu(kvm);
5492c50d8ae3SPaolo Bonzini }
5493c50d8ae3SPaolo Bonzini 
5494c50d8ae3SPaolo Bonzini void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5495c50d8ae3SPaolo Bonzini {
5496c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
5497c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
5498c50d8ae3SPaolo Bonzini 	int i;
5499faaf05b0SBen Gardon 	bool flush;
5500c50d8ae3SPaolo Bonzini 
5501531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5502c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5503c50d8ae3SPaolo Bonzini 		slots = __kvm_memslots(kvm, i);
5504c50d8ae3SPaolo Bonzini 		kvm_for_each_memslot(memslot, slots) {
5505c50d8ae3SPaolo Bonzini 			gfn_t start, end;
5506c50d8ae3SPaolo Bonzini 
5507c50d8ae3SPaolo Bonzini 			start = max(gfn_start, memslot->base_gfn);
5508c50d8ae3SPaolo Bonzini 			end = min(gfn_end, memslot->base_gfn + memslot->npages);
5509c50d8ae3SPaolo Bonzini 			if (start >= end)
5510c50d8ae3SPaolo Bonzini 				continue;
5511c50d8ae3SPaolo Bonzini 
5512c50d8ae3SPaolo Bonzini 			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
55133bae0459SSean Christopherson 						PG_LEVEL_4K,
5514e662ec3eSSean Christopherson 						KVM_MAX_HUGEPAGE_LEVEL,
5515c50d8ae3SPaolo Bonzini 						start, end - 1, true);
5516c50d8ae3SPaolo Bonzini 		}
5517c50d8ae3SPaolo Bonzini 	}
5518c50d8ae3SPaolo Bonzini 
5519897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm)) {
5520faaf05b0SBen Gardon 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
5521faaf05b0SBen Gardon 		if (flush)
5522faaf05b0SBen Gardon 			kvm_flush_remote_tlbs(kvm);
5523faaf05b0SBen Gardon 	}
5524faaf05b0SBen Gardon 
5525531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5526c50d8ae3SPaolo Bonzini }
5527c50d8ae3SPaolo Bonzini 
5528c50d8ae3SPaolo Bonzini static bool slot_rmap_write_protect(struct kvm *kvm,
55290a234f5dSSean Christopherson 				    struct kvm_rmap_head *rmap_head,
55300a234f5dSSean Christopherson 				    struct kvm_memory_slot *slot)
5531c50d8ae3SPaolo Bonzini {
5532c50d8ae3SPaolo Bonzini 	return __rmap_write_protect(kvm, rmap_head, false);
5533c50d8ae3SPaolo Bonzini }
5534c50d8ae3SPaolo Bonzini 
5535c50d8ae3SPaolo Bonzini void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
55363c9bd400SJay Zhou 				      struct kvm_memory_slot *memslot,
55373c9bd400SJay Zhou 				      int start_level)
5538c50d8ae3SPaolo Bonzini {
5539c50d8ae3SPaolo Bonzini 	bool flush;
5540c50d8ae3SPaolo Bonzini 
5541531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
55423c9bd400SJay Zhou 	flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5543e662ec3eSSean Christopherson 				start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
5544897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
5545a6a0b05dSBen Gardon 		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
5546531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5547c50d8ae3SPaolo Bonzini 
5548c50d8ae3SPaolo Bonzini 	/*
5549c50d8ae3SPaolo Bonzini 	 * We can flush all the TLBs out of the mmu lock without TLB
5550c50d8ae3SPaolo Bonzini 	 * corruption since we just change the spte from writable to
5551c50d8ae3SPaolo Bonzini 	 * readonly so that we only need to care the case of changing
5552c50d8ae3SPaolo Bonzini 	 * spte from present to present (changing the spte from present
5553c50d8ae3SPaolo Bonzini 	 * to nonpresent will flush all the TLBs immediately), in other
5554c50d8ae3SPaolo Bonzini 	 * words, the only case we care is mmu_spte_update() where we
55555fc3424fSSean Christopherson 	 * have checked Host-writable | MMU-writable instead of
55565fc3424fSSean Christopherson 	 * PT_WRITABLE_MASK, that means it does not depend on PT_WRITABLE_MASK
55575fc3424fSSean Christopherson 	 * anymore.
5558c50d8ae3SPaolo Bonzini 	 */
5559c50d8ae3SPaolo Bonzini 	if (flush)
55607f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5561c50d8ae3SPaolo Bonzini }
5562c50d8ae3SPaolo Bonzini 
5563c50d8ae3SPaolo Bonzini static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
55640a234f5dSSean Christopherson 					 struct kvm_rmap_head *rmap_head,
55650a234f5dSSean Christopherson 					 struct kvm_memory_slot *slot)
5566c50d8ae3SPaolo Bonzini {
5567c50d8ae3SPaolo Bonzini 	u64 *sptep;
5568c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
5569c50d8ae3SPaolo Bonzini 	int need_tlb_flush = 0;
5570c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
5571c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5572c50d8ae3SPaolo Bonzini 
5573c50d8ae3SPaolo Bonzini restart:
5574c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
557557354682SSean Christopherson 		sp = sptep_to_sp(sptep);
5576c50d8ae3SPaolo Bonzini 		pfn = spte_to_pfn(*sptep);
5577c50d8ae3SPaolo Bonzini 
5578c50d8ae3SPaolo Bonzini 		/*
5579c50d8ae3SPaolo Bonzini 		 * We cannot do huge page mapping for indirect shadow pages,
5580c50d8ae3SPaolo Bonzini 		 * which are found on the last rmap (level = 1) when not using
5581c50d8ae3SPaolo Bonzini 		 * tdp; such shadow pages are synced with the page table in
5582c50d8ae3SPaolo Bonzini 		 * the guest, and the guest page table is using 4K page size
5583c50d8ae3SPaolo Bonzini 		 * mapping if the indirect sp has level = 1.
5584c50d8ae3SPaolo Bonzini 		 */
5585c50d8ae3SPaolo Bonzini 		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
55869eba50f8SSean Christopherson 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
55879eba50f8SSean Christopherson 							       pfn, PG_LEVEL_NUM)) {
5588c50d8ae3SPaolo Bonzini 			pte_list_remove(rmap_head, sptep);
5589c50d8ae3SPaolo Bonzini 
5590c50d8ae3SPaolo Bonzini 			if (kvm_available_flush_tlb_with_range())
5591c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
5592c50d8ae3SPaolo Bonzini 					KVM_PAGES_PER_HPAGE(sp->role.level));
5593c50d8ae3SPaolo Bonzini 			else
5594c50d8ae3SPaolo Bonzini 				need_tlb_flush = 1;
5595c50d8ae3SPaolo Bonzini 
5596c50d8ae3SPaolo Bonzini 			goto restart;
5597c50d8ae3SPaolo Bonzini 		}
5598c50d8ae3SPaolo Bonzini 	}
5599c50d8ae3SPaolo Bonzini 
5600c50d8ae3SPaolo Bonzini 	return need_tlb_flush;
5601c50d8ae3SPaolo Bonzini }
5602c50d8ae3SPaolo Bonzini 
5603c50d8ae3SPaolo Bonzini void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5604c50d8ae3SPaolo Bonzini 				   const struct kvm_memory_slot *memslot)
5605c50d8ae3SPaolo Bonzini {
5606c50d8ae3SPaolo Bonzini 	/* FIXME: const-ify all uses of struct kvm_memory_slot.  */
56079eba50f8SSean Christopherson 	struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
56089eba50f8SSean Christopherson 
5609531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
56109eba50f8SSean Christopherson 	slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
561114881998SBen Gardon 
5612897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
56139eba50f8SSean Christopherson 		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
5614531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5615c50d8ae3SPaolo Bonzini }
5616c50d8ae3SPaolo Bonzini 
5617b3594ffbSSean Christopherson void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
5618b3594ffbSSean Christopherson 					struct kvm_memory_slot *memslot)
5619b3594ffbSSean Christopherson {
5620b3594ffbSSean Christopherson 	/*
56217f42aa76SSean Christopherson 	 * All current use cases for flushing the TLBs for a specific memslot
56227f42aa76SSean Christopherson 	 * are related to dirty logging, and do the TLB flush out of mmu_lock.
56237f42aa76SSean Christopherson 	 * The interaction between the various operations on memslot must be
56247f42aa76SSean Christopherson 	 * serialized by slots_locks to ensure the TLB flush from one operation
56257f42aa76SSean Christopherson 	 * is observed by any other operation on the same memslot.
5626b3594ffbSSean Christopherson 	 */
5627b3594ffbSSean Christopherson 	lockdep_assert_held(&kvm->slots_lock);
5628cec37648SSean Christopherson 	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5629cec37648SSean Christopherson 					   memslot->npages);
5630b3594ffbSSean Christopherson }
5631b3594ffbSSean Christopherson 
5632c50d8ae3SPaolo Bonzini void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5633c50d8ae3SPaolo Bonzini 				   struct kvm_memory_slot *memslot)
5634c50d8ae3SPaolo Bonzini {
5635c50d8ae3SPaolo Bonzini 	bool flush;
5636c50d8ae3SPaolo Bonzini 
5637531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5638c50d8ae3SPaolo Bonzini 	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
5639897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
5640a6a0b05dSBen Gardon 		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
5641531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5642c50d8ae3SPaolo Bonzini 
5643c50d8ae3SPaolo Bonzini 	/*
5644c50d8ae3SPaolo Bonzini 	 * It's also safe to flush TLBs out of mmu lock here as currently this
5645c50d8ae3SPaolo Bonzini 	 * function is only used for dirty logging, in which case flushing TLB
5646c50d8ae3SPaolo Bonzini 	 * out of mmu lock also guarantees no dirty pages will be lost in
5647c50d8ae3SPaolo Bonzini 	 * dirty_bitmap.
5648c50d8ae3SPaolo Bonzini 	 */
5649c50d8ae3SPaolo Bonzini 	if (flush)
56507f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5651c50d8ae3SPaolo Bonzini }
5652c50d8ae3SPaolo Bonzini 
5653c50d8ae3SPaolo Bonzini void kvm_mmu_zap_all(struct kvm *kvm)
5654c50d8ae3SPaolo Bonzini {
5655c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5656c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5657c50d8ae3SPaolo Bonzini 	int ign;
5658c50d8ae3SPaolo Bonzini 
5659531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5660c50d8ae3SPaolo Bonzini restart:
5661c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
5662f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5663c50d8ae3SPaolo Bonzini 			continue;
5664c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
5665c50d8ae3SPaolo Bonzini 			goto restart;
5666531810caSBen Gardon 		if (cond_resched_rwlock_write(&kvm->mmu_lock))
5667c50d8ae3SPaolo Bonzini 			goto restart;
5668c50d8ae3SPaolo Bonzini 	}
5669c50d8ae3SPaolo Bonzini 
5670c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
5671faaf05b0SBen Gardon 
5672897218ffSPaolo Bonzini 	if (is_tdp_mmu_enabled(kvm))
5673faaf05b0SBen Gardon 		kvm_tdp_mmu_zap_all(kvm);
5674faaf05b0SBen Gardon 
5675531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5676c50d8ae3SPaolo Bonzini }
5677c50d8ae3SPaolo Bonzini 
5678c50d8ae3SPaolo Bonzini void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5679c50d8ae3SPaolo Bonzini {
5680c50d8ae3SPaolo Bonzini 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
5681c50d8ae3SPaolo Bonzini 
5682c50d8ae3SPaolo Bonzini 	gen &= MMIO_SPTE_GEN_MASK;
5683c50d8ae3SPaolo Bonzini 
5684c50d8ae3SPaolo Bonzini 	/*
5685c50d8ae3SPaolo Bonzini 	 * Generation numbers are incremented in multiples of the number of
5686c50d8ae3SPaolo Bonzini 	 * address spaces in order to provide unique generations across all
5687c50d8ae3SPaolo Bonzini 	 * address spaces.  Strip what is effectively the address space
5688c50d8ae3SPaolo Bonzini 	 * modifier prior to checking for a wrap of the MMIO generation so
5689c50d8ae3SPaolo Bonzini 	 * that a wrap in any address space is detected.
5690c50d8ae3SPaolo Bonzini 	 */
5691c50d8ae3SPaolo Bonzini 	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
5692c50d8ae3SPaolo Bonzini 
5693c50d8ae3SPaolo Bonzini 	/*
5694c50d8ae3SPaolo Bonzini 	 * The very rare case: if the MMIO generation number has wrapped,
5695c50d8ae3SPaolo Bonzini 	 * zap all shadow pages.
5696c50d8ae3SPaolo Bonzini 	 */
5697c50d8ae3SPaolo Bonzini 	if (unlikely(gen == 0)) {
5698c50d8ae3SPaolo Bonzini 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5699c50d8ae3SPaolo Bonzini 		kvm_mmu_zap_all_fast(kvm);
5700c50d8ae3SPaolo Bonzini 	}
5701c50d8ae3SPaolo Bonzini }
5702c50d8ae3SPaolo Bonzini 
5703c50d8ae3SPaolo Bonzini static unsigned long
5704c50d8ae3SPaolo Bonzini mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5705c50d8ae3SPaolo Bonzini {
5706c50d8ae3SPaolo Bonzini 	struct kvm *kvm;
5707c50d8ae3SPaolo Bonzini 	int nr_to_scan = sc->nr_to_scan;
5708c50d8ae3SPaolo Bonzini 	unsigned long freed = 0;
5709c50d8ae3SPaolo Bonzini 
5710c50d8ae3SPaolo Bonzini 	mutex_lock(&kvm_lock);
5711c50d8ae3SPaolo Bonzini 
5712c50d8ae3SPaolo Bonzini 	list_for_each_entry(kvm, &vm_list, vm_list) {
5713c50d8ae3SPaolo Bonzini 		int idx;
5714c50d8ae3SPaolo Bonzini 		LIST_HEAD(invalid_list);
5715c50d8ae3SPaolo Bonzini 
5716c50d8ae3SPaolo Bonzini 		/*
5717c50d8ae3SPaolo Bonzini 		 * Never scan more than sc->nr_to_scan VM instances.
5718c50d8ae3SPaolo Bonzini 		 * Will not hit this condition practically since we do not try
5719c50d8ae3SPaolo Bonzini 		 * to shrink more than one VM and it is very unlikely to see
5720c50d8ae3SPaolo Bonzini 		 * !n_used_mmu_pages so many times.
5721c50d8ae3SPaolo Bonzini 		 */
5722c50d8ae3SPaolo Bonzini 		if (!nr_to_scan--)
5723c50d8ae3SPaolo Bonzini 			break;
5724c50d8ae3SPaolo Bonzini 		/*
5725c50d8ae3SPaolo Bonzini 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5726c50d8ae3SPaolo Bonzini 		 * here. We may skip a VM instance errorneosly, but we do not
5727c50d8ae3SPaolo Bonzini 		 * want to shrink a VM that only started to populate its MMU
5728c50d8ae3SPaolo Bonzini 		 * anyway.
5729c50d8ae3SPaolo Bonzini 		 */
5730c50d8ae3SPaolo Bonzini 		if (!kvm->arch.n_used_mmu_pages &&
5731c50d8ae3SPaolo Bonzini 		    !kvm_has_zapped_obsolete_pages(kvm))
5732c50d8ae3SPaolo Bonzini 			continue;
5733c50d8ae3SPaolo Bonzini 
5734c50d8ae3SPaolo Bonzini 		idx = srcu_read_lock(&kvm->srcu);
5735531810caSBen Gardon 		write_lock(&kvm->mmu_lock);
5736c50d8ae3SPaolo Bonzini 
5737c50d8ae3SPaolo Bonzini 		if (kvm_has_zapped_obsolete_pages(kvm)) {
5738c50d8ae3SPaolo Bonzini 			kvm_mmu_commit_zap_page(kvm,
5739c50d8ae3SPaolo Bonzini 			      &kvm->arch.zapped_obsolete_pages);
5740c50d8ae3SPaolo Bonzini 			goto unlock;
5741c50d8ae3SPaolo Bonzini 		}
5742c50d8ae3SPaolo Bonzini 
5743ebdb292dSSean Christopherson 		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
5744c50d8ae3SPaolo Bonzini 
5745c50d8ae3SPaolo Bonzini unlock:
5746531810caSBen Gardon 		write_unlock(&kvm->mmu_lock);
5747c50d8ae3SPaolo Bonzini 		srcu_read_unlock(&kvm->srcu, idx);
5748c50d8ae3SPaolo Bonzini 
5749c50d8ae3SPaolo Bonzini 		/*
5750c50d8ae3SPaolo Bonzini 		 * unfair on small ones
5751c50d8ae3SPaolo Bonzini 		 * per-vm shrinkers cry out
5752c50d8ae3SPaolo Bonzini 		 * sadness comes quickly
5753c50d8ae3SPaolo Bonzini 		 */
5754c50d8ae3SPaolo Bonzini 		list_move_tail(&kvm->vm_list, &vm_list);
5755c50d8ae3SPaolo Bonzini 		break;
5756c50d8ae3SPaolo Bonzini 	}
5757c50d8ae3SPaolo Bonzini 
5758c50d8ae3SPaolo Bonzini 	mutex_unlock(&kvm_lock);
5759c50d8ae3SPaolo Bonzini 	return freed;
5760c50d8ae3SPaolo Bonzini }
5761c50d8ae3SPaolo Bonzini 
5762c50d8ae3SPaolo Bonzini static unsigned long
5763c50d8ae3SPaolo Bonzini mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5764c50d8ae3SPaolo Bonzini {
5765c50d8ae3SPaolo Bonzini 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
5766c50d8ae3SPaolo Bonzini }
5767c50d8ae3SPaolo Bonzini 
5768c50d8ae3SPaolo Bonzini static struct shrinker mmu_shrinker = {
5769c50d8ae3SPaolo Bonzini 	.count_objects = mmu_shrink_count,
5770c50d8ae3SPaolo Bonzini 	.scan_objects = mmu_shrink_scan,
5771c50d8ae3SPaolo Bonzini 	.seeks = DEFAULT_SEEKS * 10,
5772c50d8ae3SPaolo Bonzini };
5773c50d8ae3SPaolo Bonzini 
5774c50d8ae3SPaolo Bonzini static void mmu_destroy_caches(void)
5775c50d8ae3SPaolo Bonzini {
5776c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(pte_list_desc_cache);
5777c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(mmu_page_header_cache);
5778c50d8ae3SPaolo Bonzini }
5779c50d8ae3SPaolo Bonzini 
5780c50d8ae3SPaolo Bonzini static bool get_nx_auto_mode(void)
5781c50d8ae3SPaolo Bonzini {
5782c50d8ae3SPaolo Bonzini 	/* Return true when CPU has the bug, and mitigations are ON */
5783c50d8ae3SPaolo Bonzini 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
5784c50d8ae3SPaolo Bonzini }
5785c50d8ae3SPaolo Bonzini 
5786c50d8ae3SPaolo Bonzini static void __set_nx_huge_pages(bool val)
5787c50d8ae3SPaolo Bonzini {
5788c50d8ae3SPaolo Bonzini 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
5789c50d8ae3SPaolo Bonzini }
5790c50d8ae3SPaolo Bonzini 
5791c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
5792c50d8ae3SPaolo Bonzini {
5793c50d8ae3SPaolo Bonzini 	bool old_val = nx_huge_pages;
5794c50d8ae3SPaolo Bonzini 	bool new_val;
5795c50d8ae3SPaolo Bonzini 
5796c50d8ae3SPaolo Bonzini 	/* In "auto" mode deploy workaround only if CPU has the bug. */
5797c50d8ae3SPaolo Bonzini 	if (sysfs_streq(val, "off"))
5798c50d8ae3SPaolo Bonzini 		new_val = 0;
5799c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "force"))
5800c50d8ae3SPaolo Bonzini 		new_val = 1;
5801c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "auto"))
5802c50d8ae3SPaolo Bonzini 		new_val = get_nx_auto_mode();
5803c50d8ae3SPaolo Bonzini 	else if (strtobool(val, &new_val) < 0)
5804c50d8ae3SPaolo Bonzini 		return -EINVAL;
5805c50d8ae3SPaolo Bonzini 
5806c50d8ae3SPaolo Bonzini 	__set_nx_huge_pages(new_val);
5807c50d8ae3SPaolo Bonzini 
5808c50d8ae3SPaolo Bonzini 	if (new_val != old_val) {
5809c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
5810c50d8ae3SPaolo Bonzini 
5811c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
5812c50d8ae3SPaolo Bonzini 
5813c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list) {
5814c50d8ae3SPaolo Bonzini 			mutex_lock(&kvm->slots_lock);
5815c50d8ae3SPaolo Bonzini 			kvm_mmu_zap_all_fast(kvm);
5816c50d8ae3SPaolo Bonzini 			mutex_unlock(&kvm->slots_lock);
5817c50d8ae3SPaolo Bonzini 
5818c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5819c50d8ae3SPaolo Bonzini 		}
5820c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
5821c50d8ae3SPaolo Bonzini 	}
5822c50d8ae3SPaolo Bonzini 
5823c50d8ae3SPaolo Bonzini 	return 0;
5824c50d8ae3SPaolo Bonzini }
5825c50d8ae3SPaolo Bonzini 
5826c50d8ae3SPaolo Bonzini int kvm_mmu_module_init(void)
5827c50d8ae3SPaolo Bonzini {
5828c50d8ae3SPaolo Bonzini 	int ret = -ENOMEM;
5829c50d8ae3SPaolo Bonzini 
5830c50d8ae3SPaolo Bonzini 	if (nx_huge_pages == -1)
5831c50d8ae3SPaolo Bonzini 		__set_nx_huge_pages(get_nx_auto_mode());
5832c50d8ae3SPaolo Bonzini 
5833c50d8ae3SPaolo Bonzini 	/*
5834c50d8ae3SPaolo Bonzini 	 * MMU roles use union aliasing which is, generally speaking, an
5835c50d8ae3SPaolo Bonzini 	 * undefined behavior. However, we supposedly know how compilers behave
5836c50d8ae3SPaolo Bonzini 	 * and the current status quo is unlikely to change. Guardians below are
5837c50d8ae3SPaolo Bonzini 	 * supposed to let us know if the assumption becomes false.
5838c50d8ae3SPaolo Bonzini 	 */
5839c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
5840c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
5841c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
5842c50d8ae3SPaolo Bonzini 
5843c50d8ae3SPaolo Bonzini 	kvm_mmu_reset_all_pte_masks();
5844c50d8ae3SPaolo Bonzini 
5845c50d8ae3SPaolo Bonzini 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
5846c50d8ae3SPaolo Bonzini 					    sizeof(struct pte_list_desc),
5847c50d8ae3SPaolo Bonzini 					    0, SLAB_ACCOUNT, NULL);
5848c50d8ae3SPaolo Bonzini 	if (!pte_list_desc_cache)
5849c50d8ae3SPaolo Bonzini 		goto out;
5850c50d8ae3SPaolo Bonzini 
5851c50d8ae3SPaolo Bonzini 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
5852c50d8ae3SPaolo Bonzini 						  sizeof(struct kvm_mmu_page),
5853c50d8ae3SPaolo Bonzini 						  0, SLAB_ACCOUNT, NULL);
5854c50d8ae3SPaolo Bonzini 	if (!mmu_page_header_cache)
5855c50d8ae3SPaolo Bonzini 		goto out;
5856c50d8ae3SPaolo Bonzini 
5857c50d8ae3SPaolo Bonzini 	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
5858c50d8ae3SPaolo Bonzini 		goto out;
5859c50d8ae3SPaolo Bonzini 
5860c50d8ae3SPaolo Bonzini 	ret = register_shrinker(&mmu_shrinker);
5861c50d8ae3SPaolo Bonzini 	if (ret)
5862c50d8ae3SPaolo Bonzini 		goto out;
5863c50d8ae3SPaolo Bonzini 
5864c50d8ae3SPaolo Bonzini 	return 0;
5865c50d8ae3SPaolo Bonzini 
5866c50d8ae3SPaolo Bonzini out:
5867c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
5868c50d8ae3SPaolo Bonzini 	return ret;
5869c50d8ae3SPaolo Bonzini }
5870c50d8ae3SPaolo Bonzini 
5871c50d8ae3SPaolo Bonzini /*
5872c50d8ae3SPaolo Bonzini  * Calculate mmu pages needed for kvm.
5873c50d8ae3SPaolo Bonzini  */
5874c50d8ae3SPaolo Bonzini unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
5875c50d8ae3SPaolo Bonzini {
5876c50d8ae3SPaolo Bonzini 	unsigned long nr_mmu_pages;
5877c50d8ae3SPaolo Bonzini 	unsigned long nr_pages = 0;
5878c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
5879c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
5880c50d8ae3SPaolo Bonzini 	int i;
5881c50d8ae3SPaolo Bonzini 
5882c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5883c50d8ae3SPaolo Bonzini 		slots = __kvm_memslots(kvm, i);
5884c50d8ae3SPaolo Bonzini 
5885c50d8ae3SPaolo Bonzini 		kvm_for_each_memslot(memslot, slots)
5886c50d8ae3SPaolo Bonzini 			nr_pages += memslot->npages;
5887c50d8ae3SPaolo Bonzini 	}
5888c50d8ae3SPaolo Bonzini 
5889c50d8ae3SPaolo Bonzini 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
5890c50d8ae3SPaolo Bonzini 	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
5891c50d8ae3SPaolo Bonzini 
5892c50d8ae3SPaolo Bonzini 	return nr_mmu_pages;
5893c50d8ae3SPaolo Bonzini }
5894c50d8ae3SPaolo Bonzini 
5895c50d8ae3SPaolo Bonzini void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
5896c50d8ae3SPaolo Bonzini {
5897c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
5898c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.root_mmu);
5899c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
5900c50d8ae3SPaolo Bonzini 	mmu_free_memory_caches(vcpu);
5901c50d8ae3SPaolo Bonzini }
5902c50d8ae3SPaolo Bonzini 
5903c50d8ae3SPaolo Bonzini void kvm_mmu_module_exit(void)
5904c50d8ae3SPaolo Bonzini {
5905c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
5906c50d8ae3SPaolo Bonzini 	percpu_counter_destroy(&kvm_total_used_mmu_pages);
5907c50d8ae3SPaolo Bonzini 	unregister_shrinker(&mmu_shrinker);
5908c50d8ae3SPaolo Bonzini 	mmu_audit_disable();
5909c50d8ae3SPaolo Bonzini }
5910c50d8ae3SPaolo Bonzini 
5911c50d8ae3SPaolo Bonzini static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
5912c50d8ae3SPaolo Bonzini {
5913c50d8ae3SPaolo Bonzini 	unsigned int old_val;
5914c50d8ae3SPaolo Bonzini 	int err;
5915c50d8ae3SPaolo Bonzini 
5916c50d8ae3SPaolo Bonzini 	old_val = nx_huge_pages_recovery_ratio;
5917c50d8ae3SPaolo Bonzini 	err = param_set_uint(val, kp);
5918c50d8ae3SPaolo Bonzini 	if (err)
5919c50d8ae3SPaolo Bonzini 		return err;
5920c50d8ae3SPaolo Bonzini 
5921c50d8ae3SPaolo Bonzini 	if (READ_ONCE(nx_huge_pages) &&
5922c50d8ae3SPaolo Bonzini 	    !old_val && nx_huge_pages_recovery_ratio) {
5923c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
5924c50d8ae3SPaolo Bonzini 
5925c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
5926c50d8ae3SPaolo Bonzini 
5927c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list)
5928c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5929c50d8ae3SPaolo Bonzini 
5930c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
5931c50d8ae3SPaolo Bonzini 	}
5932c50d8ae3SPaolo Bonzini 
5933c50d8ae3SPaolo Bonzini 	return err;
5934c50d8ae3SPaolo Bonzini }
5935c50d8ae3SPaolo Bonzini 
5936c50d8ae3SPaolo Bonzini static void kvm_recover_nx_lpages(struct kvm *kvm)
5937c50d8ae3SPaolo Bonzini {
5938c50d8ae3SPaolo Bonzini 	int rcu_idx;
5939c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5940c50d8ae3SPaolo Bonzini 	unsigned int ratio;
5941c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5942c50d8ae3SPaolo Bonzini 	ulong to_zap;
5943c50d8ae3SPaolo Bonzini 
5944c50d8ae3SPaolo Bonzini 	rcu_idx = srcu_read_lock(&kvm->srcu);
5945531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
5946c50d8ae3SPaolo Bonzini 
5947c50d8ae3SPaolo Bonzini 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
5948c50d8ae3SPaolo Bonzini 	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
59497d919c7aSSean Christopherson 	for ( ; to_zap; --to_zap) {
59507d919c7aSSean Christopherson 		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
59517d919c7aSSean Christopherson 			break;
59527d919c7aSSean Christopherson 
5953c50d8ae3SPaolo Bonzini 		/*
5954c50d8ae3SPaolo Bonzini 		 * We use a separate list instead of just using active_mmu_pages
5955c50d8ae3SPaolo Bonzini 		 * because the number of lpage_disallowed pages is expected to
5956c50d8ae3SPaolo Bonzini 		 * be relatively small compared to the total.
5957c50d8ae3SPaolo Bonzini 		 */
5958c50d8ae3SPaolo Bonzini 		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
5959c50d8ae3SPaolo Bonzini 				      struct kvm_mmu_page,
5960c50d8ae3SPaolo Bonzini 				      lpage_disallowed_link);
5961c50d8ae3SPaolo Bonzini 		WARN_ON_ONCE(!sp->lpage_disallowed);
5962897218ffSPaolo Bonzini 		if (is_tdp_mmu_page(sp)) {
596329cf0f50SBen Gardon 			kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
596429cf0f50SBen Gardon 				sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
59658d1a182eSBen Gardon 		} else {
5966c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
5967c50d8ae3SPaolo Bonzini 			WARN_ON_ONCE(sp->lpage_disallowed);
596829cf0f50SBen Gardon 		}
5969c50d8ae3SPaolo Bonzini 
5970531810caSBen Gardon 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5971c50d8ae3SPaolo Bonzini 			kvm_mmu_commit_zap_page(kvm, &invalid_list);
5972531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
5973c50d8ae3SPaolo Bonzini 		}
5974c50d8ae3SPaolo Bonzini 	}
5975e8950569SSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
5976c50d8ae3SPaolo Bonzini 
5977531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
5978c50d8ae3SPaolo Bonzini 	srcu_read_unlock(&kvm->srcu, rcu_idx);
5979c50d8ae3SPaolo Bonzini }
5980c50d8ae3SPaolo Bonzini 
5981c50d8ae3SPaolo Bonzini static long get_nx_lpage_recovery_timeout(u64 start_time)
5982c50d8ae3SPaolo Bonzini {
5983c50d8ae3SPaolo Bonzini 	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
5984c50d8ae3SPaolo Bonzini 		? start_time + 60 * HZ - get_jiffies_64()
5985c50d8ae3SPaolo Bonzini 		: MAX_SCHEDULE_TIMEOUT;
5986c50d8ae3SPaolo Bonzini }
5987c50d8ae3SPaolo Bonzini 
5988c50d8ae3SPaolo Bonzini static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
5989c50d8ae3SPaolo Bonzini {
5990c50d8ae3SPaolo Bonzini 	u64 start_time;
5991c50d8ae3SPaolo Bonzini 	long remaining_time;
5992c50d8ae3SPaolo Bonzini 
5993c50d8ae3SPaolo Bonzini 	while (true) {
5994c50d8ae3SPaolo Bonzini 		start_time = get_jiffies_64();
5995c50d8ae3SPaolo Bonzini 		remaining_time = get_nx_lpage_recovery_timeout(start_time);
5996c50d8ae3SPaolo Bonzini 
5997c50d8ae3SPaolo Bonzini 		set_current_state(TASK_INTERRUPTIBLE);
5998c50d8ae3SPaolo Bonzini 		while (!kthread_should_stop() && remaining_time > 0) {
5999c50d8ae3SPaolo Bonzini 			schedule_timeout(remaining_time);
6000c50d8ae3SPaolo Bonzini 			remaining_time = get_nx_lpage_recovery_timeout(start_time);
6001c50d8ae3SPaolo Bonzini 			set_current_state(TASK_INTERRUPTIBLE);
6002c50d8ae3SPaolo Bonzini 		}
6003c50d8ae3SPaolo Bonzini 
6004c50d8ae3SPaolo Bonzini 		set_current_state(TASK_RUNNING);
6005c50d8ae3SPaolo Bonzini 
6006c50d8ae3SPaolo Bonzini 		if (kthread_should_stop())
6007c50d8ae3SPaolo Bonzini 			return 0;
6008c50d8ae3SPaolo Bonzini 
6009c50d8ae3SPaolo Bonzini 		kvm_recover_nx_lpages(kvm);
6010c50d8ae3SPaolo Bonzini 	}
6011c50d8ae3SPaolo Bonzini }
6012c50d8ae3SPaolo Bonzini 
6013c50d8ae3SPaolo Bonzini int kvm_mmu_post_init_vm(struct kvm *kvm)
6014c50d8ae3SPaolo Bonzini {
6015c50d8ae3SPaolo Bonzini 	int err;
6016c50d8ae3SPaolo Bonzini 
6017c50d8ae3SPaolo Bonzini 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6018c50d8ae3SPaolo Bonzini 					  "kvm-nx-lpage-recovery",
6019c50d8ae3SPaolo Bonzini 					  &kvm->arch.nx_lpage_recovery_thread);
6020c50d8ae3SPaolo Bonzini 	if (!err)
6021c50d8ae3SPaolo Bonzini 		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6022c50d8ae3SPaolo Bonzini 
6023c50d8ae3SPaolo Bonzini 	return err;
6024c50d8ae3SPaolo Bonzini }
6025c50d8ae3SPaolo Bonzini 
6026c50d8ae3SPaolo Bonzini void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6027c50d8ae3SPaolo Bonzini {
6028c50d8ae3SPaolo Bonzini 	if (kvm->arch.nx_lpage_recovery_thread)
6029c50d8ae3SPaolo Bonzini 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6030c50d8ae3SPaolo Bonzini }
6031