xref: /linux/arch/x86/kvm/mmu/mmu.c (revision 7b367bc9a6415dc6f31135d50ab37e8ee18d6974)
1c50d8ae3SPaolo Bonzini // SPDX-License-Identifier: GPL-2.0-only
2c50d8ae3SPaolo Bonzini /*
3c50d8ae3SPaolo Bonzini  * Kernel-based Virtual Machine driver for Linux
4c50d8ae3SPaolo Bonzini  *
5c50d8ae3SPaolo Bonzini  * This module enables machines with Intel VT-x extensions to run virtual
6c50d8ae3SPaolo Bonzini  * machines without emulation or binary translation.
7c50d8ae3SPaolo Bonzini  *
8c50d8ae3SPaolo Bonzini  * MMU support
9c50d8ae3SPaolo Bonzini  *
10c50d8ae3SPaolo Bonzini  * Copyright (C) 2006 Qumranet, Inc.
11c50d8ae3SPaolo Bonzini  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12c50d8ae3SPaolo Bonzini  *
13c50d8ae3SPaolo Bonzini  * Authors:
14c50d8ae3SPaolo Bonzini  *   Yaniv Kamay  <yaniv@qumranet.com>
15c50d8ae3SPaolo Bonzini  *   Avi Kivity   <avi@qumranet.com>
16c50d8ae3SPaolo Bonzini  */
17c50d8ae3SPaolo Bonzini 
18c50d8ae3SPaolo Bonzini #include "irq.h"
1988197e6aS彭浩(Richard) #include "ioapic.h"
20c50d8ae3SPaolo Bonzini #include "mmu.h"
216ca9a6f3SSean Christopherson #include "mmu_internal.h"
22c50d8ae3SPaolo Bonzini #include "x86.h"
23c50d8ae3SPaolo Bonzini #include "kvm_cache_regs.h"
242f728d66SSean Christopherson #include "kvm_emulate.h"
25c50d8ae3SPaolo Bonzini #include "cpuid.h"
26c50d8ae3SPaolo Bonzini 
27c50d8ae3SPaolo Bonzini #include <linux/kvm_host.h>
28c50d8ae3SPaolo Bonzini #include <linux/types.h>
29c50d8ae3SPaolo Bonzini #include <linux/string.h>
30c50d8ae3SPaolo Bonzini #include <linux/mm.h>
31c50d8ae3SPaolo Bonzini #include <linux/highmem.h>
32c50d8ae3SPaolo Bonzini #include <linux/moduleparam.h>
33c50d8ae3SPaolo Bonzini #include <linux/export.h>
34c50d8ae3SPaolo Bonzini #include <linux/swap.h>
35c50d8ae3SPaolo Bonzini #include <linux/hugetlb.h>
36c50d8ae3SPaolo Bonzini #include <linux/compiler.h>
37c50d8ae3SPaolo Bonzini #include <linux/srcu.h>
38c50d8ae3SPaolo Bonzini #include <linux/slab.h>
39c50d8ae3SPaolo Bonzini #include <linux/sched/signal.h>
40c50d8ae3SPaolo Bonzini #include <linux/uaccess.h>
41c50d8ae3SPaolo Bonzini #include <linux/hash.h>
42c50d8ae3SPaolo Bonzini #include <linux/kern_levels.h>
43c50d8ae3SPaolo Bonzini #include <linux/kthread.h>
44c50d8ae3SPaolo Bonzini 
45c50d8ae3SPaolo Bonzini #include <asm/page.h>
46eb243d1dSIngo Molnar #include <asm/memtype.h>
47c50d8ae3SPaolo Bonzini #include <asm/cmpxchg.h>
48c50d8ae3SPaolo Bonzini #include <asm/e820/api.h>
49c50d8ae3SPaolo Bonzini #include <asm/io.h>
50c50d8ae3SPaolo Bonzini #include <asm/vmx.h>
51c50d8ae3SPaolo Bonzini #include <asm/kvm_page_track.h>
52c50d8ae3SPaolo Bonzini #include "trace.h"
53c50d8ae3SPaolo Bonzini 
54c50d8ae3SPaolo Bonzini extern bool itlb_multihit_kvm_mitigation;
55c50d8ae3SPaolo Bonzini 
56c50d8ae3SPaolo Bonzini static int __read_mostly nx_huge_pages = -1;
57c50d8ae3SPaolo Bonzini #ifdef CONFIG_PREEMPT_RT
58c50d8ae3SPaolo Bonzini /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
59c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
60c50d8ae3SPaolo Bonzini #else
61c50d8ae3SPaolo Bonzini static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
62c50d8ae3SPaolo Bonzini #endif
63c50d8ae3SPaolo Bonzini 
64c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
65c50d8ae3SPaolo Bonzini static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
66c50d8ae3SPaolo Bonzini 
67c50d8ae3SPaolo Bonzini static struct kernel_param_ops nx_huge_pages_ops = {
68c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages,
69c50d8ae3SPaolo Bonzini 	.get = param_get_bool,
70c50d8ae3SPaolo Bonzini };
71c50d8ae3SPaolo Bonzini 
72c50d8ae3SPaolo Bonzini static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
73c50d8ae3SPaolo Bonzini 	.set = set_nx_huge_pages_recovery_ratio,
74c50d8ae3SPaolo Bonzini 	.get = param_get_uint,
75c50d8ae3SPaolo Bonzini };
76c50d8ae3SPaolo Bonzini 
77c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
78c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages, "bool");
79c50d8ae3SPaolo Bonzini module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
80c50d8ae3SPaolo Bonzini 		&nx_huge_pages_recovery_ratio, 0644);
81c50d8ae3SPaolo Bonzini __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
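/*
 * Usage note (illustrative; sysfs paths assumed from standard module_param
 * behaviour, not taken from this file): both knobs are registered with mode
 * 0644, so they can be changed at runtime, e.g.:
 *
 *	echo N  > /sys/module/kvm/parameters/nx_huge_pages
 *	echo 30 > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
 */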
82c50d8ae3SPaolo Bonzini 
8371fe7013SSean Christopherson static bool __read_mostly force_flush_and_sync_on_reuse;
8471fe7013SSean Christopherson module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
8571fe7013SSean Christopherson 
86c50d8ae3SPaolo Bonzini /*
87c50d8ae3SPaolo Bonzini  * When this variable is set to true it enables Two-Dimensional Paging,
88c50d8ae3SPaolo Bonzini  * where the hardware walks 2 page tables:
89c50d8ae3SPaolo Bonzini  * 1. the guest-virtual to guest-physical walk
90c50d8ae3SPaolo Bonzini  * 2. while doing 1., a guest-physical to host-physical walk for each access
91c50d8ae3SPaolo Bonzini  * If the hardware supports that, we don't need to do shadow paging.
92c50d8ae3SPaolo Bonzini  */
93c50d8ae3SPaolo Bonzini bool tdp_enabled = false;
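/*
 * Illustrative sketch only (helper names are hypothetical, not KVM code):
 * with TDP, resolving one guest-virtual address is the hardware-performed
 * composition of the two walks described above:
 *
 *	hpa_t tdp_resolve(gva_t gva)
 *	{
 *		gpa_t gpa = guest_page_table_walk(gva);	// walk 1
 *		return tdp_page_table_walk(gpa);	// walk 2
 *	}
 *
 * Each guest-physical access made during walk 1 is itself translated by
 * walk 2, so KVM only needs to maintain the second set of tables.
 */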
94c50d8ae3SPaolo Bonzini 
951d92d2e8SSean Christopherson static int max_huge_page_level __read_mostly;
9683013059SSean Christopherson static int max_tdp_level __read_mostly;
97703c335dSSean Christopherson 
98c50d8ae3SPaolo Bonzini enum {
99c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PAGE_FAULT,
100c50d8ae3SPaolo Bonzini 	AUDIT_POST_PAGE_FAULT,
101c50d8ae3SPaolo Bonzini 	AUDIT_PRE_PTE_WRITE,
102c50d8ae3SPaolo Bonzini 	AUDIT_POST_PTE_WRITE,
103c50d8ae3SPaolo Bonzini 	AUDIT_PRE_SYNC,
104c50d8ae3SPaolo Bonzini 	AUDIT_POST_SYNC
105c50d8ae3SPaolo Bonzini };
106c50d8ae3SPaolo Bonzini 
107c50d8ae3SPaolo Bonzini #undef MMU_DEBUG
108c50d8ae3SPaolo Bonzini 
109c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
110c50d8ae3SPaolo Bonzini static bool dbg = 0;
111c50d8ae3SPaolo Bonzini module_param(dbg, bool, 0644);
112c50d8ae3SPaolo Bonzini 
113c50d8ae3SPaolo Bonzini #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
114c50d8ae3SPaolo Bonzini #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
115c50d8ae3SPaolo Bonzini #define MMU_WARN_ON(x) WARN_ON(x)
116c50d8ae3SPaolo Bonzini #else
117c50d8ae3SPaolo Bonzini #define pgprintk(x...) do { } while (0)
118c50d8ae3SPaolo Bonzini #define rmap_printk(x...) do { } while (0)
119c50d8ae3SPaolo Bonzini #define MMU_WARN_ON(x) do { } while (0)
120c50d8ae3SPaolo Bonzini #endif
121c50d8ae3SPaolo Bonzini 
122c50d8ae3SPaolo Bonzini #define PTE_PREFETCH_NUM		8
123c50d8ae3SPaolo Bonzini 
124c50d8ae3SPaolo Bonzini #define PT_FIRST_AVAIL_BITS_SHIFT 10
125c50d8ae3SPaolo Bonzini #define PT64_SECOND_AVAIL_BITS_SHIFT 54
126c50d8ae3SPaolo Bonzini 
127c50d8ae3SPaolo Bonzini /*
128c50d8ae3SPaolo Bonzini  * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
129c50d8ae3SPaolo Bonzini  * Access Tracking SPTEs.
130c50d8ae3SPaolo Bonzini  */
131c50d8ae3SPaolo Bonzini #define SPTE_SPECIAL_MASK (3ULL << 52)
132c50d8ae3SPaolo Bonzini #define SPTE_AD_ENABLED_MASK (0ULL << 52)
133c50d8ae3SPaolo Bonzini #define SPTE_AD_DISABLED_MASK (1ULL << 52)
134c50d8ae3SPaolo Bonzini #define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
135c50d8ae3SPaolo Bonzini #define SPTE_MMIO_MASK (3ULL << 52)
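/*
 * For reference, the resulting encodings of SPTE bits 53:52 are:
 *	00b - SPTE_AD_ENABLED_MASK     (A/D bits in use)
 *	01b - SPTE_AD_DISABLED_MASK    (A/D bits not usable)
 *	10b - SPTE_AD_WRPROT_ONLY_MASK (dirty tracking via write-protection)
 *	11b - SPTE_MMIO_MASK           (MMIO SPTE)
 */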
136c50d8ae3SPaolo Bonzini 
137c50d8ae3SPaolo Bonzini #define PT64_LEVEL_BITS 9
138c50d8ae3SPaolo Bonzini 
139c50d8ae3SPaolo Bonzini #define PT64_LEVEL_SHIFT(level) \
140c50d8ae3SPaolo Bonzini 		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
141c50d8ae3SPaolo Bonzini 
142c50d8ae3SPaolo Bonzini #define PT64_INDEX(address, level)\
143c50d8ae3SPaolo Bonzini 	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
144c50d8ae3SPaolo Bonzini 
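/*
 * Worked example (illustrative address): with PAGE_SHIFT == 12 and 9 bits
 * per level, PT64_LEVEL_SHIFT(1..4) is 12, 21, 30 and 39, so for the
 * address 0x7f1234567000, PT64_INDEX(addr, 1) == (addr >> 12) & 511 == 0x167.
 * The PT32 macros below follow the same pattern with 10 bits per level.
 */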
145c50d8ae3SPaolo Bonzini 
146c50d8ae3SPaolo Bonzini #define PT32_LEVEL_BITS 10
147c50d8ae3SPaolo Bonzini 
148c50d8ae3SPaolo Bonzini #define PT32_LEVEL_SHIFT(level) \
149c50d8ae3SPaolo Bonzini 		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
150c50d8ae3SPaolo Bonzini 
151c50d8ae3SPaolo Bonzini #define PT32_LVL_OFFSET_MASK(level) \
152c50d8ae3SPaolo Bonzini 	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
153c50d8ae3SPaolo Bonzini 						* PT32_LEVEL_BITS))) - 1))
154c50d8ae3SPaolo Bonzini 
155c50d8ae3SPaolo Bonzini #define PT32_INDEX(address, level)\
156c50d8ae3SPaolo Bonzini 	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
157c50d8ae3SPaolo Bonzini 
158c50d8ae3SPaolo Bonzini 
159c50d8ae3SPaolo Bonzini #ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
160c50d8ae3SPaolo Bonzini #define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
161c50d8ae3SPaolo Bonzini #else
162c50d8ae3SPaolo Bonzini #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
163c50d8ae3SPaolo Bonzini #endif
164c50d8ae3SPaolo Bonzini #define PT64_LVL_ADDR_MASK(level) \
165c50d8ae3SPaolo Bonzini 	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
166c50d8ae3SPaolo Bonzini 						* PT64_LEVEL_BITS))) - 1))
167c50d8ae3SPaolo Bonzini #define PT64_LVL_OFFSET_MASK(level) \
168c50d8ae3SPaolo Bonzini 	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
169c50d8ae3SPaolo Bonzini 						* PT64_LEVEL_BITS))) - 1))
170c50d8ae3SPaolo Bonzini 
171c50d8ae3SPaolo Bonzini #define PT32_BASE_ADDR_MASK PAGE_MASK
172c50d8ae3SPaolo Bonzini #define PT32_DIR_BASE_ADDR_MASK \
173c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
174c50d8ae3SPaolo Bonzini #define PT32_LVL_ADDR_MASK(level) \
175c50d8ae3SPaolo Bonzini 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
176c50d8ae3SPaolo Bonzini 					    * PT32_LEVEL_BITS))) - 1))
177c50d8ae3SPaolo Bonzini 
178c50d8ae3SPaolo Bonzini #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
179c50d8ae3SPaolo Bonzini 			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)
180c50d8ae3SPaolo Bonzini 
181c50d8ae3SPaolo Bonzini #define ACC_EXEC_MASK    1
182c50d8ae3SPaolo Bonzini #define ACC_WRITE_MASK   PT_WRITABLE_MASK
183c50d8ae3SPaolo Bonzini #define ACC_USER_MASK    PT_USER_MASK
184c50d8ae3SPaolo Bonzini #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
185c50d8ae3SPaolo Bonzini 
186c50d8ae3SPaolo Bonzini /* The mask for the R/X bits in EPT PTEs */
187c50d8ae3SPaolo Bonzini #define PT64_EPT_READABLE_MASK			0x1ull
188c50d8ae3SPaolo Bonzini #define PT64_EPT_EXECUTABLE_MASK		0x4ull
189c50d8ae3SPaolo Bonzini 
190c50d8ae3SPaolo Bonzini #include <trace/events/kvm.h>
191c50d8ae3SPaolo Bonzini 
192c50d8ae3SPaolo Bonzini #define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
193c50d8ae3SPaolo Bonzini #define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
194c50d8ae3SPaolo Bonzini 
195c50d8ae3SPaolo Bonzini #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
196c50d8ae3SPaolo Bonzini 
197c50d8ae3SPaolo Bonzini /* make pte_list_desc fit well in a cache line */
198c50d8ae3SPaolo Bonzini #define PTE_LIST_EXT 3
199c50d8ae3SPaolo Bonzini 
200c50d8ae3SPaolo Bonzini /*
201c50d8ae3SPaolo Bonzini  * Return values of handle_mmio_page_fault and mmu.page_fault:
202c50d8ae3SPaolo Bonzini  * RET_PF_RETRY: let CPU fault again on the address.
203c50d8ae3SPaolo Bonzini  * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
204c50d8ae3SPaolo Bonzini  *
205c50d8ae3SPaolo Bonzini  * For handle_mmio_page_fault only:
206c50d8ae3SPaolo Bonzini  * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
207c50d8ae3SPaolo Bonzini  */
208c50d8ae3SPaolo Bonzini enum {
209c50d8ae3SPaolo Bonzini 	RET_PF_RETRY = 0,
210c50d8ae3SPaolo Bonzini 	RET_PF_EMULATE = 1,
211c50d8ae3SPaolo Bonzini 	RET_PF_INVALID = 2,
212c50d8ae3SPaolo Bonzini };
213c50d8ae3SPaolo Bonzini 
214c50d8ae3SPaolo Bonzini struct pte_list_desc {
215c50d8ae3SPaolo Bonzini 	u64 *sptes[PTE_LIST_EXT];
216c50d8ae3SPaolo Bonzini 	struct pte_list_desc *more;
217c50d8ae3SPaolo Bonzini };
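/*
 * Size check (illustrative): with PTE_LIST_EXT == 3 this struct is four
 * pointers, i.e. 32 bytes on x86-64, so two descriptors share one
 * 64-byte cache line.
 */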
218c50d8ae3SPaolo Bonzini 
219c50d8ae3SPaolo Bonzini struct kvm_shadow_walk_iterator {
220c50d8ae3SPaolo Bonzini 	u64 addr;
221c50d8ae3SPaolo Bonzini 	hpa_t shadow_addr;
222c50d8ae3SPaolo Bonzini 	u64 *sptep;
223c50d8ae3SPaolo Bonzini 	int level;
224c50d8ae3SPaolo Bonzini 	unsigned index;
225c50d8ae3SPaolo Bonzini };
226c50d8ae3SPaolo Bonzini 
227c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
228c50d8ae3SPaolo Bonzini 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
229c50d8ae3SPaolo Bonzini 					 (_root), (_addr));                \
230c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			           \
231c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
232c50d8ae3SPaolo Bonzini 
233c50d8ae3SPaolo Bonzini #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
234c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
235c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker));			\
236c50d8ae3SPaolo Bonzini 	     shadow_walk_next(&(_walker)))
237c50d8ae3SPaolo Bonzini 
238c50d8ae3SPaolo Bonzini #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
239c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
240c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&(_walker)) &&				\
241c50d8ae3SPaolo Bonzini 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
242c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&(_walker), spte))
243c50d8ae3SPaolo Bonzini 
244c50d8ae3SPaolo Bonzini static struct kmem_cache *pte_list_desc_cache;
245c50d8ae3SPaolo Bonzini static struct kmem_cache *mmu_page_header_cache;
246c50d8ae3SPaolo Bonzini static struct percpu_counter kvm_total_used_mmu_pages;
247c50d8ae3SPaolo Bonzini 
248c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_nx_mask;
249c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
250c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_user_mask;
251c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_accessed_mask;
252c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_dirty_mask;
253c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_mmio_value;
254c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_mmio_access_mask;
255c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_present_mask;
256c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_me_mask;
257c50d8ae3SPaolo Bonzini 
258c50d8ae3SPaolo Bonzini /*
259c50d8ae3SPaolo Bonzini  * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
260c50d8ae3SPaolo Bonzini  * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
261c50d8ae3SPaolo Bonzini  * pages.
262c50d8ae3SPaolo Bonzini  */
263c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_acc_track_mask;
264c50d8ae3SPaolo Bonzini 
265c50d8ae3SPaolo Bonzini /*
266c50d8ae3SPaolo Bonzini  * The mask/shift to use for saving the original R/X bits when marking the PTE
267c50d8ae3SPaolo Bonzini  * as not-present for access tracking purposes. We do not save the W bit as the
268c50d8ae3SPaolo Bonzini  * PTEs being access tracked also need to be dirty tracked, so the W bit will be
269c50d8ae3SPaolo Bonzini  * restored only when a write is attempted to the page.
270c50d8ae3SPaolo Bonzini  */
271c50d8ae3SPaolo Bonzini static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
272c50d8ae3SPaolo Bonzini 						    PT64_EPT_EXECUTABLE_MASK;
273c50d8ae3SPaolo Bonzini static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
274c50d8ae3SPaolo Bonzini 
275c50d8ae3SPaolo Bonzini /*
276c50d8ae3SPaolo Bonzini  * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
277c50d8ae3SPaolo Bonzini  * to guard against L1TF attacks.
278c50d8ae3SPaolo Bonzini  */
279c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
280c50d8ae3SPaolo Bonzini 
281c50d8ae3SPaolo Bonzini /*
282c50d8ae3SPaolo Bonzini  * The number of high-order 1 bits to use in the mask above.
283c50d8ae3SPaolo Bonzini  */
284c50d8ae3SPaolo Bonzini static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
285c50d8ae3SPaolo Bonzini 
286c50d8ae3SPaolo Bonzini /*
287c50d8ae3SPaolo Bonzini  * In some cases, we need to preserve the GFN of a non-present or reserved
288c50d8ae3SPaolo Bonzini  * SPTE when we usurp the upper five bits of the physical address space to
289c50d8ae3SPaolo Bonzini  * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
290c50d8ae3SPaolo Bonzini  * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
291c50d8ae3SPaolo Bonzini  * left into the reserved bits, i.e. the GFN in the SPTE will be split into
292c50d8ae3SPaolo Bonzini  * high and low parts.  This mask covers the lower bits of the GFN.
293c50d8ae3SPaolo Bonzini  */
294c50d8ae3SPaolo Bonzini static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
295c50d8ae3SPaolo Bonzini 
296c50d8ae3SPaolo Bonzini /*
297c50d8ae3SPaolo Bonzini  * The number of non-reserved physical address bits irrespective of features
298c50d8ae3SPaolo Bonzini  * that repurpose legal bits, e.g. MKTME.
299c50d8ae3SPaolo Bonzini  */
300c50d8ae3SPaolo Bonzini static u8 __read_mostly shadow_phys_bits;
301c50d8ae3SPaolo Bonzini 
302c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 spte);
303c50d8ae3SPaolo Bonzini static bool is_executable_pte(u64 spte);
304c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
305c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
306c50d8ae3SPaolo Bonzini 
307c50d8ae3SPaolo Bonzini #define CREATE_TRACE_POINTS
308c50d8ae3SPaolo Bonzini #include "mmutrace.h"
309c50d8ae3SPaolo Bonzini 
310c50d8ae3SPaolo Bonzini 
311c50d8ae3SPaolo Bonzini static inline bool kvm_available_flush_tlb_with_range(void)
312c50d8ae3SPaolo Bonzini {
313afaf0b2fSSean Christopherson 	return kvm_x86_ops.tlb_remote_flush_with_range;
314c50d8ae3SPaolo Bonzini }
315c50d8ae3SPaolo Bonzini 
316c50d8ae3SPaolo Bonzini static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
317c50d8ae3SPaolo Bonzini 		struct kvm_tlb_range *range)
318c50d8ae3SPaolo Bonzini {
319c50d8ae3SPaolo Bonzini 	int ret = -ENOTSUPP;
320c50d8ae3SPaolo Bonzini 
321afaf0b2fSSean Christopherson 	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
322afaf0b2fSSean Christopherson 		ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);
323c50d8ae3SPaolo Bonzini 
324c50d8ae3SPaolo Bonzini 	if (ret)
325c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
326c50d8ae3SPaolo Bonzini }
327c50d8ae3SPaolo Bonzini 
328c50d8ae3SPaolo Bonzini static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
329c50d8ae3SPaolo Bonzini 		u64 start_gfn, u64 pages)
330c50d8ae3SPaolo Bonzini {
331c50d8ae3SPaolo Bonzini 	struct kvm_tlb_range range;
332c50d8ae3SPaolo Bonzini 
333c50d8ae3SPaolo Bonzini 	range.start_gfn = start_gfn;
334c50d8ae3SPaolo Bonzini 	range.pages = pages;
335c50d8ae3SPaolo Bonzini 
336c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_range(kvm, &range);
337c50d8ae3SPaolo Bonzini }
338c50d8ae3SPaolo Bonzini 
339e7581cacSPaolo Bonzini void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
340c50d8ae3SPaolo Bonzini {
341c50d8ae3SPaolo Bonzini 	BUG_ON((u64)(unsigned)access_mask != access_mask);
342d43e2675SPaolo Bonzini 	WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
343d43e2675SPaolo Bonzini 	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
344c50d8ae3SPaolo Bonzini 	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
345c50d8ae3SPaolo Bonzini 	shadow_mmio_access_mask = access_mask;
346c50d8ae3SPaolo Bonzini }
347c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
348c50d8ae3SPaolo Bonzini 
349c50d8ae3SPaolo Bonzini static bool is_mmio_spte(u64 spte)
350c50d8ae3SPaolo Bonzini {
351e7581cacSPaolo Bonzini 	return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
352c50d8ae3SPaolo Bonzini }
353c50d8ae3SPaolo Bonzini 
354c50d8ae3SPaolo Bonzini static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
355c50d8ae3SPaolo Bonzini {
356c50d8ae3SPaolo Bonzini 	return sp->role.ad_disabled;
357c50d8ae3SPaolo Bonzini }
358c50d8ae3SPaolo Bonzini 
359c50d8ae3SPaolo Bonzini static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
360c50d8ae3SPaolo Bonzini {
361c50d8ae3SPaolo Bonzini 	/*
362c50d8ae3SPaolo Bonzini 	 * When using the EPT page-modification log, the GPAs in the log
363c50d8ae3SPaolo Bonzini 	 * would come from L2 rather than L1.  Therefore, we need to rely
364c50d8ae3SPaolo Bonzini 	 * on write protection to record dirty pages.  This also bypasses
365c50d8ae3SPaolo Bonzini 	 * PML, since writes now result in a vmexit.
366c50d8ae3SPaolo Bonzini 	 */
367c50d8ae3SPaolo Bonzini 	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
368c50d8ae3SPaolo Bonzini }
369c50d8ae3SPaolo Bonzini 
370c50d8ae3SPaolo Bonzini static inline bool spte_ad_enabled(u64 spte)
371c50d8ae3SPaolo Bonzini {
372c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(is_mmio_spte(spte));
373c50d8ae3SPaolo Bonzini 	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
374c50d8ae3SPaolo Bonzini }
375c50d8ae3SPaolo Bonzini 
376c50d8ae3SPaolo Bonzini static inline bool spte_ad_need_write_protect(u64 spte)
377c50d8ae3SPaolo Bonzini {
378c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(is_mmio_spte(spte));
379c50d8ae3SPaolo Bonzini 	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
380c50d8ae3SPaolo Bonzini }
381c50d8ae3SPaolo Bonzini 
382c50d8ae3SPaolo Bonzini static bool is_nx_huge_page_enabled(void)
383c50d8ae3SPaolo Bonzini {
384c50d8ae3SPaolo Bonzini 	return READ_ONCE(nx_huge_pages);
385c50d8ae3SPaolo Bonzini }
386c50d8ae3SPaolo Bonzini 
387c50d8ae3SPaolo Bonzini static inline u64 spte_shadow_accessed_mask(u64 spte)
388c50d8ae3SPaolo Bonzini {
389c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(is_mmio_spte(spte));
390c50d8ae3SPaolo Bonzini 	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
391c50d8ae3SPaolo Bonzini }
392c50d8ae3SPaolo Bonzini 
393c50d8ae3SPaolo Bonzini static inline u64 spte_shadow_dirty_mask(u64 spte)
394c50d8ae3SPaolo Bonzini {
395c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(is_mmio_spte(spte));
396c50d8ae3SPaolo Bonzini 	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
397c50d8ae3SPaolo Bonzini }
398c50d8ae3SPaolo Bonzini 
399c50d8ae3SPaolo Bonzini static inline bool is_access_track_spte(u64 spte)
400c50d8ae3SPaolo Bonzini {
401c50d8ae3SPaolo Bonzini 	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
402c50d8ae3SPaolo Bonzini }
403c50d8ae3SPaolo Bonzini 
404c50d8ae3SPaolo Bonzini /*
405c50d8ae3SPaolo Bonzini  * Due to limited space in PTEs, the MMIO generation is an 18 bit subset of
406c50d8ae3SPaolo Bonzini  * the memslots generation and is derived as follows:
407c50d8ae3SPaolo Bonzini  *
408c50d8ae3SPaolo Bonzini  * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
409c50d8ae3SPaolo Bonzini  * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62
410c50d8ae3SPaolo Bonzini  *
411c50d8ae3SPaolo Bonzini  * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
412c50d8ae3SPaolo Bonzini  * the MMIO generation number, as doing so would require stealing a bit from
413c50d8ae3SPaolo Bonzini  * the "real" generation number and thus effectively halve the maximum number
414c50d8ae3SPaolo Bonzini  * of MMIO generations that can be handled before encountering a wrap (which
415c50d8ae3SPaolo Bonzini  * requires a full MMU zap).  The flag is instead explicitly queried when
416c50d8ae3SPaolo Bonzini  * checking for MMIO spte cache hits.
417c50d8ae3SPaolo Bonzini  */
41856871d44SPaolo Bonzini #define MMIO_SPTE_GEN_MASK		GENMASK_ULL(17, 0)
419c50d8ae3SPaolo Bonzini 
420c50d8ae3SPaolo Bonzini #define MMIO_SPTE_GEN_LOW_START		3
421c50d8ae3SPaolo Bonzini #define MMIO_SPTE_GEN_LOW_END		11
422c50d8ae3SPaolo Bonzini #define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
423c50d8ae3SPaolo Bonzini 						    MMIO_SPTE_GEN_LOW_START)
424c50d8ae3SPaolo Bonzini 
42556871d44SPaolo Bonzini #define MMIO_SPTE_GEN_HIGH_START	PT64_SECOND_AVAIL_BITS_SHIFT
42656871d44SPaolo Bonzini #define MMIO_SPTE_GEN_HIGH_END		62
427c50d8ae3SPaolo Bonzini #define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
428c50d8ae3SPaolo Bonzini 						    MMIO_SPTE_GEN_HIGH_START)
42956871d44SPaolo Bonzini 
430c50d8ae3SPaolo Bonzini static u64 generation_mmio_spte_mask(u64 gen)
431c50d8ae3SPaolo Bonzini {
432c50d8ae3SPaolo Bonzini 	u64 mask;
433c50d8ae3SPaolo Bonzini 
434c50d8ae3SPaolo Bonzini 	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
43556871d44SPaolo Bonzini 	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);
436c50d8ae3SPaolo Bonzini 
437c50d8ae3SPaolo Bonzini 	mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
438c50d8ae3SPaolo Bonzini 	mask |= ((gen >> 9) << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
439c50d8ae3SPaolo Bonzini 	return mask;
440c50d8ae3SPaolo Bonzini }
441c50d8ae3SPaolo Bonzini 
442c50d8ae3SPaolo Bonzini static u64 get_mmio_spte_generation(u64 spte)
443c50d8ae3SPaolo Bonzini {
444c50d8ae3SPaolo Bonzini 	u64 gen;
445c50d8ae3SPaolo Bonzini 
446c50d8ae3SPaolo Bonzini 	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
447c50d8ae3SPaolo Bonzini 	gen |= ((spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START) << 9;
448c50d8ae3SPaolo Bonzini 	return gen;
449c50d8ae3SPaolo Bonzini }
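/*
 * Worked round trip (illustrative): generation_mmio_spte_mask() places gen
 * bits 0-8 in spte bits 3-11 and gen bits 9-17 in spte bits 54-62;
 * get_mmio_spte_generation() reverses exactly that, so
 * get_mmio_spte_generation(generation_mmio_spte_mask(gen)) == gen for any
 * gen <= MMIO_SPTE_GEN_MASK, e.g. the all-ones generation 0x3ffff.
 */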
450c50d8ae3SPaolo Bonzini 
4518f79b064SBen Gardon static u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
452c50d8ae3SPaolo Bonzini {
454c50d8ae3SPaolo Bonzini 	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
455c50d8ae3SPaolo Bonzini 	u64 mask = generation_mmio_spte_mask(gen);
456c50d8ae3SPaolo Bonzini 	u64 gpa = gfn << PAGE_SHIFT;
457c50d8ae3SPaolo Bonzini 
458c50d8ae3SPaolo Bonzini 	access &= shadow_mmio_access_mask;
459c50d8ae3SPaolo Bonzini 	mask |= shadow_mmio_value | access;
460c50d8ae3SPaolo Bonzini 	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
461c50d8ae3SPaolo Bonzini 	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
462c50d8ae3SPaolo Bonzini 		<< shadow_nonpresent_or_rsvd_mask_len;
463c50d8ae3SPaolo Bonzini 
4648f79b064SBen Gardon 	return mask;
4658f79b064SBen Gardon }
4668f79b064SBen Gardon 
4678f79b064SBen Gardon static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
4688f79b064SBen Gardon 			   unsigned int access)
4698f79b064SBen Gardon {
4708f79b064SBen Gardon 	u64 mask = make_mmio_spte(vcpu, gfn, access);
4718f79b064SBen Gardon 	unsigned int gen = get_mmio_spte_generation(mask);
4728f79b064SBen Gardon 
4738f79b064SBen Gardon 	access = mask & ACC_ALL;
4748f79b064SBen Gardon 
475c50d8ae3SPaolo Bonzini 	trace_mark_mmio_spte(sptep, gfn, access, gen);
476c50d8ae3SPaolo Bonzini 	mmu_spte_set(sptep, mask);
477c50d8ae3SPaolo Bonzini }
478c50d8ae3SPaolo Bonzini 
479c50d8ae3SPaolo Bonzini static gfn_t get_mmio_spte_gfn(u64 spte)
480c50d8ae3SPaolo Bonzini {
481c50d8ae3SPaolo Bonzini 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
482c50d8ae3SPaolo Bonzini 
483c50d8ae3SPaolo Bonzini 	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
484c50d8ae3SPaolo Bonzini 	       & shadow_nonpresent_or_rsvd_mask;
485c50d8ae3SPaolo Bonzini 
486c50d8ae3SPaolo Bonzini 	return gpa >> PAGE_SHIFT;
487c50d8ae3SPaolo Bonzini }
488c50d8ae3SPaolo Bonzini 
489c50d8ae3SPaolo Bonzini static unsigned get_mmio_spte_access(u64 spte)
490c50d8ae3SPaolo Bonzini {
491c50d8ae3SPaolo Bonzini 	return spte & shadow_mmio_access_mask;
492c50d8ae3SPaolo Bonzini }
493c50d8ae3SPaolo Bonzini 
494c50d8ae3SPaolo Bonzini static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4950a2b64c5SBen Gardon 			  kvm_pfn_t pfn, unsigned int access)
496c50d8ae3SPaolo Bonzini {
497c50d8ae3SPaolo Bonzini 	if (unlikely(is_noslot_pfn(pfn))) {
498c50d8ae3SPaolo Bonzini 		mark_mmio_spte(vcpu, sptep, gfn, access);
499c50d8ae3SPaolo Bonzini 		return true;
500c50d8ae3SPaolo Bonzini 	}
501c50d8ae3SPaolo Bonzini 
502c50d8ae3SPaolo Bonzini 	return false;
503c50d8ae3SPaolo Bonzini }
504c50d8ae3SPaolo Bonzini 
505c50d8ae3SPaolo Bonzini static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
506c50d8ae3SPaolo Bonzini {
507c50d8ae3SPaolo Bonzini 	u64 kvm_gen, spte_gen, gen;
508c50d8ae3SPaolo Bonzini 
509c50d8ae3SPaolo Bonzini 	gen = kvm_vcpu_memslots(vcpu)->generation;
510c50d8ae3SPaolo Bonzini 	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
511c50d8ae3SPaolo Bonzini 		return false;
512c50d8ae3SPaolo Bonzini 
513c50d8ae3SPaolo Bonzini 	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
514c50d8ae3SPaolo Bonzini 	spte_gen = get_mmio_spte_generation(spte);
515c50d8ae3SPaolo Bonzini 
516c50d8ae3SPaolo Bonzini 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
517c50d8ae3SPaolo Bonzini 	return likely(kvm_gen == spte_gen);
518c50d8ae3SPaolo Bonzini }
519c50d8ae3SPaolo Bonzini 
520cd313569SMohammed Gamal static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
521cd313569SMohammed Gamal 			   struct x86_exception *exception)
522cd313569SMohammed Gamal {
523ec7771abSMohammed Gamal 	/* Check that the guest physical address does not exceed the guest maximum */
524dc46515cSSean Christopherson 	if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
525ec7771abSMohammed Gamal 		exception->error_code |= PFERR_RSVD_MASK;
526ec7771abSMohammed Gamal 		return UNMAPPED_GVA;
527ec7771abSMohammed Gamal 	}
528ec7771abSMohammed Gamal 
529cd313569SMohammed Gamal 	return gpa;
530cd313569SMohammed Gamal }
531cd313569SMohammed Gamal 
532c50d8ae3SPaolo Bonzini /*
533c50d8ae3SPaolo Bonzini  * Sets the shadow PTE masks used by the MMU.
534c50d8ae3SPaolo Bonzini  *
535c50d8ae3SPaolo Bonzini  * Assumptions:
536c50d8ae3SPaolo Bonzini  *  - Setting either @accessed_mask or @dirty_mask requires setting both
537c50d8ae3SPaolo Bonzini  *  - At least one of @accessed_mask or @acc_track_mask must be set
538c50d8ae3SPaolo Bonzini  */
539c50d8ae3SPaolo Bonzini void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
540c50d8ae3SPaolo Bonzini 		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
541c50d8ae3SPaolo Bonzini 		u64 acc_track_mask, u64 me_mask)
542c50d8ae3SPaolo Bonzini {
543c50d8ae3SPaolo Bonzini 	BUG_ON(!dirty_mask != !accessed_mask);
544c50d8ae3SPaolo Bonzini 	BUG_ON(!accessed_mask && !acc_track_mask);
545c50d8ae3SPaolo Bonzini 	BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);
546c50d8ae3SPaolo Bonzini 
547c50d8ae3SPaolo Bonzini 	shadow_user_mask = user_mask;
548c50d8ae3SPaolo Bonzini 	shadow_accessed_mask = accessed_mask;
549c50d8ae3SPaolo Bonzini 	shadow_dirty_mask = dirty_mask;
550c50d8ae3SPaolo Bonzini 	shadow_nx_mask = nx_mask;
551c50d8ae3SPaolo Bonzini 	shadow_x_mask = x_mask;
552c50d8ae3SPaolo Bonzini 	shadow_present_mask = p_mask;
553c50d8ae3SPaolo Bonzini 	shadow_acc_track_mask = acc_track_mask;
554c50d8ae3SPaolo Bonzini 	shadow_me_mask = me_mask;
555c50d8ae3SPaolo Bonzini }
556c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
557c50d8ae3SPaolo Bonzini 
558c50d8ae3SPaolo Bonzini static u8 kvm_get_shadow_phys_bits(void)
559c50d8ae3SPaolo Bonzini {
560c50d8ae3SPaolo Bonzini 	/*
5617adacf5eSPaolo Bonzini 	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME is detected
5627adacf5eSPaolo Bonzini 	 * in the CPU detection code, but the processor treats those reduced bits as
5637adacf5eSPaolo Bonzini 	 * 'keyID' bits, so they are not reserved bits. Therefore KVM needs to look at
5647adacf5eSPaolo Bonzini 	 * the physical address bits reported by CPUID.
565c50d8ae3SPaolo Bonzini 	 */
5667adacf5eSPaolo Bonzini 	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
567c50d8ae3SPaolo Bonzini 		return cpuid_eax(0x80000008) & 0xff;
5687adacf5eSPaolo Bonzini 
5697adacf5eSPaolo Bonzini 	/*
5707adacf5eSPaolo Bonzini 	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
5717adacf5eSPaolo Bonzini 	 * custom CPUID.  Proceed with whatever the kernel found since these features
5727adacf5eSPaolo Bonzini 	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
5737adacf5eSPaolo Bonzini 	 */
5747adacf5eSPaolo Bonzini 	return boot_cpu_data.x86_phys_bits;
575c50d8ae3SPaolo Bonzini }
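/*
 * Illustrative example (CPUID values assumed): on a CPU reporting
 * CPUID.80000008H:EAX == 0x3030, the low byte gives MAXPHYADDR == 0x30,
 * so shadow_phys_bits is 48 even if MKTME/SME repurposed some of those
 * bits as keyID bits.
 */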
576c50d8ae3SPaolo Bonzini 
577c50d8ae3SPaolo Bonzini static void kvm_mmu_reset_all_pte_masks(void)
578c50d8ae3SPaolo Bonzini {
579c50d8ae3SPaolo Bonzini 	u8 low_phys_bits;
580c50d8ae3SPaolo Bonzini 
581c50d8ae3SPaolo Bonzini 	shadow_user_mask = 0;
582c50d8ae3SPaolo Bonzini 	shadow_accessed_mask = 0;
583c50d8ae3SPaolo Bonzini 	shadow_dirty_mask = 0;
584c50d8ae3SPaolo Bonzini 	shadow_nx_mask = 0;
585c50d8ae3SPaolo Bonzini 	shadow_x_mask = 0;
586c50d8ae3SPaolo Bonzini 	shadow_present_mask = 0;
587c50d8ae3SPaolo Bonzini 	shadow_acc_track_mask = 0;
588c50d8ae3SPaolo Bonzini 
589c50d8ae3SPaolo Bonzini 	shadow_phys_bits = kvm_get_shadow_phys_bits();
590c50d8ae3SPaolo Bonzini 
591c50d8ae3SPaolo Bonzini 	/*
592c50d8ae3SPaolo Bonzini 	 * If the CPU has 46 or fewer physical address bits, then set an
593c50d8ae3SPaolo Bonzini 	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
594c50d8ae3SPaolo Bonzini 	 * assumed that the CPU is not vulnerable to L1TF.
595c50d8ae3SPaolo Bonzini 	 *
596c50d8ae3SPaolo Bonzini 	 * Some Intel CPUs address the L1 cache using more PA bits than are
597c50d8ae3SPaolo Bonzini 	 * reported by CPUID. Use the PA width of the L1 cache when possible
598c50d8ae3SPaolo Bonzini 	 * to achieve more effective mitigation, e.g. if system RAM overlaps
599c50d8ae3SPaolo Bonzini 	 * the most significant bits of legal physical address space.
600c50d8ae3SPaolo Bonzini 	 */
601c50d8ae3SPaolo Bonzini 	shadow_nonpresent_or_rsvd_mask = 0;
602d43e2675SPaolo Bonzini 	low_phys_bits = boot_cpu_data.x86_phys_bits;
603d43e2675SPaolo Bonzini 	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
604d43e2675SPaolo Bonzini 	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
605d43e2675SPaolo Bonzini 			  52 - shadow_nonpresent_or_rsvd_mask_len)) {
606d43e2675SPaolo Bonzini 		low_phys_bits = boot_cpu_data.x86_cache_bits
607d43e2675SPaolo Bonzini 			- shadow_nonpresent_or_rsvd_mask_len;
608c50d8ae3SPaolo Bonzini 		shadow_nonpresent_or_rsvd_mask =
609d43e2675SPaolo Bonzini 			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
610d43e2675SPaolo Bonzini 	}
611c50d8ae3SPaolo Bonzini 
612c50d8ae3SPaolo Bonzini 	shadow_nonpresent_or_rsvd_lower_gfn_mask =
613c50d8ae3SPaolo Bonzini 		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
614c50d8ae3SPaolo Bonzini }
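/*
 * Worked example (illustrative CPU values): on an L1TF-affected part with
 * x86_cache_bits == 44, low_phys_bits becomes 44 - 5 == 39, so
 * shadow_nonpresent_or_rsvd_mask covers physical address bits 39-43 and
 * shadow_nonpresent_or_rsvd_lower_gfn_mask covers spte bits 12-38.
 */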
615c50d8ae3SPaolo Bonzini 
616c50d8ae3SPaolo Bonzini static int is_cpuid_PSE36(void)
617c50d8ae3SPaolo Bonzini {
618c50d8ae3SPaolo Bonzini 	return 1;
619c50d8ae3SPaolo Bonzini }
620c50d8ae3SPaolo Bonzini 
621c50d8ae3SPaolo Bonzini static int is_nx(struct kvm_vcpu *vcpu)
622c50d8ae3SPaolo Bonzini {
623c50d8ae3SPaolo Bonzini 	return vcpu->arch.efer & EFER_NX;
624c50d8ae3SPaolo Bonzini }
625c50d8ae3SPaolo Bonzini 
626c50d8ae3SPaolo Bonzini static int is_shadow_present_pte(u64 pte)
627c50d8ae3SPaolo Bonzini {
628c50d8ae3SPaolo Bonzini 	return (pte != 0) && !is_mmio_spte(pte);
629c50d8ae3SPaolo Bonzini }
630c50d8ae3SPaolo Bonzini 
631c50d8ae3SPaolo Bonzini static int is_large_pte(u64 pte)
632c50d8ae3SPaolo Bonzini {
633c50d8ae3SPaolo Bonzini 	return pte & PT_PAGE_SIZE_MASK;
634c50d8ae3SPaolo Bonzini }
635c50d8ae3SPaolo Bonzini 
636c50d8ae3SPaolo Bonzini static int is_last_spte(u64 pte, int level)
637c50d8ae3SPaolo Bonzini {
6383bae0459SSean Christopherson 	if (level == PG_LEVEL_4K)
639c50d8ae3SPaolo Bonzini 		return 1;
640c50d8ae3SPaolo Bonzini 	if (is_large_pte(pte))
641c50d8ae3SPaolo Bonzini 		return 1;
642c50d8ae3SPaolo Bonzini 	return 0;
643c50d8ae3SPaolo Bonzini }
644c50d8ae3SPaolo Bonzini 
645c50d8ae3SPaolo Bonzini static bool is_executable_pte(u64 spte)
646c50d8ae3SPaolo Bonzini {
647c50d8ae3SPaolo Bonzini 	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
648c50d8ae3SPaolo Bonzini }
649c50d8ae3SPaolo Bonzini 
650c50d8ae3SPaolo Bonzini static kvm_pfn_t spte_to_pfn(u64 pte)
651c50d8ae3SPaolo Bonzini {
652c50d8ae3SPaolo Bonzini 	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
653c50d8ae3SPaolo Bonzini }
654c50d8ae3SPaolo Bonzini 
655c50d8ae3SPaolo Bonzini static gfn_t pse36_gfn_delta(u32 gpte)
656c50d8ae3SPaolo Bonzini {
657c50d8ae3SPaolo Bonzini 	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
658c50d8ae3SPaolo Bonzini 
659c50d8ae3SPaolo Bonzini 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
660c50d8ae3SPaolo Bonzini }
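/*
 * Worked shift (illustrative): a PSE-36 PDE carries physical address bits
 * 32 and up starting at bit PT32_DIR_PSE36_SHIFT.  The shift above moves
 * gpte bit PT32_DIR_PSE36_SHIFT to gfn bit 20, which becomes physical
 * bit 32 once the gfn is shifted left by PAGE_SHIFT.
 */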
661c50d8ae3SPaolo Bonzini 
662c50d8ae3SPaolo Bonzini #ifdef CONFIG_X86_64
663c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
664c50d8ae3SPaolo Bonzini {
665c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
666c50d8ae3SPaolo Bonzini }
667c50d8ae3SPaolo Bonzini 
668c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
669c50d8ae3SPaolo Bonzini {
670c50d8ae3SPaolo Bonzini 	WRITE_ONCE(*sptep, spte);
671c50d8ae3SPaolo Bonzini }
672c50d8ae3SPaolo Bonzini 
673c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
674c50d8ae3SPaolo Bonzini {
675c50d8ae3SPaolo Bonzini 	return xchg(sptep, spte);
676c50d8ae3SPaolo Bonzini }
677c50d8ae3SPaolo Bonzini 
678c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
679c50d8ae3SPaolo Bonzini {
680c50d8ae3SPaolo Bonzini 	return READ_ONCE(*sptep);
681c50d8ae3SPaolo Bonzini }
682c50d8ae3SPaolo Bonzini #else
683c50d8ae3SPaolo Bonzini union split_spte {
684c50d8ae3SPaolo Bonzini 	struct {
685c50d8ae3SPaolo Bonzini 		u32 spte_low;
686c50d8ae3SPaolo Bonzini 		u32 spte_high;
687c50d8ae3SPaolo Bonzini 	};
688c50d8ae3SPaolo Bonzini 	u64 spte;
689c50d8ae3SPaolo Bonzini };
690c50d8ae3SPaolo Bonzini 
691c50d8ae3SPaolo Bonzini static void count_spte_clear(u64 *sptep, u64 spte)
692c50d8ae3SPaolo Bonzini {
69357354682SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
694c50d8ae3SPaolo Bonzini 
695c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(spte))
696c50d8ae3SPaolo Bonzini 		return;
697c50d8ae3SPaolo Bonzini 
698c50d8ae3SPaolo Bonzini 	/* Ensure the spte is completely set before we increase the count */
699c50d8ae3SPaolo Bonzini 	smp_wmb();
700c50d8ae3SPaolo Bonzini 	sp->clear_spte_count++;
701c50d8ae3SPaolo Bonzini }
702c50d8ae3SPaolo Bonzini 
703c50d8ae3SPaolo Bonzini static void __set_spte(u64 *sptep, u64 spte)
704c50d8ae3SPaolo Bonzini {
705c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
706c50d8ae3SPaolo Bonzini 
707c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
708c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
709c50d8ae3SPaolo Bonzini 
710c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
711c50d8ae3SPaolo Bonzini 
712c50d8ae3SPaolo Bonzini 	/*
713c50d8ae3SPaolo Bonzini 	 * If we map the spte from nonpresent to present, we should store
714c50d8ae3SPaolo Bonzini 	 * the high bits first, then set the present bit, so the CPU cannot
715c50d8ae3SPaolo Bonzini 	 * fetch this spte while we are setting it.
716c50d8ae3SPaolo Bonzini 	 */
717c50d8ae3SPaolo Bonzini 	smp_wmb();
718c50d8ae3SPaolo Bonzini 
719c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
720c50d8ae3SPaolo Bonzini }
721c50d8ae3SPaolo Bonzini 
722c50d8ae3SPaolo Bonzini static void __update_clear_spte_fast(u64 *sptep, u64 spte)
723c50d8ae3SPaolo Bonzini {
724c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte;
725c50d8ae3SPaolo Bonzini 
726c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
727c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
728c50d8ae3SPaolo Bonzini 
729c50d8ae3SPaolo Bonzini 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
730c50d8ae3SPaolo Bonzini 
731c50d8ae3SPaolo Bonzini 	/*
732c50d8ae3SPaolo Bonzini 	 * If we map the spte from present to nonpresent, we should clear
733c50d8ae3SPaolo Bonzini 	 * the present bit first so the vCPU cannot fetch the old high bits.
734c50d8ae3SPaolo Bonzini 	 */
735c50d8ae3SPaolo Bonzini 	smp_wmb();
736c50d8ae3SPaolo Bonzini 
737c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
738c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
739c50d8ae3SPaolo Bonzini }
740c50d8ae3SPaolo Bonzini 
741c50d8ae3SPaolo Bonzini static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
742c50d8ae3SPaolo Bonzini {
743c50d8ae3SPaolo Bonzini 	union split_spte *ssptep, sspte, orig;
744c50d8ae3SPaolo Bonzini 
745c50d8ae3SPaolo Bonzini 	ssptep = (union split_spte *)sptep;
746c50d8ae3SPaolo Bonzini 	sspte = (union split_spte)spte;
747c50d8ae3SPaolo Bonzini 
748c50d8ae3SPaolo Bonzini 	/* xchg acts as a barrier before the setting of the high bits */
749c50d8ae3SPaolo Bonzini 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
750c50d8ae3SPaolo Bonzini 	orig.spte_high = ssptep->spte_high;
751c50d8ae3SPaolo Bonzini 	ssptep->spte_high = sspte.spte_high;
752c50d8ae3SPaolo Bonzini 	count_spte_clear(sptep, spte);
753c50d8ae3SPaolo Bonzini 
754c50d8ae3SPaolo Bonzini 	return orig.spte;
755c50d8ae3SPaolo Bonzini }
756c50d8ae3SPaolo Bonzini 
757c50d8ae3SPaolo Bonzini /*
758c50d8ae3SPaolo Bonzini  * The idea of using this lightweight way to get the spte on x86_32 is from
759c50d8ae3SPaolo Bonzini  * gup_get_pte (mm/gup.c).
760c50d8ae3SPaolo Bonzini  *
761c50d8ae3SPaolo Bonzini  * An spte TLB flush may be pending, because kvm_set_pte_rmapp
762c50d8ae3SPaolo Bonzini  * coalesces them and we may be running outside of the MMU lock.  Therefore
763c50d8ae3SPaolo Bonzini  * we need to protect against in-progress updates of the spte.
764c50d8ae3SPaolo Bonzini  *
765c50d8ae3SPaolo Bonzini  * Reading the spte while an update is in progress may get the old value
766c50d8ae3SPaolo Bonzini  * for the high part of the spte.  The race is fine for a present->non-present
767c50d8ae3SPaolo Bonzini  * change (because the high part of the spte is ignored for non-present spte),
768c50d8ae3SPaolo Bonzini  * but for a present->present change we must reread the spte.
769c50d8ae3SPaolo Bonzini  *
770c50d8ae3SPaolo Bonzini  * All such changes are done in two steps (present->non-present and
771c50d8ae3SPaolo Bonzini  * non-present->present), hence it is enough to count the number of
772c50d8ae3SPaolo Bonzini  * present->non-present updates: if it changed while reading the spte,
773c50d8ae3SPaolo Bonzini  * we might have hit the race.  This is done using clear_spte_count.
774c50d8ae3SPaolo Bonzini  */
775c50d8ae3SPaolo Bonzini static u64 __get_spte_lockless(u64 *sptep)
776c50d8ae3SPaolo Bonzini {
77757354682SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
778c50d8ae3SPaolo Bonzini 	union split_spte spte, *orig = (union split_spte *)sptep;
779c50d8ae3SPaolo Bonzini 	int count;
780c50d8ae3SPaolo Bonzini 
781c50d8ae3SPaolo Bonzini retry:
782c50d8ae3SPaolo Bonzini 	count = sp->clear_spte_count;
783c50d8ae3SPaolo Bonzini 	smp_rmb();
784c50d8ae3SPaolo Bonzini 
785c50d8ae3SPaolo Bonzini 	spte.spte_low = orig->spte_low;
786c50d8ae3SPaolo Bonzini 	smp_rmb();
787c50d8ae3SPaolo Bonzini 
788c50d8ae3SPaolo Bonzini 	spte.spte_high = orig->spte_high;
789c50d8ae3SPaolo Bonzini 	smp_rmb();
790c50d8ae3SPaolo Bonzini 
791c50d8ae3SPaolo Bonzini 	if (unlikely(spte.spte_low != orig->spte_low ||
792c50d8ae3SPaolo Bonzini 	      count != sp->clear_spte_count))
793c50d8ae3SPaolo Bonzini 		goto retry;
794c50d8ae3SPaolo Bonzini 
795c50d8ae3SPaolo Bonzini 	return spte.spte;
796c50d8ae3SPaolo Bonzini }
797c50d8ae3SPaolo Bonzini #endif
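/*
 * Illustrative race on a 32-bit host: a writer does present -> nonpresent
 * (__update_clear_spte_fast) and then nonpresent -> present (__set_spte)
 * while a reader is inside __get_spte_lockless().  The reader may assemble
 * a torn low/high pair, but by then either clear_spte_count has advanced
 * or the re-read of spte_low differs, so the reader retries.
 */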
798c50d8ae3SPaolo Bonzini 
799c50d8ae3SPaolo Bonzini static bool spte_can_locklessly_be_made_writable(u64 spte)
800c50d8ae3SPaolo Bonzini {
801c50d8ae3SPaolo Bonzini 	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
802c50d8ae3SPaolo Bonzini 		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
803c50d8ae3SPaolo Bonzini }
804c50d8ae3SPaolo Bonzini 
805c50d8ae3SPaolo Bonzini static bool spte_has_volatile_bits(u64 spte)
806c50d8ae3SPaolo Bonzini {
807c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(spte))
808c50d8ae3SPaolo Bonzini 		return false;
809c50d8ae3SPaolo Bonzini 
810c50d8ae3SPaolo Bonzini 	/*
811c50d8ae3SPaolo Bonzini 	 * Always atomically update the spte if it can be updated
812c50d8ae3SPaolo Bonzini 	 * outside of the mmu-lock: this ensures the dirty bit is not lost
813c50d8ae3SPaolo Bonzini 	 * and also gives us a stable is_writable_pte(),
814c50d8ae3SPaolo Bonzini 	 * ensuring a needed TLB flush is not missed.
815c50d8ae3SPaolo Bonzini 	 */
816c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(spte) ||
817c50d8ae3SPaolo Bonzini 	    is_access_track_spte(spte))
818c50d8ae3SPaolo Bonzini 		return true;
819c50d8ae3SPaolo Bonzini 
820c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
821c50d8ae3SPaolo Bonzini 		if ((spte & shadow_accessed_mask) == 0 ||
822c50d8ae3SPaolo Bonzini 	    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
823c50d8ae3SPaolo Bonzini 			return true;
824c50d8ae3SPaolo Bonzini 	}
825c50d8ae3SPaolo Bonzini 
826c50d8ae3SPaolo Bonzini 	return false;
827c50d8ae3SPaolo Bonzini }
828c50d8ae3SPaolo Bonzini 
829c50d8ae3SPaolo Bonzini static bool is_accessed_spte(u64 spte)
830c50d8ae3SPaolo Bonzini {
831c50d8ae3SPaolo Bonzini 	u64 accessed_mask = spte_shadow_accessed_mask(spte);
832c50d8ae3SPaolo Bonzini 
833c50d8ae3SPaolo Bonzini 	return accessed_mask ? spte & accessed_mask
834c50d8ae3SPaolo Bonzini 			     : !is_access_track_spte(spte);
835c50d8ae3SPaolo Bonzini }
836c50d8ae3SPaolo Bonzini 
837c50d8ae3SPaolo Bonzini static bool is_dirty_spte(u64 spte)
838c50d8ae3SPaolo Bonzini {
839c50d8ae3SPaolo Bonzini 	u64 dirty_mask = spte_shadow_dirty_mask(spte);
840c50d8ae3SPaolo Bonzini 
841c50d8ae3SPaolo Bonzini 	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
842c50d8ae3SPaolo Bonzini }
843c50d8ae3SPaolo Bonzini 
844c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_set:
845c50d8ae3SPaolo Bonzini  * Set the sptep from nonpresent to present.
846c50d8ae3SPaolo Bonzini  * Note: the sptep being assigned *must* be either not present
847c50d8ae3SPaolo Bonzini  * or in a state where the hardware will not attempt to update
848c50d8ae3SPaolo Bonzini  * the spte.
849c50d8ae3SPaolo Bonzini  */
850c50d8ae3SPaolo Bonzini static void mmu_spte_set(u64 *sptep, u64 new_spte)
851c50d8ae3SPaolo Bonzini {
852c50d8ae3SPaolo Bonzini 	WARN_ON(is_shadow_present_pte(*sptep));
853c50d8ae3SPaolo Bonzini 	__set_spte(sptep, new_spte);
854c50d8ae3SPaolo Bonzini }
855c50d8ae3SPaolo Bonzini 
856c50d8ae3SPaolo Bonzini /*
857c50d8ae3SPaolo Bonzini  * Update the SPTE (excluding the PFN), but do not track changes in its
858c50d8ae3SPaolo Bonzini  * accessed/dirty status.
859c50d8ae3SPaolo Bonzini  */
860c50d8ae3SPaolo Bonzini static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
861c50d8ae3SPaolo Bonzini {
862c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
863c50d8ae3SPaolo Bonzini 
864c50d8ae3SPaolo Bonzini 	WARN_ON(!is_shadow_present_pte(new_spte));
865c50d8ae3SPaolo Bonzini 
866c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte)) {
867c50d8ae3SPaolo Bonzini 		mmu_spte_set(sptep, new_spte);
868c50d8ae3SPaolo Bonzini 		return old_spte;
869c50d8ae3SPaolo Bonzini 	}
870c50d8ae3SPaolo Bonzini 
871c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
872c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, new_spte);
873c50d8ae3SPaolo Bonzini 	else
874c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, new_spte);
875c50d8ae3SPaolo Bonzini 
876c50d8ae3SPaolo Bonzini 	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
877c50d8ae3SPaolo Bonzini 
878c50d8ae3SPaolo Bonzini 	return old_spte;
879c50d8ae3SPaolo Bonzini }
880c50d8ae3SPaolo Bonzini 
881c50d8ae3SPaolo Bonzini /* Rules for using mmu_spte_update:
882c50d8ae3SPaolo Bonzini  * Update the state bits; this means the mapped pfn is not changed.
883c50d8ae3SPaolo Bonzini  *
884c50d8ae3SPaolo Bonzini  * Whenever we overwrite a writable spte with a read-only one we
885c50d8ae3SPaolo Bonzini  * should flush remote TLBs. Otherwise rmap_write_protect
886c50d8ae3SPaolo Bonzini  * will find a read-only spte, even though the writable spte
887c50d8ae3SPaolo Bonzini  * might still be cached in a CPU's TLB; the return value indicates
888c50d8ae3SPaolo Bonzini  * this case.
889c50d8ae3SPaolo Bonzini  *
890c50d8ae3SPaolo Bonzini  * Returns true if the TLB needs to be flushed
891c50d8ae3SPaolo Bonzini  */
892c50d8ae3SPaolo Bonzini static bool mmu_spte_update(u64 *sptep, u64 new_spte)
893c50d8ae3SPaolo Bonzini {
894c50d8ae3SPaolo Bonzini 	bool flush = false;
895c50d8ae3SPaolo Bonzini 	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
896c50d8ae3SPaolo Bonzini 
897c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
898c50d8ae3SPaolo Bonzini 		return false;
899c50d8ae3SPaolo Bonzini 
900c50d8ae3SPaolo Bonzini 	/*
901c50d8ae3SPaolo Bonzini 	 * Updating the spte outside of the mmu-lock is safe, since
902c50d8ae3SPaolo Bonzini 	 * we always atomically update it; see the comments in
903c50d8ae3SPaolo Bonzini 	 * spte_has_volatile_bits().
904c50d8ae3SPaolo Bonzini 	 */
905c50d8ae3SPaolo Bonzini 	if (spte_can_locklessly_be_made_writable(old_spte) &&
906c50d8ae3SPaolo Bonzini 	      !is_writable_pte(new_spte))
907c50d8ae3SPaolo Bonzini 		flush = true;
908c50d8ae3SPaolo Bonzini 
909c50d8ae3SPaolo Bonzini 	/*
910c50d8ae3SPaolo Bonzini 	 * Flush TLB when accessed/dirty states are changed in the page tables,
911c50d8ae3SPaolo Bonzini 	 * to guarantee consistency between TLB and page tables.
912c50d8ae3SPaolo Bonzini 	 */
913c50d8ae3SPaolo Bonzini 
914c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
915c50d8ae3SPaolo Bonzini 		flush = true;
916c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
917c50d8ae3SPaolo Bonzini 	}
918c50d8ae3SPaolo Bonzini 
919c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
920c50d8ae3SPaolo Bonzini 		flush = true;
921c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
922c50d8ae3SPaolo Bonzini 	}
923c50d8ae3SPaolo Bonzini 
924c50d8ae3SPaolo Bonzini 	return flush;
925c50d8ae3SPaolo Bonzini }
926c50d8ae3SPaolo Bonzini 
927c50d8ae3SPaolo Bonzini /*
928c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_track_bits:
929c50d8ae3SPaolo Bonzini  * It sets the sptep from present to nonpresent while tracking the
930c50d8ae3SPaolo Bonzini  * state bits; it is used to clear a last level sptep.
931c50d8ae3SPaolo Bonzini  * Returns non-zero if the PTE was previously valid.
932c50d8ae3SPaolo Bonzini  */
933c50d8ae3SPaolo Bonzini static int mmu_spte_clear_track_bits(u64 *sptep)
934c50d8ae3SPaolo Bonzini {
935c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
936c50d8ae3SPaolo Bonzini 	u64 old_spte = *sptep;
937c50d8ae3SPaolo Bonzini 
938c50d8ae3SPaolo Bonzini 	if (!spte_has_volatile_bits(old_spte))
939c50d8ae3SPaolo Bonzini 		__update_clear_spte_fast(sptep, 0ull);
940c50d8ae3SPaolo Bonzini 	else
941c50d8ae3SPaolo Bonzini 		old_spte = __update_clear_spte_slow(sptep, 0ull);
942c50d8ae3SPaolo Bonzini 
943c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old_spte))
944c50d8ae3SPaolo Bonzini 		return 0;
945c50d8ae3SPaolo Bonzini 
946c50d8ae3SPaolo Bonzini 	pfn = spte_to_pfn(old_spte);
947c50d8ae3SPaolo Bonzini 
948c50d8ae3SPaolo Bonzini 	/*
949c50d8ae3SPaolo Bonzini 	 * KVM does not hold a refcount on the page used by the
950c50d8ae3SPaolo Bonzini 	 * KVM MMU; before such a page can be reclaimed, it should
951c50d8ae3SPaolo Bonzini 	 * first be unmapped from the MMU.
952c50d8ae3SPaolo Bonzini 	 */
953c50d8ae3SPaolo Bonzini 	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
954c50d8ae3SPaolo Bonzini 
955c50d8ae3SPaolo Bonzini 	if (is_accessed_spte(old_spte))
956c50d8ae3SPaolo Bonzini 		kvm_set_pfn_accessed(pfn);
957c50d8ae3SPaolo Bonzini 
958c50d8ae3SPaolo Bonzini 	if (is_dirty_spte(old_spte))
959c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(pfn);
960c50d8ae3SPaolo Bonzini 
961c50d8ae3SPaolo Bonzini 	return 1;
962c50d8ae3SPaolo Bonzini }
963c50d8ae3SPaolo Bonzini 
964c50d8ae3SPaolo Bonzini /*
965c50d8ae3SPaolo Bonzini  * Rules for using mmu_spte_clear_no_track:
966c50d8ae3SPaolo Bonzini  * Directly clear the spte without caring about the state bits of sptep;
967c50d8ae3SPaolo Bonzini  * it is used to clear an upper level spte.
968c50d8ae3SPaolo Bonzini  */
969c50d8ae3SPaolo Bonzini static void mmu_spte_clear_no_track(u64 *sptep)
970c50d8ae3SPaolo Bonzini {
971c50d8ae3SPaolo Bonzini 	__update_clear_spte_fast(sptep, 0ull);
972c50d8ae3SPaolo Bonzini }
973c50d8ae3SPaolo Bonzini 
974c50d8ae3SPaolo Bonzini static u64 mmu_spte_get_lockless(u64 *sptep)
975c50d8ae3SPaolo Bonzini {
976c50d8ae3SPaolo Bonzini 	return __get_spte_lockless(sptep);
977c50d8ae3SPaolo Bonzini }
978c50d8ae3SPaolo Bonzini 
979c50d8ae3SPaolo Bonzini static u64 mark_spte_for_access_track(u64 spte)
980c50d8ae3SPaolo Bonzini {
981c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte))
982c50d8ae3SPaolo Bonzini 		return spte & ~shadow_accessed_mask;
983c50d8ae3SPaolo Bonzini 
984c50d8ae3SPaolo Bonzini 	if (is_access_track_spte(spte))
985c50d8ae3SPaolo Bonzini 		return spte;
986c50d8ae3SPaolo Bonzini 
987c50d8ae3SPaolo Bonzini 	/*
988c50d8ae3SPaolo Bonzini 	 * Making an Access Tracking PTE will result in removal of write access
989c50d8ae3SPaolo Bonzini 	 * from the PTE. So, verify that we will be able to restore the write
990c50d8ae3SPaolo Bonzini 	 * access in the fast page fault path later on.
991c50d8ae3SPaolo Bonzini 	 */
992c50d8ae3SPaolo Bonzini 	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
993c50d8ae3SPaolo Bonzini 		  !spte_can_locklessly_be_made_writable(spte),
994c50d8ae3SPaolo Bonzini 		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");
995c50d8ae3SPaolo Bonzini 
996c50d8ae3SPaolo Bonzini 	WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask <<
997c50d8ae3SPaolo Bonzini 			  shadow_acc_track_saved_bits_shift),
998c50d8ae3SPaolo Bonzini 		  "kvm: Access Tracking saved bit locations are not zero\n");
999c50d8ae3SPaolo Bonzini 
1000c50d8ae3SPaolo Bonzini 	spte |= (spte & shadow_acc_track_saved_bits_mask) <<
1001c50d8ae3SPaolo Bonzini 		shadow_acc_track_saved_bits_shift;
1002c50d8ae3SPaolo Bonzini 	spte &= ~shadow_acc_track_mask;
1003c50d8ae3SPaolo Bonzini 
1004c50d8ae3SPaolo Bonzini 	return spte;
1005c50d8ae3SPaolo Bonzini }
1006c50d8ae3SPaolo Bonzini 
1007c50d8ae3SPaolo Bonzini /* Restore an acc-track PTE back to a regular PTE */
1008c50d8ae3SPaolo Bonzini static u64 restore_acc_track_spte(u64 spte)
1009c50d8ae3SPaolo Bonzini {
1010c50d8ae3SPaolo Bonzini 	u64 new_spte = spte;
1011c50d8ae3SPaolo Bonzini 	u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift)
1012c50d8ae3SPaolo Bonzini 			 & shadow_acc_track_saved_bits_mask;
1013c50d8ae3SPaolo Bonzini 
1014c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(spte_ad_enabled(spte));
1015c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!is_access_track_spte(spte));
1016c50d8ae3SPaolo Bonzini 
1017c50d8ae3SPaolo Bonzini 	new_spte &= ~shadow_acc_track_mask;
1018c50d8ae3SPaolo Bonzini 	new_spte &= ~(shadow_acc_track_saved_bits_mask <<
1019c50d8ae3SPaolo Bonzini 		      shadow_acc_track_saved_bits_shift);
1020c50d8ae3SPaolo Bonzini 	new_spte |= saved_bits;
1021c50d8ae3SPaolo Bonzini 
1022c50d8ae3SPaolo Bonzini 	return new_spte;
1023c50d8ae3SPaolo Bonzini }
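/*
 * Worked round trip (illustrative, EPT masks assumed): for an spte with the
 * R (bit 0) and X (bit 2) permissions set, mark_spte_for_access_track()
 * copies them to bits 54 and 56 (shadow_acc_track_saved_bits_shift) and
 * clears shadow_acc_track_mask, i.e. RWX for EPT, making the page
 * not-present to hardware; restore_acc_track_spte() copies them back and
 * clears the saved copies.  W is deliberately not saved; it is restored
 * on a write fault instead.
 */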
1024c50d8ae3SPaolo Bonzini 
1025c50d8ae3SPaolo Bonzini /* Returns the Accessed status of the PTE and resets it at the same time. */
1026c50d8ae3SPaolo Bonzini static bool mmu_spte_age(u64 *sptep)
1027c50d8ae3SPaolo Bonzini {
1028c50d8ae3SPaolo Bonzini 	u64 spte = mmu_spte_get_lockless(sptep);
1029c50d8ae3SPaolo Bonzini 
1030c50d8ae3SPaolo Bonzini 	if (!is_accessed_spte(spte))
1031c50d8ae3SPaolo Bonzini 		return false;
1032c50d8ae3SPaolo Bonzini 
1033c50d8ae3SPaolo Bonzini 	if (spte_ad_enabled(spte)) {
1034c50d8ae3SPaolo Bonzini 		clear_bit((ffs(shadow_accessed_mask) - 1),
1035c50d8ae3SPaolo Bonzini 			  (unsigned long *)sptep);
1036c50d8ae3SPaolo Bonzini 	} else {
1037c50d8ae3SPaolo Bonzini 		/*
1038c50d8ae3SPaolo Bonzini 		 * Capture the dirty status of the page, so that it doesn't get
1039c50d8ae3SPaolo Bonzini 		 * lost when the SPTE is marked for access tracking.
1040c50d8ae3SPaolo Bonzini 		 */
1041c50d8ae3SPaolo Bonzini 		if (is_writable_pte(spte))
1042c50d8ae3SPaolo Bonzini 			kvm_set_pfn_dirty(spte_to_pfn(spte));
1043c50d8ae3SPaolo Bonzini 
1044c50d8ae3SPaolo Bonzini 		spte = mark_spte_for_access_track(spte);
1045c50d8ae3SPaolo Bonzini 		mmu_spte_update_no_track(sptep, spte);
1046c50d8ae3SPaolo Bonzini 	}
1047c50d8ae3SPaolo Bonzini 
1048c50d8ae3SPaolo Bonzini 	return true;
1049c50d8ae3SPaolo Bonzini }
1050c50d8ae3SPaolo Bonzini 
1051c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
1052c50d8ae3SPaolo Bonzini {
1053c50d8ae3SPaolo Bonzini 	/*
1054c50d8ae3SPaolo Bonzini 	 * Prevent page table teardown by making any freer wait during the
1055c50d8ae3SPaolo Bonzini 	 * kvm_flush_remote_tlbs() IPI to all active vCPUs.
1056c50d8ae3SPaolo Bonzini 	 */
1057c50d8ae3SPaolo Bonzini 	local_irq_disable();
1058c50d8ae3SPaolo Bonzini 
1059c50d8ae3SPaolo Bonzini 	/*
1060c50d8ae3SPaolo Bonzini 	 * Make sure a following spte read is not reordered ahead of the write
1061c50d8ae3SPaolo Bonzini 	 * to vcpu->mode.
1062c50d8ae3SPaolo Bonzini 	 */
1063c50d8ae3SPaolo Bonzini 	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
1064c50d8ae3SPaolo Bonzini }
1065c50d8ae3SPaolo Bonzini 
1066c50d8ae3SPaolo Bonzini static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
1067c50d8ae3SPaolo Bonzini {
1068c50d8ae3SPaolo Bonzini 	/*
1069c50d8ae3SPaolo Bonzini 	 * Make sure the write to vcpu->mode is not reordered in front of
1070c50d8ae3SPaolo Bonzini 	 * reads to sptes.  If it is, kvm_mmu_commit_zap_page() can see us
1071c50d8ae3SPaolo Bonzini 	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
1072c50d8ae3SPaolo Bonzini 	 */
1073c50d8ae3SPaolo Bonzini 	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
1074c50d8ae3SPaolo Bonzini 	local_irq_enable();
1075c50d8ae3SPaolo Bonzini }
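
/*
 * The two helpers above must bracket any lockless walk; a sketch of the
 * pattern used by the lockless walkers later in this file:
 *
 *	walk_shadow_page_lockless_begin(vcpu);
 *	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 *		<read-only inspection of sptes>;
 *	walk_shadow_page_lockless_end(vcpu);
 *
 * No spte may be dereferenced outside the begin/end section: once
 * vcpu->mode is back to OUTSIDE_GUEST_MODE, the shadow pages holding the
 * sptes may be freed at any time.
 */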
1076c50d8ae3SPaolo Bonzini 
1077378f5cd6SSean Christopherson static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
1078c50d8ae3SPaolo Bonzini {
1079c50d8ae3SPaolo Bonzini 	int r;
1080c50d8ae3SPaolo Bonzini 
1081531281adSSean Christopherson 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
108294ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
1083531281adSSean Christopherson 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
1084c50d8ae3SPaolo Bonzini 	if (r)
1085c50d8ae3SPaolo Bonzini 		return r;
108694ce87efSSean Christopherson 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
1087171a90d7SSean Christopherson 				       PT64_ROOT_MAX_LEVEL);
1088171a90d7SSean Christopherson 	if (r)
1089171a90d7SSean Christopherson 		return r;
1090378f5cd6SSean Christopherson 	if (maybe_indirect) {
109194ce87efSSean Christopherson 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
1092171a90d7SSean Christopherson 					       PT64_ROOT_MAX_LEVEL);
1093c50d8ae3SPaolo Bonzini 		if (r)
1094c50d8ae3SPaolo Bonzini 			return r;
1095378f5cd6SSean Christopherson 	}
109694ce87efSSean Christopherson 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
1097531281adSSean Christopherson 					  PT64_ROOT_MAX_LEVEL);
1098c50d8ae3SPaolo Bonzini }
1099c50d8ae3SPaolo Bonzini 
1100c50d8ae3SPaolo Bonzini static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
1101c50d8ae3SPaolo Bonzini {
110294ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
110394ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
110494ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
110594ce87efSSean Christopherson 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
1106c50d8ae3SPaolo Bonzini }
1107c50d8ae3SPaolo Bonzini 
1108c50d8ae3SPaolo Bonzini static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
1109c50d8ae3SPaolo Bonzini {
111094ce87efSSean Christopherson 	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
1111c50d8ae3SPaolo Bonzini }
1112c50d8ae3SPaolo Bonzini 
1113c50d8ae3SPaolo Bonzini static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
1114c50d8ae3SPaolo Bonzini {
1115c50d8ae3SPaolo Bonzini 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
1116c50d8ae3SPaolo Bonzini }
1117c50d8ae3SPaolo Bonzini 
1118c50d8ae3SPaolo Bonzini static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
1119c50d8ae3SPaolo Bonzini {
1120c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
1121c50d8ae3SPaolo Bonzini 		return sp->gfns[index];
1122c50d8ae3SPaolo Bonzini 
1123c50d8ae3SPaolo Bonzini 	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
1124c50d8ae3SPaolo Bonzini }
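
/*
 * For a direct sp the gfn is computed rather than stored: each of the
 * 512 slots at role.level covers 2^((level - 1) * PT64_LEVEL_BITS) gfns.
 * Worked example: with sp->gfn == 0x1000 and sp->role.level == 2,
 * index 3 maps to gfn 0x1000 + (3 << 9) == 0x1600.
 */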
1125c50d8ae3SPaolo Bonzini 
1126c50d8ae3SPaolo Bonzini static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
1127c50d8ae3SPaolo Bonzini {
1128c50d8ae3SPaolo Bonzini 	if (!sp->role.direct) {
1129c50d8ae3SPaolo Bonzini 		sp->gfns[index] = gfn;
1130c50d8ae3SPaolo Bonzini 		return;
1131c50d8ae3SPaolo Bonzini 	}
1132c50d8ae3SPaolo Bonzini 
1133c50d8ae3SPaolo Bonzini 	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
1134c50d8ae3SPaolo Bonzini 		pr_err_ratelimited("gfn mismatch under direct page %llx "
1135c50d8ae3SPaolo Bonzini 				   "(expected %llx, got %llx)\n",
1136c50d8ae3SPaolo Bonzini 				   sp->gfn,
1137c50d8ae3SPaolo Bonzini 				   kvm_mmu_page_get_gfn(sp, index), gfn);
1138c50d8ae3SPaolo Bonzini }
1139c50d8ae3SPaolo Bonzini 
1140c50d8ae3SPaolo Bonzini /*
1141c50d8ae3SPaolo Bonzini  * Return the pointer to the large page information for a given gfn,
1142c50d8ae3SPaolo Bonzini  * handling slots that are not large page aligned.
1143c50d8ae3SPaolo Bonzini  */
1144c50d8ae3SPaolo Bonzini static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
1145c50d8ae3SPaolo Bonzini 					      struct kvm_memory_slot *slot,
1146c50d8ae3SPaolo Bonzini 					      int level)
1147c50d8ae3SPaolo Bonzini {
1148c50d8ae3SPaolo Bonzini 	unsigned long idx;
1149c50d8ae3SPaolo Bonzini 
1150c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
1151c50d8ae3SPaolo Bonzini 	return &slot->arch.lpage_info[level - 2][idx];
1152c50d8ae3SPaolo Bonzini }
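
/*
 * Worked example: for a slot with base_gfn 0x800 at level PG_LEVEL_2M
 * (KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) == 9), gfn 0xa05 yields
 * idx == (0xa05 >> 9) - (0x800 >> 9) == 5 - 4 == 1, i.e. the second 2M
 * region of the slot.  The "level - 2" reflects that the lpage_info
 * arrays only exist for PG_LEVEL_2M and up.
 */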
1153c50d8ae3SPaolo Bonzini 
1154c50d8ae3SPaolo Bonzini static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
1155c50d8ae3SPaolo Bonzini 					    gfn_t gfn, int count)
1156c50d8ae3SPaolo Bonzini {
1157c50d8ae3SPaolo Bonzini 	struct kvm_lpage_info *linfo;
1158c50d8ae3SPaolo Bonzini 	int i;
1159c50d8ae3SPaolo Bonzini 
11603bae0459SSean Christopherson 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1161c50d8ae3SPaolo Bonzini 		linfo = lpage_info_slot(gfn, slot, i);
1162c50d8ae3SPaolo Bonzini 		linfo->disallow_lpage += count;
1163c50d8ae3SPaolo Bonzini 		WARN_ON(linfo->disallow_lpage < 0);
1164c50d8ae3SPaolo Bonzini 	}
1165c50d8ae3SPaolo Bonzini }
1166c50d8ae3SPaolo Bonzini 
1167c50d8ae3SPaolo Bonzini void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
1168c50d8ae3SPaolo Bonzini {
1169c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, 1);
1170c50d8ae3SPaolo Bonzini }
1171c50d8ae3SPaolo Bonzini 
1172c50d8ae3SPaolo Bonzini void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
1173c50d8ae3SPaolo Bonzini {
1174c50d8ae3SPaolo Bonzini 	update_gfn_disallow_lpage_count(slot, gfn, -1);
1175c50d8ae3SPaolo Bonzini }
1176c50d8ae3SPaolo Bonzini 
1177c50d8ae3SPaolo Bonzini static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
1178c50d8ae3SPaolo Bonzini {
1179c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
1180c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1181c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1182c50d8ae3SPaolo Bonzini 
1183c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages++;
1184c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
1185c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
1186c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
1187c50d8ae3SPaolo Bonzini 
1188c50d8ae3SPaolo Bonzini 	/* Non-leaf shadow pages are kept write-protected (readonly). */
11893bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
1190c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_add_page(kvm, slot, gfn,
1191c50d8ae3SPaolo Bonzini 						    KVM_PAGE_TRACK_WRITE);
1192c50d8ae3SPaolo Bonzini 
1193c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
1194c50d8ae3SPaolo Bonzini }
1195c50d8ae3SPaolo Bonzini 
1196c50d8ae3SPaolo Bonzini static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1197c50d8ae3SPaolo Bonzini {
1198c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
1199c50d8ae3SPaolo Bonzini 		return;
1200c50d8ae3SPaolo Bonzini 
1201c50d8ae3SPaolo Bonzini 	++kvm->stat.nx_lpage_splits;
1202c50d8ae3SPaolo Bonzini 	list_add_tail(&sp->lpage_disallowed_link,
1203c50d8ae3SPaolo Bonzini 		      &kvm->arch.lpage_disallowed_mmu_pages);
1204c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = true;
1205c50d8ae3SPaolo Bonzini }
1206c50d8ae3SPaolo Bonzini 
1207c50d8ae3SPaolo Bonzini static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
1208c50d8ae3SPaolo Bonzini {
1209c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
1210c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1211c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1212c50d8ae3SPaolo Bonzini 
1213c50d8ae3SPaolo Bonzini 	kvm->arch.indirect_shadow_pages--;
1214c50d8ae3SPaolo Bonzini 	gfn = sp->gfn;
1215c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
1216c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
12173bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
1218c50d8ae3SPaolo Bonzini 		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
1219c50d8ae3SPaolo Bonzini 						       KVM_PAGE_TRACK_WRITE);
1220c50d8ae3SPaolo Bonzini 
1221c50d8ae3SPaolo Bonzini 	kvm_mmu_gfn_allow_lpage(slot, gfn);
1222c50d8ae3SPaolo Bonzini }
1223c50d8ae3SPaolo Bonzini 
1224c50d8ae3SPaolo Bonzini static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1225c50d8ae3SPaolo Bonzini {
1226c50d8ae3SPaolo Bonzini 	--kvm->stat.nx_lpage_splits;
1227c50d8ae3SPaolo Bonzini 	sp->lpage_disallowed = false;
1228c50d8ae3SPaolo Bonzini 	list_del(&sp->lpage_disallowed_link);
1229c50d8ae3SPaolo Bonzini }
1230c50d8ae3SPaolo Bonzini 
1231c50d8ae3SPaolo Bonzini static struct kvm_memory_slot *
1232c50d8ae3SPaolo Bonzini gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
1233c50d8ae3SPaolo Bonzini 			    bool no_dirty_log)
1234c50d8ae3SPaolo Bonzini {
1235c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1236c50d8ae3SPaolo Bonzini 
1237c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
123891b0d268SPaolo Bonzini 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
123991b0d268SPaolo Bonzini 		return NULL;
124091b0d268SPaolo Bonzini 	if (no_dirty_log && slot->dirty_bitmap)
124191b0d268SPaolo Bonzini 		return NULL;
1242c50d8ae3SPaolo Bonzini 
1243c50d8ae3SPaolo Bonzini 	return slot;
1244c50d8ae3SPaolo Bonzini }
1245c50d8ae3SPaolo Bonzini 
1246c50d8ae3SPaolo Bonzini /*
1247c50d8ae3SPaolo Bonzini  * About rmap_head encoding:
1248c50d8ae3SPaolo Bonzini  *
1249c50d8ae3SPaolo Bonzini  * If bit zero of rmap_head->val is clear, then it points to the only spte
1250c50d8ae3SPaolo Bonzini  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
1251c50d8ae3SPaolo Bonzini  * pte_list_desc containing more mappings.
1252c50d8ae3SPaolo Bonzini  */
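
/*
 * A minimal decode of this encoding, mirroring what pte_list_add() and
 * rmap_get_first() below actually do:
 *
 *	if (!rmap_head->val)
 *		<empty chain>;
 *	else if (!(rmap_head->val & 1))
 *		sptep = (u64 *)rmap_head->val;
 *	else
 *		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 */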
1253c50d8ae3SPaolo Bonzini 
1254c50d8ae3SPaolo Bonzini /*
1255c50d8ae3SPaolo Bonzini  * Returns the number of pointers in the rmap chain, not counting the new one.
1256c50d8ae3SPaolo Bonzini  */
1257c50d8ae3SPaolo Bonzini static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
1258c50d8ae3SPaolo Bonzini 			struct kvm_rmap_head *rmap_head)
1259c50d8ae3SPaolo Bonzini {
1260c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
1261c50d8ae3SPaolo Bonzini 	int i, count = 0;
1262c50d8ae3SPaolo Bonzini 
1263c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
1264c50d8ae3SPaolo Bonzini 		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
1265c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)spte;
1266c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
1267c50d8ae3SPaolo Bonzini 		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
1268c50d8ae3SPaolo Bonzini 		desc = mmu_alloc_pte_list_desc(vcpu);
1269c50d8ae3SPaolo Bonzini 		desc->sptes[0] = (u64 *)rmap_head->val;
1270c50d8ae3SPaolo Bonzini 		desc->sptes[1] = spte;
1271c50d8ae3SPaolo Bonzini 		rmap_head->val = (unsigned long)desc | 1;
1272c50d8ae3SPaolo Bonzini 		++count;
1273c50d8ae3SPaolo Bonzini 	} else {
1274c50d8ae3SPaolo Bonzini 		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
1275c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1276c50d8ae3SPaolo Bonzini 		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
1277c50d8ae3SPaolo Bonzini 			desc = desc->more;
1278c50d8ae3SPaolo Bonzini 			count += PTE_LIST_EXT;
1279c50d8ae3SPaolo Bonzini 		}
1280c50d8ae3SPaolo Bonzini 		if (desc->sptes[PTE_LIST_EXT-1]) {
1281c50d8ae3SPaolo Bonzini 			desc->more = mmu_alloc_pte_list_desc(vcpu);
1282c50d8ae3SPaolo Bonzini 			desc = desc->more;
1283c50d8ae3SPaolo Bonzini 		}
1284c50d8ae3SPaolo Bonzini 		for (i = 0; desc->sptes[i]; ++i)
1285c50d8ae3SPaolo Bonzini 			++count;
1286c50d8ae3SPaolo Bonzini 		desc->sptes[i] = spte;
1287c50d8ae3SPaolo Bonzini 	}
1288c50d8ae3SPaolo Bonzini 	return count;
1289c50d8ae3SPaolo Bonzini }
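
/*
 * On the return value above: the first spte added to an empty rmap_head
 * returns 0, and the second (which allocates the first pte_list_desc)
 * returns 1.  Once several descriptors exist the count can undercount a
 * full tail descriptor, but that is fine; the caller that looks at the
 * result compares it against RMAP_RECYCLE_THRESHOLD (see below) and only
 * needs a rough measure.
 */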
1290c50d8ae3SPaolo Bonzini 
1291c50d8ae3SPaolo Bonzini static void
1292c50d8ae3SPaolo Bonzini pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
1293c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *desc, int i,
1294c50d8ae3SPaolo Bonzini 			   struct pte_list_desc *prev_desc)
1295c50d8ae3SPaolo Bonzini {
1296c50d8ae3SPaolo Bonzini 	int j;
1297c50d8ae3SPaolo Bonzini 
1298c50d8ae3SPaolo Bonzini 	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
1299c50d8ae3SPaolo Bonzini 		;
1300c50d8ae3SPaolo Bonzini 	desc->sptes[i] = desc->sptes[j];
1301c50d8ae3SPaolo Bonzini 	desc->sptes[j] = NULL;
1302c50d8ae3SPaolo Bonzini 	if (j != 0)
1303c50d8ae3SPaolo Bonzini 		return;
1304c50d8ae3SPaolo Bonzini 	if (!prev_desc && !desc->more)
1305fe3c2b4cSMiaohe Lin 		rmap_head->val = 0;
1306c50d8ae3SPaolo Bonzini 	else
1307c50d8ae3SPaolo Bonzini 		if (prev_desc)
1308c50d8ae3SPaolo Bonzini 			prev_desc->more = desc->more;
1309c50d8ae3SPaolo Bonzini 		else
1310c50d8ae3SPaolo Bonzini 			rmap_head->val = (unsigned long)desc->more | 1;
1311c50d8ae3SPaolo Bonzini 	mmu_free_pte_list_desc(desc);
1312c50d8ae3SPaolo Bonzini }
1313c50d8ae3SPaolo Bonzini 
1314c50d8ae3SPaolo Bonzini static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
1315c50d8ae3SPaolo Bonzini {
1316c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;
1317c50d8ae3SPaolo Bonzini 	struct pte_list_desc *prev_desc;
1318c50d8ae3SPaolo Bonzini 	int i;
1319c50d8ae3SPaolo Bonzini 
1320c50d8ae3SPaolo Bonzini 	if (!rmap_head->val) {
1321c50d8ae3SPaolo Bonzini 		pr_err("%s: %p 0->BUG\n", __func__, spte);
1322c50d8ae3SPaolo Bonzini 		BUG();
1323c50d8ae3SPaolo Bonzini 	} else if (!(rmap_head->val & 1)) {
1324c50d8ae3SPaolo Bonzini 		rmap_printk("%s:  %p 1->0\n", __func__, spte);
1325c50d8ae3SPaolo Bonzini 		if ((u64 *)rmap_head->val != spte) {
1326c50d8ae3SPaolo Bonzini 			pr_err("%s:  %p 1->BUG\n", __func__, spte);
1327c50d8ae3SPaolo Bonzini 			BUG();
1328c50d8ae3SPaolo Bonzini 		}
1329c50d8ae3SPaolo Bonzini 		rmap_head->val = 0;
1330c50d8ae3SPaolo Bonzini 	} else {
1331c50d8ae3SPaolo Bonzini 		rmap_printk("%s:  %p many->many\n", __func__, spte);
1332c50d8ae3SPaolo Bonzini 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1333c50d8ae3SPaolo Bonzini 		prev_desc = NULL;
1334c50d8ae3SPaolo Bonzini 		while (desc) {
1335c50d8ae3SPaolo Bonzini 			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
1336c50d8ae3SPaolo Bonzini 				if (desc->sptes[i] == spte) {
1337c50d8ae3SPaolo Bonzini 					pte_list_desc_remove_entry(rmap_head,
1338c50d8ae3SPaolo Bonzini 							desc, i, prev_desc);
1339c50d8ae3SPaolo Bonzini 					return;
1340c50d8ae3SPaolo Bonzini 				}
1341c50d8ae3SPaolo Bonzini 			}
1342c50d8ae3SPaolo Bonzini 			prev_desc = desc;
1343c50d8ae3SPaolo Bonzini 			desc = desc->more;
1344c50d8ae3SPaolo Bonzini 		}
1345c50d8ae3SPaolo Bonzini 		pr_err("%s: %p many->many\n", __func__, spte);
1346c50d8ae3SPaolo Bonzini 		BUG();
1347c50d8ae3SPaolo Bonzini 	}
1348c50d8ae3SPaolo Bonzini }
1349c50d8ae3SPaolo Bonzini 
1350c50d8ae3SPaolo Bonzini static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
1351c50d8ae3SPaolo Bonzini {
1352c50d8ae3SPaolo Bonzini 	mmu_spte_clear_track_bits(sptep);
1353c50d8ae3SPaolo Bonzini 	__pte_list_remove(sptep, rmap_head);
1354c50d8ae3SPaolo Bonzini }
1355c50d8ae3SPaolo Bonzini 
1356c50d8ae3SPaolo Bonzini static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
1357c50d8ae3SPaolo Bonzini 					   struct kvm_memory_slot *slot)
1358c50d8ae3SPaolo Bonzini {
1359c50d8ae3SPaolo Bonzini 	unsigned long idx;
1360c50d8ae3SPaolo Bonzini 
1361c50d8ae3SPaolo Bonzini 	idx = gfn_to_index(gfn, slot->base_gfn, level);
13623bae0459SSean Christopherson 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1363c50d8ae3SPaolo Bonzini }
1364c50d8ae3SPaolo Bonzini 
1365c50d8ae3SPaolo Bonzini static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
1366c50d8ae3SPaolo Bonzini 					 struct kvm_mmu_page *sp)
1367c50d8ae3SPaolo Bonzini {
1368c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
1369c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1370c50d8ae3SPaolo Bonzini 
1371c50d8ae3SPaolo Bonzini 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
1372c50d8ae3SPaolo Bonzini 	slot = __gfn_to_memslot(slots, gfn);
1373c50d8ae3SPaolo Bonzini 	return __gfn_to_rmap(gfn, sp->role.level, slot);
1374c50d8ae3SPaolo Bonzini }
1375c50d8ae3SPaolo Bonzini 
1376c50d8ae3SPaolo Bonzini static bool rmap_can_add(struct kvm_vcpu *vcpu)
1377c50d8ae3SPaolo Bonzini {
1378356ec69aSSean Christopherson 	struct kvm_mmu_memory_cache *mc;
1379c50d8ae3SPaolo Bonzini 
1380356ec69aSSean Christopherson 	mc = &vcpu->arch.mmu_pte_list_desc_cache;
138194ce87efSSean Christopherson 	return kvm_mmu_memory_cache_nr_free_objects(mc);
1382c50d8ae3SPaolo Bonzini }
1383c50d8ae3SPaolo Bonzini 
1384c50d8ae3SPaolo Bonzini static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1385c50d8ae3SPaolo Bonzini {
1386c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1387c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1388c50d8ae3SPaolo Bonzini 
138957354682SSean Christopherson 	sp = sptep_to_sp(spte);
1390c50d8ae3SPaolo Bonzini 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1391c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1392c50d8ae3SPaolo Bonzini 	return pte_list_add(vcpu, spte, rmap_head);
1393c50d8ae3SPaolo Bonzini }
1394c50d8ae3SPaolo Bonzini 
1395c50d8ae3SPaolo Bonzini static void rmap_remove(struct kvm *kvm, u64 *spte)
1396c50d8ae3SPaolo Bonzini {
1397c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1398c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1399c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1400c50d8ae3SPaolo Bonzini 
140157354682SSean Christopherson 	sp = sptep_to_sp(spte);
1402c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1403c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(kvm, gfn, sp);
1404c50d8ae3SPaolo Bonzini 	__pte_list_remove(spte, rmap_head);
1405c50d8ae3SPaolo Bonzini }
1406c50d8ae3SPaolo Bonzini 
1407c50d8ae3SPaolo Bonzini /*
1408c50d8ae3SPaolo Bonzini  * Used by the following functions to iterate through the sptes linked by a
1409c50d8ae3SPaolo Bonzini  * rmap.  All fields are private and must not be used outside.
1410c50d8ae3SPaolo Bonzini  */
1411c50d8ae3SPaolo Bonzini struct rmap_iterator {
1412c50d8ae3SPaolo Bonzini 	/* private fields */
1413c50d8ae3SPaolo Bonzini 	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
1414c50d8ae3SPaolo Bonzini 	int pos;			/* index of the sptep */
1415c50d8ae3SPaolo Bonzini };
1416c50d8ae3SPaolo Bonzini 
1417c50d8ae3SPaolo Bonzini /*
1418c50d8ae3SPaolo Bonzini  * Iteration must be started by this function.  This should also be used after
1419c50d8ae3SPaolo Bonzini  * removing/dropping sptes from the rmap link because in such cases the
14200a03cbdaSMiaohe Lin  * information in the iterator may not be valid.
1421c50d8ae3SPaolo Bonzini  *
1422c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1423c50d8ae3SPaolo Bonzini  */
1424c50d8ae3SPaolo Bonzini static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1425c50d8ae3SPaolo Bonzini 			   struct rmap_iterator *iter)
1426c50d8ae3SPaolo Bonzini {
1427c50d8ae3SPaolo Bonzini 	u64 *sptep;
1428c50d8ae3SPaolo Bonzini 
1429c50d8ae3SPaolo Bonzini 	if (!rmap_head->val)
1430c50d8ae3SPaolo Bonzini 		return NULL;
1431c50d8ae3SPaolo Bonzini 
1432c50d8ae3SPaolo Bonzini 	if (!(rmap_head->val & 1)) {
1433c50d8ae3SPaolo Bonzini 		iter->desc = NULL;
1434c50d8ae3SPaolo Bonzini 		sptep = (u64 *)rmap_head->val;
1435c50d8ae3SPaolo Bonzini 		goto out;
1436c50d8ae3SPaolo Bonzini 	}
1437c50d8ae3SPaolo Bonzini 
1438c50d8ae3SPaolo Bonzini 	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1439c50d8ae3SPaolo Bonzini 	iter->pos = 0;
1440c50d8ae3SPaolo Bonzini 	sptep = iter->desc->sptes[iter->pos];
1441c50d8ae3SPaolo Bonzini out:
1442c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1443c50d8ae3SPaolo Bonzini 	return sptep;
1444c50d8ae3SPaolo Bonzini }
1445c50d8ae3SPaolo Bonzini 
1446c50d8ae3SPaolo Bonzini /*
1447c50d8ae3SPaolo Bonzini  * Must be used with a valid iterator: e.g. after rmap_get_first().
1448c50d8ae3SPaolo Bonzini  *
1449c50d8ae3SPaolo Bonzini  * Returns sptep if found, NULL otherwise.
1450c50d8ae3SPaolo Bonzini  */
1451c50d8ae3SPaolo Bonzini static u64 *rmap_get_next(struct rmap_iterator *iter)
1452c50d8ae3SPaolo Bonzini {
1453c50d8ae3SPaolo Bonzini 	u64 *sptep;
1454c50d8ae3SPaolo Bonzini 
1455c50d8ae3SPaolo Bonzini 	if (iter->desc) {
1456c50d8ae3SPaolo Bonzini 		if (iter->pos < PTE_LIST_EXT - 1) {
1457c50d8ae3SPaolo Bonzini 			++iter->pos;
1458c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1459c50d8ae3SPaolo Bonzini 			if (sptep)
1460c50d8ae3SPaolo Bonzini 				goto out;
1461c50d8ae3SPaolo Bonzini 		}
1462c50d8ae3SPaolo Bonzini 
1463c50d8ae3SPaolo Bonzini 		iter->desc = iter->desc->more;
1464c50d8ae3SPaolo Bonzini 
1465c50d8ae3SPaolo Bonzini 		if (iter->desc) {
1466c50d8ae3SPaolo Bonzini 			iter->pos = 0;
1467c50d8ae3SPaolo Bonzini 			/* desc->sptes[0] cannot be NULL */
1468c50d8ae3SPaolo Bonzini 			sptep = iter->desc->sptes[iter->pos];
1469c50d8ae3SPaolo Bonzini 			goto out;
1470c50d8ae3SPaolo Bonzini 		}
1471c50d8ae3SPaolo Bonzini 	}
1472c50d8ae3SPaolo Bonzini 
1473c50d8ae3SPaolo Bonzini 	return NULL;
1474c50d8ae3SPaolo Bonzini out:
1475c50d8ae3SPaolo Bonzini 	BUG_ON(!is_shadow_present_pte(*sptep));
1476c50d8ae3SPaolo Bonzini 	return sptep;
1477c50d8ae3SPaolo Bonzini }
1478c50d8ae3SPaolo Bonzini 
1479c50d8ae3SPaolo Bonzini #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
1480c50d8ae3SPaolo Bonzini 	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
1481c50d8ae3SPaolo Bonzini 	     _spte_; _spte_ = rmap_get_next(_iter_))
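
/*
 * for_each_rmap_spte() is not safe against removal of the current spte:
 * dropping an entry rewrites the chain, so walkers that zap sptes restart
 * the walk instead, as kvm_zap_rmapp() and kvm_set_pte_rmapp() below do.
 */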
1482c50d8ae3SPaolo Bonzini 
1483c50d8ae3SPaolo Bonzini static void drop_spte(struct kvm *kvm, u64 *sptep)
1484c50d8ae3SPaolo Bonzini {
1485c50d8ae3SPaolo Bonzini 	if (mmu_spte_clear_track_bits(sptep))
1486c50d8ae3SPaolo Bonzini 		rmap_remove(kvm, sptep);
1487c50d8ae3SPaolo Bonzini }
1488c50d8ae3SPaolo Bonzini 
1489c50d8ae3SPaolo Bonzini 
1490c50d8ae3SPaolo Bonzini static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1491c50d8ae3SPaolo Bonzini {
1492c50d8ae3SPaolo Bonzini 	if (is_large_pte(*sptep)) {
149357354682SSean Christopherson 		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1494c50d8ae3SPaolo Bonzini 		drop_spte(kvm, sptep);
1495c50d8ae3SPaolo Bonzini 		--kvm->stat.lpages;
1496c50d8ae3SPaolo Bonzini 		return true;
1497c50d8ae3SPaolo Bonzini 	}
1498c50d8ae3SPaolo Bonzini 
1499c50d8ae3SPaolo Bonzini 	return false;
1500c50d8ae3SPaolo Bonzini }
1501c50d8ae3SPaolo Bonzini 
1502c50d8ae3SPaolo Bonzini static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1503c50d8ae3SPaolo Bonzini {
1504c50d8ae3SPaolo Bonzini 	if (__drop_large_spte(vcpu->kvm, sptep)) {
150557354682SSean Christopherson 		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1506c50d8ae3SPaolo Bonzini 
1507c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1508c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1509c50d8ae3SPaolo Bonzini 	}
1510c50d8ae3SPaolo Bonzini }
1511c50d8ae3SPaolo Bonzini 
1512c50d8ae3SPaolo Bonzini /*
1513c50d8ae3SPaolo Bonzini  * Write-protect the specified @sptep; @pt_protect indicates whether the
1514c50d8ae3SPaolo Bonzini  * spte is being write-protected to protect a shadow page table.
1515c50d8ae3SPaolo Bonzini  *
1516c50d8ae3SPaolo Bonzini  * Note: write protection means different things for dirty logging and for
1517c50d8ae3SPaolo Bonzini  * spte protection:
1518c50d8ae3SPaolo Bonzini  * - for dirty logging, the spte can be made writable at any time if
1519c50d8ae3SPaolo Bonzini  *   its dirty bitmap is properly set.
1520c50d8ae3SPaolo Bonzini  * - for spte protection, the spte can be made writable only after the
1521c50d8ae3SPaolo Bonzini  *   shadow page is unsync'd.
1522c50d8ae3SPaolo Bonzini  *
1523c50d8ae3SPaolo Bonzini  * Return true if the TLB needs to be flushed.
1524c50d8ae3SPaolo Bonzini  */
1525c50d8ae3SPaolo Bonzini static bool spte_write_protect(u64 *sptep, bool pt_protect)
1526c50d8ae3SPaolo Bonzini {
1527c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1528c50d8ae3SPaolo Bonzini 
1529c50d8ae3SPaolo Bonzini 	if (!is_writable_pte(spte) &&
1530c50d8ae3SPaolo Bonzini 	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1531c50d8ae3SPaolo Bonzini 		return false;
1532c50d8ae3SPaolo Bonzini 
1533c50d8ae3SPaolo Bonzini 	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
1534c50d8ae3SPaolo Bonzini 
1535c50d8ae3SPaolo Bonzini 	if (pt_protect)
1536c50d8ae3SPaolo Bonzini 		spte &= ~SPTE_MMU_WRITEABLE;
1537c50d8ae3SPaolo Bonzini 	spte &= ~PT_WRITABLE_MASK;
1538c50d8ae3SPaolo Bonzini 
1539c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1540c50d8ae3SPaolo Bonzini }
1541c50d8ae3SPaolo Bonzini 
1542c50d8ae3SPaolo Bonzini static bool __rmap_write_protect(struct kvm *kvm,
1543c50d8ae3SPaolo Bonzini 				 struct kvm_rmap_head *rmap_head,
1544c50d8ae3SPaolo Bonzini 				 bool pt_protect)
1545c50d8ae3SPaolo Bonzini {
1546c50d8ae3SPaolo Bonzini 	u64 *sptep;
1547c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1548c50d8ae3SPaolo Bonzini 	bool flush = false;
1549c50d8ae3SPaolo Bonzini 
1550c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1551c50d8ae3SPaolo Bonzini 		flush |= spte_write_protect(sptep, pt_protect);
1552c50d8ae3SPaolo Bonzini 
1553c50d8ae3SPaolo Bonzini 	return flush;
1554c50d8ae3SPaolo Bonzini }
1555c50d8ae3SPaolo Bonzini 
1556c50d8ae3SPaolo Bonzini static bool spte_clear_dirty(u64 *sptep)
1557c50d8ae3SPaolo Bonzini {
1558c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1559c50d8ae3SPaolo Bonzini 
1560c50d8ae3SPaolo Bonzini 	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
1561c50d8ae3SPaolo Bonzini 
1562c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!spte_ad_enabled(spte));
1563c50d8ae3SPaolo Bonzini 	spte &= ~shadow_dirty_mask;
1564c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1565c50d8ae3SPaolo Bonzini }
1566c50d8ae3SPaolo Bonzini 
1567c50d8ae3SPaolo Bonzini static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1568c50d8ae3SPaolo Bonzini {
1569c50d8ae3SPaolo Bonzini 	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1570c50d8ae3SPaolo Bonzini 					       (unsigned long *)sptep);
1571c50d8ae3SPaolo Bonzini 	if (was_writable && !spte_ad_enabled(*sptep))
1572c50d8ae3SPaolo Bonzini 		kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1573c50d8ae3SPaolo Bonzini 
1574c50d8ae3SPaolo Bonzini 	return was_writable;
1575c50d8ae3SPaolo Bonzini }
1576c50d8ae3SPaolo Bonzini 
1577c50d8ae3SPaolo Bonzini /*
1578c50d8ae3SPaolo Bonzini  * Gets the GFN ready for another round of dirty logging by clearing the
1579c50d8ae3SPaolo Bonzini  *	- D bit on ad-enabled SPTEs, and
1580c50d8ae3SPaolo Bonzini  *	- W bit on ad-disabled SPTEs.
1581c50d8ae3SPaolo Bonzini  * Returns true iff any D or W bits were cleared.
1582c50d8ae3SPaolo Bonzini  */
1583c50d8ae3SPaolo Bonzini static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1584c50d8ae3SPaolo Bonzini {
1585c50d8ae3SPaolo Bonzini 	u64 *sptep;
1586c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1587c50d8ae3SPaolo Bonzini 	bool flush = false;
1588c50d8ae3SPaolo Bonzini 
1589c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1590c50d8ae3SPaolo Bonzini 		if (spte_ad_need_write_protect(*sptep))
1591c50d8ae3SPaolo Bonzini 			flush |= spte_wrprot_for_clear_dirty(sptep);
1592c50d8ae3SPaolo Bonzini 		else
1593c50d8ae3SPaolo Bonzini 			flush |= spte_clear_dirty(sptep);
1594c50d8ae3SPaolo Bonzini 
1595c50d8ae3SPaolo Bonzini 	return flush;
1596c50d8ae3SPaolo Bonzini }
1597c50d8ae3SPaolo Bonzini 
1598c50d8ae3SPaolo Bonzini static bool spte_set_dirty(u64 *sptep)
1599c50d8ae3SPaolo Bonzini {
1600c50d8ae3SPaolo Bonzini 	u64 spte = *sptep;
1601c50d8ae3SPaolo Bonzini 
1602c50d8ae3SPaolo Bonzini 	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
1603c50d8ae3SPaolo Bonzini 
1604c50d8ae3SPaolo Bonzini 	/*
1605afaf0b2fSSean Christopherson 	 * Similar to the !kvm_x86_ops.slot_disable_log_dirty case,
1606c50d8ae3SPaolo Bonzini 	 * do not bother adding back write access to pages marked
1607c50d8ae3SPaolo Bonzini 	 * SPTE_AD_WRPROT_ONLY_MASK.
1608c50d8ae3SPaolo Bonzini 	 */
1609c50d8ae3SPaolo Bonzini 	spte |= shadow_dirty_mask;
1610c50d8ae3SPaolo Bonzini 
1611c50d8ae3SPaolo Bonzini 	return mmu_spte_update(sptep, spte);
1612c50d8ae3SPaolo Bonzini }
1613c50d8ae3SPaolo Bonzini 
1614c50d8ae3SPaolo Bonzini static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1615c50d8ae3SPaolo Bonzini {
1616c50d8ae3SPaolo Bonzini 	u64 *sptep;
1617c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1618c50d8ae3SPaolo Bonzini 	bool flush = false;
1619c50d8ae3SPaolo Bonzini 
1620c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1621c50d8ae3SPaolo Bonzini 		if (spte_ad_enabled(*sptep))
1622c50d8ae3SPaolo Bonzini 			flush |= spte_set_dirty(sptep);
1623c50d8ae3SPaolo Bonzini 
1624c50d8ae3SPaolo Bonzini 	return flush;
1625c50d8ae3SPaolo Bonzini }
1626c50d8ae3SPaolo Bonzini 
1627c50d8ae3SPaolo Bonzini /**
1628c50d8ae3SPaolo Bonzini  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1629c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1630c50d8ae3SPaolo Bonzini  * @slot: slot to protect
1631c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1632c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should protect
1633c50d8ae3SPaolo Bonzini  *
1634c50d8ae3SPaolo Bonzini  * Used when we do not need to care about huge page mappings: e.g. during dirty
1635c50d8ae3SPaolo Bonzini  * logging we do not have any such mappings.
1636c50d8ae3SPaolo Bonzini  */
1637c50d8ae3SPaolo Bonzini static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1638c50d8ae3SPaolo Bonzini 				     struct kvm_memory_slot *slot,
1639c50d8ae3SPaolo Bonzini 				     gfn_t gfn_offset, unsigned long mask)
1640c50d8ae3SPaolo Bonzini {
1641c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1642c50d8ae3SPaolo Bonzini 
1643c50d8ae3SPaolo Bonzini 	while (mask) {
1644c50d8ae3SPaolo Bonzini 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
16453bae0459SSean Christopherson 					  PG_LEVEL_4K, slot);
1646c50d8ae3SPaolo Bonzini 		__rmap_write_protect(kvm, rmap_head, false);
1647c50d8ae3SPaolo Bonzini 
1648c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1649c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1650c50d8ae3SPaolo Bonzini 	}
1651c50d8ae3SPaolo Bonzini }
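
/*
 * The loop above peels off one set bit per iteration; "mask &= mask - 1"
 * clears the lowest set bit.  E.g. with gfn_offset 0x40 and mask 0b1010,
 * the first pass write-protects gfn slot->base_gfn + 0x41
 * (__ffs(mask) == 1) and leaves mask == 0b1000, the second pass protects
 * slot->base_gfn + 0x43, and the loop exits.
 */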
1652c50d8ae3SPaolo Bonzini 
1653c50d8ae3SPaolo Bonzini /**
1654c50d8ae3SPaolo Bonzini  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1655c50d8ae3SPaolo Bonzini  * protect the page if the D-bit isn't supported.
1656c50d8ae3SPaolo Bonzini  * @kvm: kvm instance
1657c50d8ae3SPaolo Bonzini  * @slot: slot to clear D-bit
1658c50d8ae3SPaolo Bonzini  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1659c50d8ae3SPaolo Bonzini  * @mask: indicates which pages we should clear the D-bit for
1660c50d8ae3SPaolo Bonzini  *
1661c50d8ae3SPaolo Bonzini  * Used for PML to re-log the dirty GPAs after userspace queries dirty_bitmap.
1662c50d8ae3SPaolo Bonzini  */
1663c50d8ae3SPaolo Bonzini void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1664c50d8ae3SPaolo Bonzini 				     struct kvm_memory_slot *slot,
1665c50d8ae3SPaolo Bonzini 				     gfn_t gfn_offset, unsigned long mask)
1666c50d8ae3SPaolo Bonzini {
1667c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1668c50d8ae3SPaolo Bonzini 
1669c50d8ae3SPaolo Bonzini 	while (mask) {
1670c50d8ae3SPaolo Bonzini 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
16713bae0459SSean Christopherson 					  PG_LEVEL_4K, slot);
1672c50d8ae3SPaolo Bonzini 		__rmap_clear_dirty(kvm, rmap_head);
1673c50d8ae3SPaolo Bonzini 
1674c50d8ae3SPaolo Bonzini 		/* clear the first set bit */
1675c50d8ae3SPaolo Bonzini 		mask &= mask - 1;
1676c50d8ae3SPaolo Bonzini 	}
1677c50d8ae3SPaolo Bonzini }
1678c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
1679c50d8ae3SPaolo Bonzini 
1680c50d8ae3SPaolo Bonzini /**
1681c50d8ae3SPaolo Bonzini  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1682c50d8ae3SPaolo Bonzini  * PT level pages.
1683c50d8ae3SPaolo Bonzini  *
1684c50d8ae3SPaolo Bonzini  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1685c50d8ae3SPaolo Bonzini  * enable dirty logging for them.
1686c50d8ae3SPaolo Bonzini  *
1687c50d8ae3SPaolo Bonzini  * Used when we do not need to care about huge page mappings: e.g. during dirty
1688c50d8ae3SPaolo Bonzini  * logging we do not have any such mappings.
1689c50d8ae3SPaolo Bonzini  */
1690c50d8ae3SPaolo Bonzini void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1691c50d8ae3SPaolo Bonzini 				struct kvm_memory_slot *slot,
1692c50d8ae3SPaolo Bonzini 				gfn_t gfn_offset, unsigned long mask)
1693c50d8ae3SPaolo Bonzini {
1694afaf0b2fSSean Christopherson 	if (kvm_x86_ops.enable_log_dirty_pt_masked)
1695afaf0b2fSSean Christopherson 		kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
1696c50d8ae3SPaolo Bonzini 				mask);
1697c50d8ae3SPaolo Bonzini 	else
1698c50d8ae3SPaolo Bonzini 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1699c50d8ae3SPaolo Bonzini }
1700c50d8ae3SPaolo Bonzini 
1701c50d8ae3SPaolo Bonzini bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1702c50d8ae3SPaolo Bonzini 				    struct kvm_memory_slot *slot, u64 gfn)
1703c50d8ae3SPaolo Bonzini {
1704c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1705c50d8ae3SPaolo Bonzini 	int i;
1706c50d8ae3SPaolo Bonzini 	bool write_protected = false;
1707c50d8ae3SPaolo Bonzini 
17083bae0459SSean Christopherson 	for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1709c50d8ae3SPaolo Bonzini 		rmap_head = __gfn_to_rmap(gfn, i, slot);
1710c50d8ae3SPaolo Bonzini 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1711c50d8ae3SPaolo Bonzini 	}
1712c50d8ae3SPaolo Bonzini 
1713c50d8ae3SPaolo Bonzini 	return write_protected;
1714c50d8ae3SPaolo Bonzini }
1715c50d8ae3SPaolo Bonzini 
1716c50d8ae3SPaolo Bonzini static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1717c50d8ae3SPaolo Bonzini {
1718c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1719c50d8ae3SPaolo Bonzini 
1720c50d8ae3SPaolo Bonzini 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1721c50d8ae3SPaolo Bonzini 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
1722c50d8ae3SPaolo Bonzini }
1723c50d8ae3SPaolo Bonzini 
1724c50d8ae3SPaolo Bonzini static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1725c50d8ae3SPaolo Bonzini {
1726c50d8ae3SPaolo Bonzini 	u64 *sptep;
1727c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1728c50d8ae3SPaolo Bonzini 	bool flush = false;
1729c50d8ae3SPaolo Bonzini 
1730c50d8ae3SPaolo Bonzini 	while ((sptep = rmap_get_first(rmap_head, &iter))) {
1731c50d8ae3SPaolo Bonzini 		rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
1732c50d8ae3SPaolo Bonzini 
1733c50d8ae3SPaolo Bonzini 		pte_list_remove(rmap_head, sptep);
1734c50d8ae3SPaolo Bonzini 		flush = true;
1735c50d8ae3SPaolo Bonzini 	}
1736c50d8ae3SPaolo Bonzini 
1737c50d8ae3SPaolo Bonzini 	return flush;
1738c50d8ae3SPaolo Bonzini }
1739c50d8ae3SPaolo Bonzini 
1740c50d8ae3SPaolo Bonzini static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1741c50d8ae3SPaolo Bonzini 			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
1742c50d8ae3SPaolo Bonzini 			   unsigned long data)
1743c50d8ae3SPaolo Bonzini {
1744c50d8ae3SPaolo Bonzini 	return kvm_zap_rmapp(kvm, rmap_head);
1745c50d8ae3SPaolo Bonzini }
1746c50d8ae3SPaolo Bonzini 
1747c50d8ae3SPaolo Bonzini static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1748c50d8ae3SPaolo Bonzini 			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
1749c50d8ae3SPaolo Bonzini 			     unsigned long data)
1750c50d8ae3SPaolo Bonzini {
1751c50d8ae3SPaolo Bonzini 	u64 *sptep;
1752c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1753c50d8ae3SPaolo Bonzini 	int need_flush = 0;
1754c50d8ae3SPaolo Bonzini 	u64 new_spte;
1755c50d8ae3SPaolo Bonzini 	pte_t *ptep = (pte_t *)data;
1756c50d8ae3SPaolo Bonzini 	kvm_pfn_t new_pfn;
1757c50d8ae3SPaolo Bonzini 
1758c50d8ae3SPaolo Bonzini 	WARN_ON(pte_huge(*ptep));
1759c50d8ae3SPaolo Bonzini 	new_pfn = pte_pfn(*ptep);
1760c50d8ae3SPaolo Bonzini 
1761c50d8ae3SPaolo Bonzini restart:
1762c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
1763c50d8ae3SPaolo Bonzini 		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
1764c50d8ae3SPaolo Bonzini 			    sptep, *sptep, gfn, level);
1765c50d8ae3SPaolo Bonzini 
1766c50d8ae3SPaolo Bonzini 		need_flush = 1;
1767c50d8ae3SPaolo Bonzini 
1768c50d8ae3SPaolo Bonzini 		if (pte_write(*ptep)) {
1769c50d8ae3SPaolo Bonzini 			pte_list_remove(rmap_head, sptep);
1770c50d8ae3SPaolo Bonzini 			goto restart;
1771c50d8ae3SPaolo Bonzini 		} else {
1772c50d8ae3SPaolo Bonzini 			new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
1773c50d8ae3SPaolo Bonzini 			new_spte |= (u64)new_pfn << PAGE_SHIFT;
1774c50d8ae3SPaolo Bonzini 
1775c50d8ae3SPaolo Bonzini 			new_spte &= ~PT_WRITABLE_MASK;
1776c50d8ae3SPaolo Bonzini 			new_spte &= ~SPTE_HOST_WRITEABLE;
1777c50d8ae3SPaolo Bonzini 
1778c50d8ae3SPaolo Bonzini 			new_spte = mark_spte_for_access_track(new_spte);
1779c50d8ae3SPaolo Bonzini 
1780c50d8ae3SPaolo Bonzini 			mmu_spte_clear_track_bits(sptep);
1781c50d8ae3SPaolo Bonzini 			mmu_spte_set(sptep, new_spte);
1782c50d8ae3SPaolo Bonzini 		}
1783c50d8ae3SPaolo Bonzini 	}
1784c50d8ae3SPaolo Bonzini 
1785c50d8ae3SPaolo Bonzini 	if (need_flush && kvm_available_flush_tlb_with_range()) {
1786c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1787c50d8ae3SPaolo Bonzini 		return 0;
1788c50d8ae3SPaolo Bonzini 	}
1789c50d8ae3SPaolo Bonzini 
1790c50d8ae3SPaolo Bonzini 	return need_flush;
1791c50d8ae3SPaolo Bonzini }
1792c50d8ae3SPaolo Bonzini 
1793c50d8ae3SPaolo Bonzini struct slot_rmap_walk_iterator {
1794c50d8ae3SPaolo Bonzini 	/* input fields. */
1795c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
1796c50d8ae3SPaolo Bonzini 	gfn_t start_gfn;
1797c50d8ae3SPaolo Bonzini 	gfn_t end_gfn;
1798c50d8ae3SPaolo Bonzini 	int start_level;
1799c50d8ae3SPaolo Bonzini 	int end_level;
1800c50d8ae3SPaolo Bonzini 
1801c50d8ae3SPaolo Bonzini 	/* output fields. */
1802c50d8ae3SPaolo Bonzini 	gfn_t gfn;
1803c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap;
1804c50d8ae3SPaolo Bonzini 	int level;
1805c50d8ae3SPaolo Bonzini 
1806c50d8ae3SPaolo Bonzini 	/* private field. */
1807c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *end_rmap;
1808c50d8ae3SPaolo Bonzini };
1809c50d8ae3SPaolo Bonzini 
1810c50d8ae3SPaolo Bonzini static void
1811c50d8ae3SPaolo Bonzini rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1812c50d8ae3SPaolo Bonzini {
1813c50d8ae3SPaolo Bonzini 	iterator->level = level;
1814c50d8ae3SPaolo Bonzini 	iterator->gfn = iterator->start_gfn;
1815c50d8ae3SPaolo Bonzini 	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
1816c50d8ae3SPaolo Bonzini 	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
1817c50d8ae3SPaolo Bonzini 					   iterator->slot);
1818c50d8ae3SPaolo Bonzini }
1819c50d8ae3SPaolo Bonzini 
1820c50d8ae3SPaolo Bonzini static void
1821c50d8ae3SPaolo Bonzini slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1822c50d8ae3SPaolo Bonzini 		    struct kvm_memory_slot *slot, int start_level,
1823c50d8ae3SPaolo Bonzini 		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
1824c50d8ae3SPaolo Bonzini {
1825c50d8ae3SPaolo Bonzini 	iterator->slot = slot;
1826c50d8ae3SPaolo Bonzini 	iterator->start_level = start_level;
1827c50d8ae3SPaolo Bonzini 	iterator->end_level = end_level;
1828c50d8ae3SPaolo Bonzini 	iterator->start_gfn = start_gfn;
1829c50d8ae3SPaolo Bonzini 	iterator->end_gfn = end_gfn;
1830c50d8ae3SPaolo Bonzini 
1831c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->start_level);
1832c50d8ae3SPaolo Bonzini }
1833c50d8ae3SPaolo Bonzini 
1834c50d8ae3SPaolo Bonzini static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1835c50d8ae3SPaolo Bonzini {
1836c50d8ae3SPaolo Bonzini 	return !!iterator->rmap;
1837c50d8ae3SPaolo Bonzini }
1838c50d8ae3SPaolo Bonzini 
1839c50d8ae3SPaolo Bonzini static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1840c50d8ae3SPaolo Bonzini {
1841c50d8ae3SPaolo Bonzini 	if (++iterator->rmap <= iterator->end_rmap) {
1842c50d8ae3SPaolo Bonzini 		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1843c50d8ae3SPaolo Bonzini 		return;
1844c50d8ae3SPaolo Bonzini 	}
1845c50d8ae3SPaolo Bonzini 
1846c50d8ae3SPaolo Bonzini 	if (++iterator->level > iterator->end_level) {
1847c50d8ae3SPaolo Bonzini 		iterator->rmap = NULL;
1848c50d8ae3SPaolo Bonzini 		return;
1849c50d8ae3SPaolo Bonzini 	}
1850c50d8ae3SPaolo Bonzini 
1851c50d8ae3SPaolo Bonzini 	rmap_walk_init_level(iterator, iterator->level);
1852c50d8ae3SPaolo Bonzini }
1853c50d8ae3SPaolo Bonzini 
1854c50d8ae3SPaolo Bonzini #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
1855c50d8ae3SPaolo Bonzini 	   _start_gfn, _end_gfn, _iter_)				\
1856c50d8ae3SPaolo Bonzini 	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
1857c50d8ae3SPaolo Bonzini 				 _end_level_, _start_gfn, _end_gfn);	\
1858c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_okay(_iter_);				\
1859c50d8ae3SPaolo Bonzini 	     slot_rmap_walk_next(_iter_))
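
/*
 * The walk visits every rmap head for [start_gfn, end_gfn] at start_level
 * first, then moves up one level at a time (see slot_rmap_walk_next())
 * until end_level is done.  kvm_handle_hva_range() below uses it to apply
 * a handler across all page-table levels of a memslot range.
 */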
1860c50d8ae3SPaolo Bonzini 
1861c50d8ae3SPaolo Bonzini static int kvm_handle_hva_range(struct kvm *kvm,
1862c50d8ae3SPaolo Bonzini 				unsigned long start,
1863c50d8ae3SPaolo Bonzini 				unsigned long end,
1864c50d8ae3SPaolo Bonzini 				unsigned long data,
1865c50d8ae3SPaolo Bonzini 				int (*handler)(struct kvm *kvm,
1866c50d8ae3SPaolo Bonzini 					       struct kvm_rmap_head *rmap_head,
1867c50d8ae3SPaolo Bonzini 					       struct kvm_memory_slot *slot,
1868c50d8ae3SPaolo Bonzini 					       gfn_t gfn,
1869c50d8ae3SPaolo Bonzini 					       int level,
1870c50d8ae3SPaolo Bonzini 					       unsigned long data))
1871c50d8ae3SPaolo Bonzini {
1872c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
1873c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
1874c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
1875c50d8ae3SPaolo Bonzini 	int ret = 0;
1876c50d8ae3SPaolo Bonzini 	int i;
1877c50d8ae3SPaolo Bonzini 
1878c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1879c50d8ae3SPaolo Bonzini 		slots = __kvm_memslots(kvm, i);
1880c50d8ae3SPaolo Bonzini 		kvm_for_each_memslot(memslot, slots) {
1881c50d8ae3SPaolo Bonzini 			unsigned long hva_start, hva_end;
1882c50d8ae3SPaolo Bonzini 			gfn_t gfn_start, gfn_end;
1883c50d8ae3SPaolo Bonzini 
1884c50d8ae3SPaolo Bonzini 			hva_start = max(start, memslot->userspace_addr);
1885c50d8ae3SPaolo Bonzini 			hva_end = min(end, memslot->userspace_addr +
1886c50d8ae3SPaolo Bonzini 				      (memslot->npages << PAGE_SHIFT));
1887c50d8ae3SPaolo Bonzini 			if (hva_start >= hva_end)
1888c50d8ae3SPaolo Bonzini 				continue;
1889c50d8ae3SPaolo Bonzini 			/*
1890c50d8ae3SPaolo Bonzini 			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
1891c50d8ae3SPaolo Bonzini 			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1892c50d8ae3SPaolo Bonzini 			 */
1893c50d8ae3SPaolo Bonzini 			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
1894c50d8ae3SPaolo Bonzini 			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1895c50d8ae3SPaolo Bonzini 
18963bae0459SSean Christopherson 			for_each_slot_rmap_range(memslot, PG_LEVEL_4K,
1897e662ec3eSSean Christopherson 						 KVM_MAX_HUGEPAGE_LEVEL,
1898c50d8ae3SPaolo Bonzini 						 gfn_start, gfn_end - 1,
1899c50d8ae3SPaolo Bonzini 						 &iterator)
1900c50d8ae3SPaolo Bonzini 				ret |= handler(kvm, iterator.rmap, memslot,
1901c50d8ae3SPaolo Bonzini 					       iterator.gfn, iterator.level, data);
1902c50d8ae3SPaolo Bonzini 		}
1903c50d8ae3SPaolo Bonzini 	}
1904c50d8ae3SPaolo Bonzini 
1905c50d8ae3SPaolo Bonzini 	return ret;
1906c50d8ae3SPaolo Bonzini }
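
/*
 * Worked example of the hva -> gfn clamping above: for a memslot with
 * userspace_addr 0x7f0000200000 and npages 0x400, handling the hva range
 * [0x7f0000300000, 0x7f0000310000) leaves hva_start/hva_end unchanged
 * (the range is entirely inside the slot) and produces a gfn range
 * covering the 16 pages that start 0x100 pages into the slot.
 */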
1907c50d8ae3SPaolo Bonzini 
1908c50d8ae3SPaolo Bonzini static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
1909c50d8ae3SPaolo Bonzini 			  unsigned long data,
1910c50d8ae3SPaolo Bonzini 			  int (*handler)(struct kvm *kvm,
1911c50d8ae3SPaolo Bonzini 					 struct kvm_rmap_head *rmap_head,
1912c50d8ae3SPaolo Bonzini 					 struct kvm_memory_slot *slot,
1913c50d8ae3SPaolo Bonzini 					 gfn_t gfn, int level,
1914c50d8ae3SPaolo Bonzini 					 unsigned long data))
1915c50d8ae3SPaolo Bonzini {
1916c50d8ae3SPaolo Bonzini 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
1917c50d8ae3SPaolo Bonzini }
1918c50d8ae3SPaolo Bonzini 
1919fdfe7cbdSWill Deacon int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
1920fdfe7cbdSWill Deacon 			unsigned flags)
1921c50d8ae3SPaolo Bonzini {
1922c50d8ae3SPaolo Bonzini 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
1923c50d8ae3SPaolo Bonzini }
1924c50d8ae3SPaolo Bonzini 
1925c50d8ae3SPaolo Bonzini int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1926c50d8ae3SPaolo Bonzini {
1927c50d8ae3SPaolo Bonzini 	return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
1928c50d8ae3SPaolo Bonzini }
1929c50d8ae3SPaolo Bonzini 
1930c50d8ae3SPaolo Bonzini static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1931c50d8ae3SPaolo Bonzini 			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1932c50d8ae3SPaolo Bonzini 			 unsigned long data)
1933c50d8ae3SPaolo Bonzini {
1934c50d8ae3SPaolo Bonzini 	u64 *sptep;
19353f649ab7SKees Cook 	struct rmap_iterator iter;
1936c50d8ae3SPaolo Bonzini 	int young = 0;
1937c50d8ae3SPaolo Bonzini 
1938c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1939c50d8ae3SPaolo Bonzini 		young |= mmu_spte_age(sptep);
1940c50d8ae3SPaolo Bonzini 
1941c50d8ae3SPaolo Bonzini 	trace_kvm_age_page(gfn, level, slot, young);
1942c50d8ae3SPaolo Bonzini 	return young;
1943c50d8ae3SPaolo Bonzini }
1944c50d8ae3SPaolo Bonzini 
1945c50d8ae3SPaolo Bonzini static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1946c50d8ae3SPaolo Bonzini 			      struct kvm_memory_slot *slot, gfn_t gfn,
1947c50d8ae3SPaolo Bonzini 			      int level, unsigned long data)
1948c50d8ae3SPaolo Bonzini {
1949c50d8ae3SPaolo Bonzini 	u64 *sptep;
1950c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
1951c50d8ae3SPaolo Bonzini 
1952c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep)
1953c50d8ae3SPaolo Bonzini 		if (is_accessed_spte(*sptep))
1954c50d8ae3SPaolo Bonzini 			return 1;
1955c50d8ae3SPaolo Bonzini 	return 0;
1956c50d8ae3SPaolo Bonzini }
1957c50d8ae3SPaolo Bonzini 
1958c50d8ae3SPaolo Bonzini #define RMAP_RECYCLE_THRESHOLD 1000
1959c50d8ae3SPaolo Bonzini 
1960c50d8ae3SPaolo Bonzini static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1961c50d8ae3SPaolo Bonzini {
1962c50d8ae3SPaolo Bonzini 	struct kvm_rmap_head *rmap_head;
1963c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
1964c50d8ae3SPaolo Bonzini 
196557354682SSean Christopherson 	sp = sptep_to_sp(spte);
1966c50d8ae3SPaolo Bonzini 
1967c50d8ae3SPaolo Bonzini 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1968c50d8ae3SPaolo Bonzini 
1969c50d8ae3SPaolo Bonzini 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
1970c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1971c50d8ae3SPaolo Bonzini 			KVM_PAGES_PER_HPAGE(sp->role.level));
1972c50d8ae3SPaolo Bonzini }
1973c50d8ae3SPaolo Bonzini 
1974c50d8ae3SPaolo Bonzini int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1975c50d8ae3SPaolo Bonzini {
1976c50d8ae3SPaolo Bonzini 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
1977c50d8ae3SPaolo Bonzini }
1978c50d8ae3SPaolo Bonzini 
1979c50d8ae3SPaolo Bonzini int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1980c50d8ae3SPaolo Bonzini {
1981c50d8ae3SPaolo Bonzini 	return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
1982c50d8ae3SPaolo Bonzini }
1983c50d8ae3SPaolo Bonzini 
1984c50d8ae3SPaolo Bonzini #ifdef MMU_DEBUG
1985c50d8ae3SPaolo Bonzini static int is_empty_shadow_page(u64 *spt)
1986c50d8ae3SPaolo Bonzini {
1987c50d8ae3SPaolo Bonzini 	u64 *pos;
1988c50d8ae3SPaolo Bonzini 	u64 *end;
1989c50d8ae3SPaolo Bonzini 
1990c50d8ae3SPaolo Bonzini 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1991c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*pos)) {
1992c50d8ae3SPaolo Bonzini 			printk(KERN_ERR "%s: %p %llx\n", __func__,
1993c50d8ae3SPaolo Bonzini 			       pos, *pos);
1994c50d8ae3SPaolo Bonzini 			return 0;
1995c50d8ae3SPaolo Bonzini 		}
1996c50d8ae3SPaolo Bonzini 	return 1;
1997c50d8ae3SPaolo Bonzini }
1998c50d8ae3SPaolo Bonzini #endif
1999c50d8ae3SPaolo Bonzini 
2000c50d8ae3SPaolo Bonzini /*
2001c50d8ae3SPaolo Bonzini  * This value is the sum of all of the kvm instances'
2002c50d8ae3SPaolo Bonzini  * kvm->arch.n_used_mmu_pages values.  We need a global,
2003c50d8ae3SPaolo Bonzini  * aggregate version in order to make the slab shrinker
2004c50d8ae3SPaolo Bonzini  * faster.
2005c50d8ae3SPaolo Bonzini  */
2006c50d8ae3SPaolo Bonzini static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
2007c50d8ae3SPaolo Bonzini {
2008c50d8ae3SPaolo Bonzini 	kvm->arch.n_used_mmu_pages += nr;
2009c50d8ae3SPaolo Bonzini 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
2010c50d8ae3SPaolo Bonzini }
2011c50d8ae3SPaolo Bonzini 
2012c50d8ae3SPaolo Bonzini static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
2013c50d8ae3SPaolo Bonzini {
2014c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
2015c50d8ae3SPaolo Bonzini 	hlist_del(&sp->hash_link);
2016c50d8ae3SPaolo Bonzini 	list_del(&sp->link);
2017c50d8ae3SPaolo Bonzini 	free_page((unsigned long)sp->spt);
2018c50d8ae3SPaolo Bonzini 	if (!sp->role.direct)
2019c50d8ae3SPaolo Bonzini 		free_page((unsigned long)sp->gfns);
2020c50d8ae3SPaolo Bonzini 	kmem_cache_free(mmu_page_header_cache, sp);
2021c50d8ae3SPaolo Bonzini }
2022c50d8ae3SPaolo Bonzini 
2023c50d8ae3SPaolo Bonzini static unsigned kvm_page_table_hashfn(gfn_t gfn)
2024c50d8ae3SPaolo Bonzini {
2025c50d8ae3SPaolo Bonzini 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
2026c50d8ae3SPaolo Bonzini }
2027c50d8ae3SPaolo Bonzini 
2028c50d8ae3SPaolo Bonzini static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
2029c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp, u64 *parent_pte)
2030c50d8ae3SPaolo Bonzini {
2031c50d8ae3SPaolo Bonzini 	if (!parent_pte)
2032c50d8ae3SPaolo Bonzini 		return;
2033c50d8ae3SPaolo Bonzini 
2034c50d8ae3SPaolo Bonzini 	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
2035c50d8ae3SPaolo Bonzini }
2036c50d8ae3SPaolo Bonzini 
2037c50d8ae3SPaolo Bonzini static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
2038c50d8ae3SPaolo Bonzini 				       u64 *parent_pte)
2039c50d8ae3SPaolo Bonzini {
2040c50d8ae3SPaolo Bonzini 	__pte_list_remove(parent_pte, &sp->parent_ptes);
2041c50d8ae3SPaolo Bonzini }
2042c50d8ae3SPaolo Bonzini 
2043c50d8ae3SPaolo Bonzini static void drop_parent_pte(struct kvm_mmu_page *sp,
2044c50d8ae3SPaolo Bonzini 			    u64 *parent_pte)
2045c50d8ae3SPaolo Bonzini {
2046c50d8ae3SPaolo Bonzini 	mmu_page_remove_parent_pte(sp, parent_pte);
2047c50d8ae3SPaolo Bonzini 	mmu_spte_clear_no_track(parent_pte);
2048c50d8ae3SPaolo Bonzini }
2049c50d8ae3SPaolo Bonzini 
2050c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
2051c50d8ae3SPaolo Bonzini {
2052c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2053c50d8ae3SPaolo Bonzini 
205494ce87efSSean Christopherson 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
205594ce87efSSean Christopherson 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
2056c50d8ae3SPaolo Bonzini 	if (!direct)
205794ce87efSSean Christopherson 		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
2058c50d8ae3SPaolo Bonzini 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
2059c50d8ae3SPaolo Bonzini 
2060c50d8ae3SPaolo Bonzini 	/*
2061c50d8ae3SPaolo Bonzini 	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
2062c50d8ae3SPaolo Bonzini 	 * depends on valid pages being added to the head of the list.  See
2063c50d8ae3SPaolo Bonzini 	 * comments in kvm_zap_obsolete_pages().
2064c50d8ae3SPaolo Bonzini 	 */
2065c50d8ae3SPaolo Bonzini 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
2066c50d8ae3SPaolo Bonzini 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
2067c50d8ae3SPaolo Bonzini 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
2068c50d8ae3SPaolo Bonzini 	return sp;
2069c50d8ae3SPaolo Bonzini }
2070c50d8ae3SPaolo Bonzini 
2071c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte);
2072c50d8ae3SPaolo Bonzini static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
2073c50d8ae3SPaolo Bonzini {
2074c50d8ae3SPaolo Bonzini 	u64 *sptep;
2075c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
2076c50d8ae3SPaolo Bonzini 
2077c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
2078c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
2079c50d8ae3SPaolo Bonzini 	}
2080c50d8ae3SPaolo Bonzini }
2081c50d8ae3SPaolo Bonzini 
2082c50d8ae3SPaolo Bonzini static void mark_unsync(u64 *spte)
2083c50d8ae3SPaolo Bonzini {
2084c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2085c50d8ae3SPaolo Bonzini 	unsigned int index;
2086c50d8ae3SPaolo Bonzini 
208757354682SSean Christopherson 	sp = sptep_to_sp(spte);
2088c50d8ae3SPaolo Bonzini 	index = spte - sp->spt;
2089c50d8ae3SPaolo Bonzini 	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
2090c50d8ae3SPaolo Bonzini 		return;
2091c50d8ae3SPaolo Bonzini 	if (sp->unsync_children++)
2092c50d8ae3SPaolo Bonzini 		return;
2093c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
2094c50d8ae3SPaolo Bonzini }
2095c50d8ae3SPaolo Bonzini 
2096c50d8ae3SPaolo Bonzini static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
2097c50d8ae3SPaolo Bonzini 			       struct kvm_mmu_page *sp)
2098c50d8ae3SPaolo Bonzini {
2099c50d8ae3SPaolo Bonzini 	return 0;
2100c50d8ae3SPaolo Bonzini }
2101c50d8ae3SPaolo Bonzini 
2102c50d8ae3SPaolo Bonzini static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
2103c50d8ae3SPaolo Bonzini 				 struct kvm_mmu_page *sp, u64 *spte,
2104c50d8ae3SPaolo Bonzini 				 const void *pte)
2105c50d8ae3SPaolo Bonzini {
2106c50d8ae3SPaolo Bonzini 	WARN_ON(1);
2107c50d8ae3SPaolo Bonzini }
2108c50d8ae3SPaolo Bonzini 
2109c50d8ae3SPaolo Bonzini #define KVM_PAGE_ARRAY_NR 16
2110c50d8ae3SPaolo Bonzini 
2111c50d8ae3SPaolo Bonzini struct kvm_mmu_pages {
2112c50d8ae3SPaolo Bonzini 	struct mmu_page_and_offset {
2113c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
2114c50d8ae3SPaolo Bonzini 		unsigned int idx;
2115c50d8ae3SPaolo Bonzini 	} page[KVM_PAGE_ARRAY_NR];
2116c50d8ae3SPaolo Bonzini 	unsigned int nr;
2117c50d8ae3SPaolo Bonzini };
2118c50d8ae3SPaolo Bonzini 
2119c50d8ae3SPaolo Bonzini static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
2120c50d8ae3SPaolo Bonzini 			 int idx)
2121c50d8ae3SPaolo Bonzini {
2122c50d8ae3SPaolo Bonzini 	int i;
2123c50d8ae3SPaolo Bonzini 
2124c50d8ae3SPaolo Bonzini 	if (sp->unsync)
2125c50d8ae3SPaolo Bonzini 		for (i = 0; i < pvec->nr; i++)
2126c50d8ae3SPaolo Bonzini 			if (pvec->page[i].sp == sp)
2127c50d8ae3SPaolo Bonzini 				return 0;
2128c50d8ae3SPaolo Bonzini 
2129c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].sp = sp;
2130c50d8ae3SPaolo Bonzini 	pvec->page[pvec->nr].idx = idx;
2131c50d8ae3SPaolo Bonzini 	pvec->nr++;
2132c50d8ae3SPaolo Bonzini 	return (pvec->nr == KVM_PAGE_ARRAY_NR);
2133c50d8ae3SPaolo Bonzini }
2134c50d8ae3SPaolo Bonzini 
2135c50d8ae3SPaolo Bonzini static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
2136c50d8ae3SPaolo Bonzini {
2137c50d8ae3SPaolo Bonzini 	--sp->unsync_children;
2138c50d8ae3SPaolo Bonzini 	WARN_ON((int)sp->unsync_children < 0);
2139c50d8ae3SPaolo Bonzini 	__clear_bit(idx, sp->unsync_child_bitmap);
2140c50d8ae3SPaolo Bonzini }
2141c50d8ae3SPaolo Bonzini 
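/*
 * Recursively walk the subtree below @sp, guided by unsync_child_bitmap,
 * adding every reachable unsync page to @pvec and clearing bits that turn
 * out to be stale.  Returns the number of unsync leaves found, or -ENOSPC
 * if @pvec filled up before the walk completed.
 */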
2142c50d8ae3SPaolo Bonzini static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
2143c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
2144c50d8ae3SPaolo Bonzini {
2145c50d8ae3SPaolo Bonzini 	int i, ret, nr_unsync_leaf = 0;
2146c50d8ae3SPaolo Bonzini 
2147c50d8ae3SPaolo Bonzini 	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
2148c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
2149c50d8ae3SPaolo Bonzini 		u64 ent = sp->spt[i];
2150c50d8ae3SPaolo Bonzini 
2151c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
2152c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
2153c50d8ae3SPaolo Bonzini 			continue;
2154c50d8ae3SPaolo Bonzini 		}
2155c50d8ae3SPaolo Bonzini 
2156e47c4aeeSSean Christopherson 		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
2157c50d8ae3SPaolo Bonzini 
2158c50d8ae3SPaolo Bonzini 		if (child->unsync_children) {
2159c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
2160c50d8ae3SPaolo Bonzini 				return -ENOSPC;
2161c50d8ae3SPaolo Bonzini 
2162c50d8ae3SPaolo Bonzini 			ret = __mmu_unsync_walk(child, pvec);
2163c50d8ae3SPaolo Bonzini 			if (!ret) {
2164c50d8ae3SPaolo Bonzini 				clear_unsync_child_bit(sp, i);
2165c50d8ae3SPaolo Bonzini 				continue;
2166c50d8ae3SPaolo Bonzini 			} else if (ret > 0) {
2167c50d8ae3SPaolo Bonzini 				nr_unsync_leaf += ret;
2168c50d8ae3SPaolo Bonzini 			} else
2169c50d8ae3SPaolo Bonzini 				return ret;
2170c50d8ae3SPaolo Bonzini 		} else if (child->unsync) {
2171c50d8ae3SPaolo Bonzini 			nr_unsync_leaf++;
2172c50d8ae3SPaolo Bonzini 			if (mmu_pages_add(pvec, child, i))
2173c50d8ae3SPaolo Bonzini 				return -ENOSPC;
2174c50d8ae3SPaolo Bonzini 		} else
2175c50d8ae3SPaolo Bonzini 			clear_unsync_child_bit(sp, i);
2176c50d8ae3SPaolo Bonzini 	}
2177c50d8ae3SPaolo Bonzini 
2178c50d8ae3SPaolo Bonzini 	return nr_unsync_leaf;
2179c50d8ae3SPaolo Bonzini }
2180c50d8ae3SPaolo Bonzini 
2181c50d8ae3SPaolo Bonzini #define INVALID_INDEX (-1)
2182c50d8ae3SPaolo Bonzini 
2183c50d8ae3SPaolo Bonzini static int mmu_unsync_walk(struct kvm_mmu_page *sp,
2184c50d8ae3SPaolo Bonzini 			   struct kvm_mmu_pages *pvec)
2185c50d8ae3SPaolo Bonzini {
2186c50d8ae3SPaolo Bonzini 	pvec->nr = 0;
2187c50d8ae3SPaolo Bonzini 	if (!sp->unsync_children)
2188c50d8ae3SPaolo Bonzini 		return 0;
2189c50d8ae3SPaolo Bonzini 
2190c50d8ae3SPaolo Bonzini 	mmu_pages_add(pvec, sp, INVALID_INDEX);
2191c50d8ae3SPaolo Bonzini 	return __mmu_unsync_walk(sp, pvec);
2192c50d8ae3SPaolo Bonzini }
2193c50d8ae3SPaolo Bonzini 
2194c50d8ae3SPaolo Bonzini static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
2195c50d8ae3SPaolo Bonzini {
2196c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->unsync);
2197c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_sync_page(sp);
2198c50d8ae3SPaolo Bonzini 	sp->unsync = 0;
2199c50d8ae3SPaolo Bonzini 	--kvm->stat.mmu_unsync;
2200c50d8ae3SPaolo Bonzini }
2201c50d8ae3SPaolo Bonzini 
2202c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2203c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list);
2204c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2205c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list);
2206c50d8ae3SPaolo Bonzini 
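/*
 * Iterators over the shadow page hash table.  Obsolete pages (role.invalid
 * or a stale mmu_valid_gen, see is_obsolete_sp()) are skipped so walkers
 * never operate on pages that are queued for destruction.
 */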
2207ac101b7cSSean Christopherson #define for_each_valid_sp(_kvm, _sp, _list)				\
2208ac101b7cSSean Christopherson 	hlist_for_each_entry(_sp, _list, hash_link)			\
2209c50d8ae3SPaolo Bonzini 		if (is_obsolete_sp((_kvm), (_sp))) {			\
2210c50d8ae3SPaolo Bonzini 		} else
2211c50d8ae3SPaolo Bonzini 
2212c50d8ae3SPaolo Bonzini #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
2213ac101b7cSSean Christopherson 	for_each_valid_sp(_kvm, _sp,					\
2214ac101b7cSSean Christopherson 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
2215c50d8ae3SPaolo Bonzini 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
2216c50d8ae3SPaolo Bonzini 
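/*
 * cr0_wp=1 together with smap_andnot_wp=1 is an impossible combination for
 * legacy shadow roles (smap_andnot_wp implies CR0.WP=0), so the shadow EPT
 * role constructor sets both bits as a signature for shadow EPT pages.
 */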
2217c50d8ae3SPaolo Bonzini static inline bool is_ept_sp(struct kvm_mmu_page *sp)
2218c50d8ae3SPaolo Bonzini {
2219c50d8ae3SPaolo Bonzini 	return sp->role.cr0_wp && sp->role.smap_andnot_wp;
2220c50d8ae3SPaolo Bonzini }
2221c50d8ae3SPaolo Bonzini 
2222c50d8ae3SPaolo Bonzini /* @sp->gfn should be write-protected at the call site */
2223c50d8ae3SPaolo Bonzini static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2224c50d8ae3SPaolo Bonzini 			    struct list_head *invalid_list)
2225c50d8ae3SPaolo Bonzini {
2226c50d8ae3SPaolo Bonzini 	if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
2227c50d8ae3SPaolo Bonzini 	    vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
2228c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2229c50d8ae3SPaolo Bonzini 		return false;
2230c50d8ae3SPaolo Bonzini 	}
2231c50d8ae3SPaolo Bonzini 
2232c50d8ae3SPaolo Bonzini 	return true;
2233c50d8ae3SPaolo Bonzini }
2234c50d8ae3SPaolo Bonzini 
2235c50d8ae3SPaolo Bonzini static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
2236c50d8ae3SPaolo Bonzini 					struct list_head *invalid_list,
2237c50d8ae3SPaolo Bonzini 					bool remote_flush)
2238c50d8ae3SPaolo Bonzini {
2239c50d8ae3SPaolo Bonzini 	if (!remote_flush && list_empty(invalid_list))
2240c50d8ae3SPaolo Bonzini 		return false;
2241c50d8ae3SPaolo Bonzini 
2242c50d8ae3SPaolo Bonzini 	if (!list_empty(invalid_list))
2243c50d8ae3SPaolo Bonzini 		kvm_mmu_commit_zap_page(kvm, invalid_list);
2244c50d8ae3SPaolo Bonzini 	else
2245c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
2246c50d8ae3SPaolo Bonzini 	return true;
2247c50d8ae3SPaolo Bonzini }
2248c50d8ae3SPaolo Bonzini 
2249c50d8ae3SPaolo Bonzini static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
2250c50d8ae3SPaolo Bonzini 				 struct list_head *invalid_list,
2251c50d8ae3SPaolo Bonzini 				 bool remote_flush, bool local_flush)
2252c50d8ae3SPaolo Bonzini {
2253c50d8ae3SPaolo Bonzini 	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
2254c50d8ae3SPaolo Bonzini 		return;
2255c50d8ae3SPaolo Bonzini 
2256c50d8ae3SPaolo Bonzini 	if (local_flush)
22578c8560b8SSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2258c50d8ae3SPaolo Bonzini }
2259c50d8ae3SPaolo Bonzini 
2260c50d8ae3SPaolo Bonzini #ifdef CONFIG_KVM_MMU_AUDIT
2261c50d8ae3SPaolo Bonzini #include "mmu_audit.c"
2262c50d8ae3SPaolo Bonzini #else
2263c50d8ae3SPaolo Bonzini static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
2264c50d8ae3SPaolo Bonzini static void mmu_audit_disable(void) { }
2265c50d8ae3SPaolo Bonzini #endif
2266c50d8ae3SPaolo Bonzini 
2267c50d8ae3SPaolo Bonzini static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
2268c50d8ae3SPaolo Bonzini {
2269c50d8ae3SPaolo Bonzini 	return sp->role.invalid ||
2270c50d8ae3SPaolo Bonzini 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2271c50d8ae3SPaolo Bonzini }
2272c50d8ae3SPaolo Bonzini 
2273c50d8ae3SPaolo Bonzini static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2274c50d8ae3SPaolo Bonzini 			 struct list_head *invalid_list)
2275c50d8ae3SPaolo Bonzini {
2276c50d8ae3SPaolo Bonzini 	kvm_unlink_unsync_page(vcpu->kvm, sp);
2277c50d8ae3SPaolo Bonzini 	return __kvm_sync_page(vcpu, sp, invalid_list);
2278c50d8ae3SPaolo Bonzini }
2279c50d8ae3SPaolo Bonzini 
2280c50d8ae3SPaolo Bonzini /* @gfn should be write-protected at the call site */
2281c50d8ae3SPaolo Bonzini static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
2282c50d8ae3SPaolo Bonzini 			   struct list_head *invalid_list)
2283c50d8ae3SPaolo Bonzini {
2284c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *s;
2285c50d8ae3SPaolo Bonzini 	bool ret = false;
2286c50d8ae3SPaolo Bonzini 
2287c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
2288c50d8ae3SPaolo Bonzini 		if (!s->unsync)
2289c50d8ae3SPaolo Bonzini 			continue;
2290c50d8ae3SPaolo Bonzini 
22913bae0459SSean Christopherson 		WARN_ON(s->role.level != PG_LEVEL_4K);
2292c50d8ae3SPaolo Bonzini 		ret |= kvm_sync_page(vcpu, s, invalid_list);
2293c50d8ae3SPaolo Bonzini 	}
2294c50d8ae3SPaolo Bonzini 
2295c50d8ae3SPaolo Bonzini 	return ret;
2296c50d8ae3SPaolo Bonzini }
2297c50d8ae3SPaolo Bonzini 
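/*
 * Records the chain of parent shadow pages, and the index taken within
 * each, while iterating a kvm_mmu_pages vector.  This lets
 * mmu_pages_clear_parents() clear unsync_child bits bottom-up once a leaf
 * has been synced.
 */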
2298c50d8ae3SPaolo Bonzini struct mmu_page_path {
2299c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
2300c50d8ae3SPaolo Bonzini 	unsigned int idx[PT64_ROOT_MAX_LEVEL];
2301c50d8ae3SPaolo Bonzini };
2302c50d8ae3SPaolo Bonzini 
2303c50d8ae3SPaolo Bonzini #define for_each_sp(pvec, sp, parents, i)			\
2304c50d8ae3SPaolo Bonzini 		for (i = mmu_pages_first(&pvec, &parents);	\
2305c50d8ae3SPaolo Bonzini 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
2306c50d8ae3SPaolo Bonzini 			i = mmu_pages_next(&pvec, &parents, i))
2307c50d8ae3SPaolo Bonzini 
2308c50d8ae3SPaolo Bonzini static int mmu_pages_next(struct kvm_mmu_pages *pvec,
2309c50d8ae3SPaolo Bonzini 			  struct mmu_page_path *parents,
2310c50d8ae3SPaolo Bonzini 			  int i)
2311c50d8ae3SPaolo Bonzini {
2312c50d8ae3SPaolo Bonzini 	int n;
2313c50d8ae3SPaolo Bonzini 
2314c50d8ae3SPaolo Bonzini 	for (n = i+1; n < pvec->nr; n++) {
2315c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp = pvec->page[n].sp;
2316c50d8ae3SPaolo Bonzini 		unsigned idx = pvec->page[n].idx;
2317c50d8ae3SPaolo Bonzini 		int level = sp->role.level;
2318c50d8ae3SPaolo Bonzini 
2319c50d8ae3SPaolo Bonzini 		parents->idx[level-1] = idx;
23203bae0459SSean Christopherson 		if (level == PG_LEVEL_4K)
2321c50d8ae3SPaolo Bonzini 			break;
2322c50d8ae3SPaolo Bonzini 
2323c50d8ae3SPaolo Bonzini 		parents->parent[level-2] = sp;
2324c50d8ae3SPaolo Bonzini 	}
2325c50d8ae3SPaolo Bonzini 
2326c50d8ae3SPaolo Bonzini 	return n;
2327c50d8ae3SPaolo Bonzini }
2328c50d8ae3SPaolo Bonzini 
2329c50d8ae3SPaolo Bonzini static int mmu_pages_first(struct kvm_mmu_pages *pvec,
2330c50d8ae3SPaolo Bonzini 			   struct mmu_page_path *parents)
2331c50d8ae3SPaolo Bonzini {
2332c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2333c50d8ae3SPaolo Bonzini 	int level;
2334c50d8ae3SPaolo Bonzini 
2335c50d8ae3SPaolo Bonzini 	if (pvec->nr == 0)
2336c50d8ae3SPaolo Bonzini 		return 0;
2337c50d8ae3SPaolo Bonzini 
2338c50d8ae3SPaolo Bonzini 	WARN_ON(pvec->page[0].idx != INVALID_INDEX);
2339c50d8ae3SPaolo Bonzini 
2340c50d8ae3SPaolo Bonzini 	sp = pvec->page[0].sp;
2341c50d8ae3SPaolo Bonzini 	level = sp->role.level;
23423bae0459SSean Christopherson 	WARN_ON(level == PG_LEVEL_4K);
2343c50d8ae3SPaolo Bonzini 
2344c50d8ae3SPaolo Bonzini 	parents->parent[level-2] = sp;
2345c50d8ae3SPaolo Bonzini 
2346c50d8ae3SPaolo Bonzini 	/* Also set up a sentinel.  Further entries in pvec are all
2347c50d8ae3SPaolo Bonzini 	 * children of sp, so this element is never overwritten.
2348c50d8ae3SPaolo Bonzini 	 */
2349c50d8ae3SPaolo Bonzini 	parents->parent[level-1] = NULL;
2350c50d8ae3SPaolo Bonzini 	return mmu_pages_next(pvec, parents, 0);
2351c50d8ae3SPaolo Bonzini }
2352c50d8ae3SPaolo Bonzini 
2353c50d8ae3SPaolo Bonzini static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2354c50d8ae3SPaolo Bonzini {
2355c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2356c50d8ae3SPaolo Bonzini 	unsigned int level = 0;
2357c50d8ae3SPaolo Bonzini 
2358c50d8ae3SPaolo Bonzini 	do {
2359c50d8ae3SPaolo Bonzini 		unsigned int idx = parents->idx[level];
2360c50d8ae3SPaolo Bonzini 		sp = parents->parent[level];
2361c50d8ae3SPaolo Bonzini 		if (!sp)
2362c50d8ae3SPaolo Bonzini 			return;
2363c50d8ae3SPaolo Bonzini 
2364c50d8ae3SPaolo Bonzini 		WARN_ON(idx == INVALID_INDEX);
2365c50d8ae3SPaolo Bonzini 		clear_unsync_child_bit(sp, idx);
2366c50d8ae3SPaolo Bonzini 		level++;
2367c50d8ae3SPaolo Bonzini 	} while (!sp->unsync_children);
2368c50d8ae3SPaolo Bonzini }
2369c50d8ae3SPaolo Bonzini 
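/*
 * Bring all unsync descendants of @parent back in sync with the guest page
 * tables.  Pages are processed in batches of up to KVM_PAGE_ARRAY_NR, with
 * the mmu_lock dropped between batches if there is contention.
 */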
2370c50d8ae3SPaolo Bonzini static void mmu_sync_children(struct kvm_vcpu *vcpu,
2371c50d8ae3SPaolo Bonzini 			      struct kvm_mmu_page *parent)
2372c50d8ae3SPaolo Bonzini {
2373c50d8ae3SPaolo Bonzini 	int i;
2374c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2375c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
2376c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
2377c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2378c50d8ae3SPaolo Bonzini 	bool flush = false;
2379c50d8ae3SPaolo Bonzini 
2380c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
2381c50d8ae3SPaolo Bonzini 		bool protected = false;
2382c50d8ae3SPaolo Bonzini 
2383c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i)
2384c50d8ae3SPaolo Bonzini 			protected |= rmap_write_protect(vcpu, sp->gfn);
2385c50d8ae3SPaolo Bonzini 
2386c50d8ae3SPaolo Bonzini 		if (protected) {
2387c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs(vcpu->kvm);
2388c50d8ae3SPaolo Bonzini 			flush = false;
2389c50d8ae3SPaolo Bonzini 		}
2390c50d8ae3SPaolo Bonzini 
2391c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
2392c50d8ae3SPaolo Bonzini 			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
2393c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
2394c50d8ae3SPaolo Bonzini 		}
2395c50d8ae3SPaolo Bonzini 		if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
2396c50d8ae3SPaolo Bonzini 			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2397c50d8ae3SPaolo Bonzini 			cond_resched_lock(&vcpu->kvm->mmu_lock);
2398c50d8ae3SPaolo Bonzini 			flush = false;
2399c50d8ae3SPaolo Bonzini 		}
2400c50d8ae3SPaolo Bonzini 	}
2401c50d8ae3SPaolo Bonzini 
2402c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2403c50d8ae3SPaolo Bonzini }
2404c50d8ae3SPaolo Bonzini 
2405c50d8ae3SPaolo Bonzini static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2406c50d8ae3SPaolo Bonzini {
2407c50d8ae3SPaolo Bonzini 	atomic_set(&sp->write_flooding_count, 0);
2408c50d8ae3SPaolo Bonzini }
2409c50d8ae3SPaolo Bonzini 
2410c50d8ae3SPaolo Bonzini static void clear_sp_write_flooding_count(u64 *spte)
2411c50d8ae3SPaolo Bonzini {
241257354682SSean Christopherson 	__clear_sp_write_flooding_count(sptep_to_sp(spte));
2413c50d8ae3SPaolo Bonzini }
2414c50d8ae3SPaolo Bonzini 
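/*
 * Look up the shadow page for @gfn with the role derived from the current
 * MMU context, syncing or rebuilding it as needed, or allocate and account
 * a new page on a cache miss.
 */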
2415c50d8ae3SPaolo Bonzini static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2416c50d8ae3SPaolo Bonzini 					     gfn_t gfn,
2417c50d8ae3SPaolo Bonzini 					     gva_t gaddr,
2418c50d8ae3SPaolo Bonzini 					     unsigned level,
2419c50d8ae3SPaolo Bonzini 					     int direct,
24200a2b64c5SBen Gardon 					     unsigned int access)
2421c50d8ae3SPaolo Bonzini {
2422fb58a9c3SSean Christopherson 	bool direct_mmu = vcpu->arch.mmu->direct_map;
2423c50d8ae3SPaolo Bonzini 	union kvm_mmu_page_role role;
2424ac101b7cSSean Christopherson 	struct hlist_head *sp_list;
2425c50d8ae3SPaolo Bonzini 	unsigned quadrant;
2426c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2427c50d8ae3SPaolo Bonzini 	bool need_sync = false;
2428c50d8ae3SPaolo Bonzini 	bool flush = false;
2429c50d8ae3SPaolo Bonzini 	int collisions = 0;
2430c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2431c50d8ae3SPaolo Bonzini 
2432c50d8ae3SPaolo Bonzini 	role = vcpu->arch.mmu->mmu_role.base;
2433c50d8ae3SPaolo Bonzini 	role.level = level;
2434c50d8ae3SPaolo Bonzini 	role.direct = direct;
2435c50d8ae3SPaolo Bonzini 	if (role.direct)
2436c50d8ae3SPaolo Bonzini 		role.gpte_is_8_bytes = true;
2437c50d8ae3SPaolo Bonzini 	role.access = access;
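	/*
	 * A 32-bit guest page table (1024 4-byte entries) is shadowed by two
	 * or four 512-entry 64-bit tables, so the quadrant records which
	 * slice of the guest table this shadow page maps.  E.g. at level 1,
	 * gaddr bit 21 selects either the low or the high 512 GPTEs.
	 */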
2438fb58a9c3SSean Christopherson 	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2439c50d8ae3SPaolo Bonzini 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2440c50d8ae3SPaolo Bonzini 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2441c50d8ae3SPaolo Bonzini 		role.quadrant = quadrant;
2442c50d8ae3SPaolo Bonzini 	}
2443ac101b7cSSean Christopherson 
2444ac101b7cSSean Christopherson 	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2445ac101b7cSSean Christopherson 	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
2446c50d8ae3SPaolo Bonzini 		if (sp->gfn != gfn) {
2447c50d8ae3SPaolo Bonzini 			collisions++;
2448c50d8ae3SPaolo Bonzini 			continue;
2449c50d8ae3SPaolo Bonzini 		}
2450c50d8ae3SPaolo Bonzini 
2451c50d8ae3SPaolo Bonzini 		if (!need_sync && sp->unsync)
2452c50d8ae3SPaolo Bonzini 			need_sync = true;
2453c50d8ae3SPaolo Bonzini 
2454c50d8ae3SPaolo Bonzini 		if (sp->role.word != role.word)
2455c50d8ae3SPaolo Bonzini 			continue;
2456c50d8ae3SPaolo Bonzini 
2457fb58a9c3SSean Christopherson 		if (direct_mmu)
2458fb58a9c3SSean Christopherson 			goto trace_get_page;
2459fb58a9c3SSean Christopherson 
2460c50d8ae3SPaolo Bonzini 		if (sp->unsync) {
2461c50d8ae3SPaolo Bonzini 			/* The page is good, but __kvm_sync_page might still end
2462c50d8ae3SPaolo Bonzini 			 * up zapping it.  If so, break in order to rebuild it.
2463c50d8ae3SPaolo Bonzini 			 */
2464c50d8ae3SPaolo Bonzini 			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
2465c50d8ae3SPaolo Bonzini 				break;
2466c50d8ae3SPaolo Bonzini 
2467c50d8ae3SPaolo Bonzini 			WARN_ON(!list_empty(&invalid_list));
24688c8560b8SSean Christopherson 			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
2469c50d8ae3SPaolo Bonzini 		}
2470c50d8ae3SPaolo Bonzini 
2471c50d8ae3SPaolo Bonzini 		if (sp->unsync_children)
2472f6f6195bSLai Jiangshan 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2473c50d8ae3SPaolo Bonzini 
2474c50d8ae3SPaolo Bonzini 		__clear_sp_write_flooding_count(sp);
2475fb58a9c3SSean Christopherson 
2476fb58a9c3SSean Christopherson trace_get_page:
2477c50d8ae3SPaolo Bonzini 		trace_kvm_mmu_get_page(sp, false);
2478c50d8ae3SPaolo Bonzini 		goto out;
2479c50d8ae3SPaolo Bonzini 	}
2480c50d8ae3SPaolo Bonzini 
2481c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_cache_miss;
2482c50d8ae3SPaolo Bonzini 
2483c50d8ae3SPaolo Bonzini 	sp = kvm_mmu_alloc_page(vcpu, direct);
2484c50d8ae3SPaolo Bonzini 
2485c50d8ae3SPaolo Bonzini 	sp->gfn = gfn;
2486c50d8ae3SPaolo Bonzini 	sp->role = role;
2487ac101b7cSSean Christopherson 	hlist_add_head(&sp->hash_link, sp_list);
2488c50d8ae3SPaolo Bonzini 	if (!direct) {
2489c50d8ae3SPaolo Bonzini 		/*
2490c50d8ae3SPaolo Bonzini 		 * We should write-protect the gfn before syncing pages;
2491c50d8ae3SPaolo Bonzini 		 * otherwise the content of the synced shadow page may be
2492c50d8ae3SPaolo Bonzini 		 * inconsistent with the guest page table.
2493c50d8ae3SPaolo Bonzini 		 */
2494c50d8ae3SPaolo Bonzini 		account_shadowed(vcpu->kvm, sp);
24953bae0459SSean Christopherson 		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
2496c50d8ae3SPaolo Bonzini 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2497c50d8ae3SPaolo Bonzini 
24983bae0459SSean Christopherson 		if (level > PG_LEVEL_4K && need_sync)
2499c50d8ae3SPaolo Bonzini 			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
2500c50d8ae3SPaolo Bonzini 	}
2501c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_get_page(sp, true);
2502c50d8ae3SPaolo Bonzini 
2503c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2504c50d8ae3SPaolo Bonzini out:
2505c50d8ae3SPaolo Bonzini 	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2506c50d8ae3SPaolo Bonzini 		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2507c50d8ae3SPaolo Bonzini 	return sp;
2508c50d8ae3SPaolo Bonzini }
2509c50d8ae3SPaolo Bonzini 
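/*
 * Prime an iterator for walking the shadow page tables from @root down to
 * the leaf that maps @addr.  PAE roots get special treatment: pae_root is
 * a four-entry top-level table that is not a regular shadow page, so the
 * walk starts at the PDPTE selected by addr bits 31:30.
 */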
2510c50d8ae3SPaolo Bonzini static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2511c50d8ae3SPaolo Bonzini 					struct kvm_vcpu *vcpu, hpa_t root,
2512c50d8ae3SPaolo Bonzini 					u64 addr)
2513c50d8ae3SPaolo Bonzini {
2514c50d8ae3SPaolo Bonzini 	iterator->addr = addr;
2515c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = root;
2516c50d8ae3SPaolo Bonzini 	iterator->level = vcpu->arch.mmu->shadow_root_level;
2517c50d8ae3SPaolo Bonzini 
2518c50d8ae3SPaolo Bonzini 	if (iterator->level == PT64_ROOT_4LEVEL &&
2519c50d8ae3SPaolo Bonzini 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2520c50d8ae3SPaolo Bonzini 	    !vcpu->arch.mmu->direct_map)
2521c50d8ae3SPaolo Bonzini 		--iterator->level;
2522c50d8ae3SPaolo Bonzini 
2523c50d8ae3SPaolo Bonzini 	if (iterator->level == PT32E_ROOT_LEVEL) {
2524c50d8ae3SPaolo Bonzini 		/*
2525c50d8ae3SPaolo Bonzini 		 * prev_root is currently only used for 64-bit hosts, so only
2526c50d8ae3SPaolo Bonzini 		 * the active root_hpa is valid here.
2527c50d8ae3SPaolo Bonzini 		 */
2528c50d8ae3SPaolo Bonzini 		BUG_ON(root != vcpu->arch.mmu->root_hpa);
2529c50d8ae3SPaolo Bonzini 
2530c50d8ae3SPaolo Bonzini 		iterator->shadow_addr
2531c50d8ae3SPaolo Bonzini 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2532c50d8ae3SPaolo Bonzini 		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2533c50d8ae3SPaolo Bonzini 		--iterator->level;
2534c50d8ae3SPaolo Bonzini 		if (!iterator->shadow_addr)
2535c50d8ae3SPaolo Bonzini 			iterator->level = 0;
2536c50d8ae3SPaolo Bonzini 	}
2537c50d8ae3SPaolo Bonzini }
2538c50d8ae3SPaolo Bonzini 
2539c50d8ae3SPaolo Bonzini static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2540c50d8ae3SPaolo Bonzini 			     struct kvm_vcpu *vcpu, u64 addr)
2541c50d8ae3SPaolo Bonzini {
2542c50d8ae3SPaolo Bonzini 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2543c50d8ae3SPaolo Bonzini 				    addr);
2544c50d8ae3SPaolo Bonzini }
2545c50d8ae3SPaolo Bonzini 
2546c50d8ae3SPaolo Bonzini static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2547c50d8ae3SPaolo Bonzini {
25483bae0459SSean Christopherson 	if (iterator->level < PG_LEVEL_4K)
2549c50d8ae3SPaolo Bonzini 		return false;
2550c50d8ae3SPaolo Bonzini 
2551c50d8ae3SPaolo Bonzini 	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2552c50d8ae3SPaolo Bonzini 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2553c50d8ae3SPaolo Bonzini 	return true;
2554c50d8ae3SPaolo Bonzini }
2555c50d8ae3SPaolo Bonzini 
2556c50d8ae3SPaolo Bonzini static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2557c50d8ae3SPaolo Bonzini 			       u64 spte)
2558c50d8ae3SPaolo Bonzini {
2559c50d8ae3SPaolo Bonzini 	if (is_last_spte(spte, iterator->level)) {
2560c50d8ae3SPaolo Bonzini 		iterator->level = 0;
2561c50d8ae3SPaolo Bonzini 		return;
2562c50d8ae3SPaolo Bonzini 	}
2563c50d8ae3SPaolo Bonzini 
2564c50d8ae3SPaolo Bonzini 	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2565c50d8ae3SPaolo Bonzini 	--iterator->level;
2566c50d8ae3SPaolo Bonzini }
2567c50d8ae3SPaolo Bonzini 
2568c50d8ae3SPaolo Bonzini static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2569c50d8ae3SPaolo Bonzini {
2570c50d8ae3SPaolo Bonzini 	__shadow_walk_next(iterator, *iterator->sptep);
2571c50d8ae3SPaolo Bonzini }
2572c50d8ae3SPaolo Bonzini 
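/*
 * Install a non-leaf SPTE pointing at @sp's page table and hook @sptep into
 * the child's parent rmap.  Non-leaf SPTEs are maximally permissive (write,
 * user, exec); the actual protections are enforced by the leaf SPTEs.  If
 * the child subtree contains unsync pages, propagate that up to the parents.
 */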
2573c50d8ae3SPaolo Bonzini static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2574c50d8ae3SPaolo Bonzini 			     struct kvm_mmu_page *sp)
2575c50d8ae3SPaolo Bonzini {
2576c50d8ae3SPaolo Bonzini 	u64 spte;
2577c50d8ae3SPaolo Bonzini 
2578c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2579c50d8ae3SPaolo Bonzini 
2580c50d8ae3SPaolo Bonzini 	spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
2581c50d8ae3SPaolo Bonzini 	       shadow_user_mask | shadow_x_mask | shadow_me_mask;
2582c50d8ae3SPaolo Bonzini 
2583c50d8ae3SPaolo Bonzini 	if (sp_ad_disabled(sp))
2584c50d8ae3SPaolo Bonzini 		spte |= SPTE_AD_DISABLED_MASK;
2585c50d8ae3SPaolo Bonzini 	else
2586c50d8ae3SPaolo Bonzini 		spte |= shadow_accessed_mask;
2587c50d8ae3SPaolo Bonzini 
2588c50d8ae3SPaolo Bonzini 	mmu_spte_set(sptep, spte);
2589c50d8ae3SPaolo Bonzini 
2590c50d8ae3SPaolo Bonzini 	mmu_page_add_parent_pte(vcpu, sp, sptep);
2591c50d8ae3SPaolo Bonzini 
2592c50d8ae3SPaolo Bonzini 	if (sp->unsync_children || sp->unsync)
2593c50d8ae3SPaolo Bonzini 		mark_unsync(sptep);
2594c50d8ae3SPaolo Bonzini }
2595c50d8ae3SPaolo Bonzini 
2596c50d8ae3SPaolo Bonzini static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2597c50d8ae3SPaolo Bonzini 				   unsigned direct_access)
2598c50d8ae3SPaolo Bonzini {
2599c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2600c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *child;
2601c50d8ae3SPaolo Bonzini 
2602c50d8ae3SPaolo Bonzini 		/*
2603c50d8ae3SPaolo Bonzini 		 * For a direct sp, if the guest pte's dirty bit changed
2604c50d8ae3SPaolo Bonzini 		 * from clean to dirty, it would corrupt the sp's access:
2605c50d8ae3SPaolo Bonzini 		 * writes would be allowed through the read-only sp.  Update
2606c50d8ae3SPaolo Bonzini 		 * the spte at this point to get a new sp with the correct
2607c50d8ae3SPaolo Bonzini 		 * access.
2608c50d8ae3SPaolo Bonzini 		 */
2609e47c4aeeSSean Christopherson 		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2610c50d8ae3SPaolo Bonzini 		if (child->role.access == direct_access)
2611c50d8ae3SPaolo Bonzini 			return;
2612c50d8ae3SPaolo Bonzini 
2613c50d8ae3SPaolo Bonzini 		drop_parent_pte(child, sptep);
2614c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2615c50d8ae3SPaolo Bonzini 	}
2616c50d8ae3SPaolo Bonzini }
2617c50d8ae3SPaolo Bonzini 
26182de4085cSBen Gardon /* Returns the number of zapped non-leaf child shadow pages. */
26192de4085cSBen Gardon static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
26202de4085cSBen Gardon 			    u64 *spte, struct list_head *invalid_list)
2621c50d8ae3SPaolo Bonzini {
2622c50d8ae3SPaolo Bonzini 	u64 pte;
2623c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *child;
2624c50d8ae3SPaolo Bonzini 
2625c50d8ae3SPaolo Bonzini 	pte = *spte;
2626c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(pte)) {
2627c50d8ae3SPaolo Bonzini 		if (is_last_spte(pte, sp->role.level)) {
2628c50d8ae3SPaolo Bonzini 			drop_spte(kvm, spte);
2629c50d8ae3SPaolo Bonzini 			if (is_large_pte(pte))
2630c50d8ae3SPaolo Bonzini 				--kvm->stat.lpages;
2631c50d8ae3SPaolo Bonzini 		} else {
2632e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2633c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, spte);
26342de4085cSBen Gardon 
26352de4085cSBen Gardon 			/*
26362de4085cSBen Gardon 			 * Recursively zap nested TDP SPs; parentless SPs are
26372de4085cSBen Gardon 			 * unlikely to be used again in the near future.  This
26382de4085cSBen Gardon 			 * avoids retaining a large number of stale nested SPs.
26392de4085cSBen Gardon 			 */
26402de4085cSBen Gardon 			if (tdp_enabled && invalid_list &&
26412de4085cSBen Gardon 			    child->role.guest_mode && !child->parent_ptes.val)
26422de4085cSBen Gardon 				return kvm_mmu_prepare_zap_page(kvm, child,
26432de4085cSBen Gardon 								invalid_list);
2644c50d8ae3SPaolo Bonzini 		}
2645ace569e0SSean Christopherson 	} else if (is_mmio_spte(pte)) {
2646c50d8ae3SPaolo Bonzini 		mmu_spte_clear_no_track(spte);
2647ace569e0SSean Christopherson 	}
26482de4085cSBen Gardon 	return 0;
2649c50d8ae3SPaolo Bonzini }
2650c50d8ae3SPaolo Bonzini 
26512de4085cSBen Gardon static int kvm_mmu_page_unlink_children(struct kvm *kvm,
26522de4085cSBen Gardon 					struct kvm_mmu_page *sp,
26532de4085cSBen Gardon 					struct list_head *invalid_list)
2654c50d8ae3SPaolo Bonzini {
26552de4085cSBen Gardon 	int zapped = 0;
2656c50d8ae3SPaolo Bonzini 	unsigned i;
2657c50d8ae3SPaolo Bonzini 
2658c50d8ae3SPaolo Bonzini 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
26592de4085cSBen Gardon 		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
26602de4085cSBen Gardon 
26612de4085cSBen Gardon 	return zapped;
2662c50d8ae3SPaolo Bonzini }
2663c50d8ae3SPaolo Bonzini 
2664c50d8ae3SPaolo Bonzini static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2665c50d8ae3SPaolo Bonzini {
2666c50d8ae3SPaolo Bonzini 	u64 *sptep;
2667c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
2668c50d8ae3SPaolo Bonzini 
2669c50d8ae3SPaolo Bonzini 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2670c50d8ae3SPaolo Bonzini 		drop_parent_pte(sp, sptep);
2671c50d8ae3SPaolo Bonzini }
2672c50d8ae3SPaolo Bonzini 
2673c50d8ae3SPaolo Bonzini static int mmu_zap_unsync_children(struct kvm *kvm,
2674c50d8ae3SPaolo Bonzini 				   struct kvm_mmu_page *parent,
2675c50d8ae3SPaolo Bonzini 				   struct list_head *invalid_list)
2676c50d8ae3SPaolo Bonzini {
2677c50d8ae3SPaolo Bonzini 	int i, zapped = 0;
2678c50d8ae3SPaolo Bonzini 	struct mmu_page_path parents;
2679c50d8ae3SPaolo Bonzini 	struct kvm_mmu_pages pages;
2680c50d8ae3SPaolo Bonzini 
26813bae0459SSean Christopherson 	if (parent->role.level == PG_LEVEL_4K)
2682c50d8ae3SPaolo Bonzini 		return 0;
2683c50d8ae3SPaolo Bonzini 
2684c50d8ae3SPaolo Bonzini 	while (mmu_unsync_walk(parent, &pages)) {
2685c50d8ae3SPaolo Bonzini 		struct kvm_mmu_page *sp;
2686c50d8ae3SPaolo Bonzini 
2687c50d8ae3SPaolo Bonzini 		for_each_sp(pages, sp, parents, i) {
2688c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2689c50d8ae3SPaolo Bonzini 			mmu_pages_clear_parents(&parents);
2690c50d8ae3SPaolo Bonzini 			zapped++;
2691c50d8ae3SPaolo Bonzini 		}
2692c50d8ae3SPaolo Bonzini 	}
2693c50d8ae3SPaolo Bonzini 
2694c50d8ae3SPaolo Bonzini 	return zapped;
2695c50d8ae3SPaolo Bonzini }
2696c50d8ae3SPaolo Bonzini 
2697c50d8ae3SPaolo Bonzini static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2698c50d8ae3SPaolo Bonzini 				       struct kvm_mmu_page *sp,
2699c50d8ae3SPaolo Bonzini 				       struct list_head *invalid_list,
2700c50d8ae3SPaolo Bonzini 				       int *nr_zapped)
2701c50d8ae3SPaolo Bonzini {
2702c50d8ae3SPaolo Bonzini 	bool list_unstable;
2703c50d8ae3SPaolo Bonzini 
2704c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_prepare_zap_page(sp);
2705c50d8ae3SPaolo Bonzini 	++kvm->stat.mmu_shadow_zapped;
2706c50d8ae3SPaolo Bonzini 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
27072de4085cSBen Gardon 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2708c50d8ae3SPaolo Bonzini 	kvm_mmu_unlink_parents(kvm, sp);
2709c50d8ae3SPaolo Bonzini 
2710c50d8ae3SPaolo Bonzini 	/* Zapping children means active_mmu_pages has become unstable. */
2711c50d8ae3SPaolo Bonzini 	list_unstable = *nr_zapped;
2712c50d8ae3SPaolo Bonzini 
2713c50d8ae3SPaolo Bonzini 	if (!sp->role.invalid && !sp->role.direct)
2714c50d8ae3SPaolo Bonzini 		unaccount_shadowed(kvm, sp);
2715c50d8ae3SPaolo Bonzini 
2716c50d8ae3SPaolo Bonzini 	if (sp->unsync)
2717c50d8ae3SPaolo Bonzini 		kvm_unlink_unsync_page(kvm, sp);
2718c50d8ae3SPaolo Bonzini 	if (!sp->root_count) {
2719c50d8ae3SPaolo Bonzini 		/* Count self */
2720c50d8ae3SPaolo Bonzini 		(*nr_zapped)++;
2721f95eec9bSSean Christopherson 
2722f95eec9bSSean Christopherson 		/*
2723f95eec9bSSean Christopherson 		 * Already invalid pages (previously active roots) are not on
2724f95eec9bSSean Christopherson 		 * the active page list.  See list_del() in the "else" case of
2725f95eec9bSSean Christopherson 		 * !sp->root_count.
2726f95eec9bSSean Christopherson 		 */
2727f95eec9bSSean Christopherson 		if (sp->role.invalid)
2728f95eec9bSSean Christopherson 			list_add(&sp->link, invalid_list);
2729f95eec9bSSean Christopherson 		else
2730c50d8ae3SPaolo Bonzini 			list_move(&sp->link, invalid_list);
2731c50d8ae3SPaolo Bonzini 		kvm_mod_used_mmu_pages(kvm, -1);
2732c50d8ae3SPaolo Bonzini 	} else {
2733f95eec9bSSean Christopherson 		/*
2734f95eec9bSSean Christopherson 		 * Remove the active root from the active page list, the root
2735f95eec9bSSean Christopherson 		 * will be explicitly freed when the root_count hits zero.
2736f95eec9bSSean Christopherson 		 */
2737f95eec9bSSean Christopherson 		list_del(&sp->link);
2738c50d8ae3SPaolo Bonzini 
2739c50d8ae3SPaolo Bonzini 		/*
2740c50d8ae3SPaolo Bonzini 		 * Obsolete pages cannot be used on any vCPUs, see the comment
2741c50d8ae3SPaolo Bonzini 		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
2742c50d8ae3SPaolo Bonzini 		 * treats invalid shadow pages as being obsolete.
2743c50d8ae3SPaolo Bonzini 		 */
2744c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
2745c50d8ae3SPaolo Bonzini 			kvm_reload_remote_mmus(kvm);
2746c50d8ae3SPaolo Bonzini 	}
2747c50d8ae3SPaolo Bonzini 
2748c50d8ae3SPaolo Bonzini 	if (sp->lpage_disallowed)
2749c50d8ae3SPaolo Bonzini 		unaccount_huge_nx_page(kvm, sp);
2750c50d8ae3SPaolo Bonzini 
2751c50d8ae3SPaolo Bonzini 	sp->role.invalid = 1;
2752c50d8ae3SPaolo Bonzini 	return list_unstable;
2753c50d8ae3SPaolo Bonzini }
2754c50d8ae3SPaolo Bonzini 
2755c50d8ae3SPaolo Bonzini static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2756c50d8ae3SPaolo Bonzini 				     struct list_head *invalid_list)
2757c50d8ae3SPaolo Bonzini {
2758c50d8ae3SPaolo Bonzini 	int nr_zapped;
2759c50d8ae3SPaolo Bonzini 
2760c50d8ae3SPaolo Bonzini 	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2761c50d8ae3SPaolo Bonzini 	return nr_zapped;
2762c50d8ae3SPaolo Bonzini }
2763c50d8ae3SPaolo Bonzini 
2764c50d8ae3SPaolo Bonzini static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2765c50d8ae3SPaolo Bonzini 				    struct list_head *invalid_list)
2766c50d8ae3SPaolo Bonzini {
2767c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *nsp;
2768c50d8ae3SPaolo Bonzini 
2769c50d8ae3SPaolo Bonzini 	if (list_empty(invalid_list))
2770c50d8ae3SPaolo Bonzini 		return;
2771c50d8ae3SPaolo Bonzini 
2772c50d8ae3SPaolo Bonzini 	/*
2773c50d8ae3SPaolo Bonzini 	 * We need to make sure everyone sees our modifications to
2774c50d8ae3SPaolo Bonzini 	 * the page tables and sees changes to vcpu->mode here.  The barrier
2775c50d8ae3SPaolo Bonzini 	 * in kvm_flush_remote_tlbs() achieves this; it pairs with
2776c50d8ae3SPaolo Bonzini 	 * vcpu_enter_guest() and walk_shadow_page_lockless_begin/end.
2777c50d8ae3SPaolo Bonzini 	 *
2778c50d8ae3SPaolo Bonzini 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2779c50d8ae3SPaolo Bonzini 	 * guest mode and/or lockless shadow page table walks.
2780c50d8ae3SPaolo Bonzini 	 */
2781c50d8ae3SPaolo Bonzini 	kvm_flush_remote_tlbs(kvm);
2782c50d8ae3SPaolo Bonzini 
2783c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2784c50d8ae3SPaolo Bonzini 		WARN_ON(!sp->role.invalid || sp->root_count);
2785c50d8ae3SPaolo Bonzini 		kvm_mmu_free_page(sp);
2786c50d8ae3SPaolo Bonzini 	}
2787c50d8ae3SPaolo Bonzini }
2788c50d8ae3SPaolo Bonzini 
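/*
 * Zap up to @nr_to_zap pages from the tail of active_mmu_pages, i.e. the
 * oldest pages, since new pages are always added at the head.  Zapping a
 * page's children can make the list unstable mid-walk, in which case the
 * walk restarts from the tail.
 */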
27896b82ef2cSSean Christopherson static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
27906b82ef2cSSean Christopherson 						  unsigned long nr_to_zap)
2791c50d8ae3SPaolo Bonzini {
27926b82ef2cSSean Christopherson 	unsigned long total_zapped = 0;
27936b82ef2cSSean Christopherson 	struct kvm_mmu_page *sp, *tmp;
2794ba7888ddSSean Christopherson 	LIST_HEAD(invalid_list);
27956b82ef2cSSean Christopherson 	bool unstable;
27966b82ef2cSSean Christopherson 	int nr_zapped;
2797c50d8ae3SPaolo Bonzini 
2798c50d8ae3SPaolo Bonzini 	if (list_empty(&kvm->arch.active_mmu_pages))
2799ba7888ddSSean Christopherson 		return 0;
2800c50d8ae3SPaolo Bonzini 
28016b82ef2cSSean Christopherson restart:
28026b82ef2cSSean Christopherson 	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
28036b82ef2cSSean Christopherson 		/*
28046b82ef2cSSean Christopherson 		 * Don't zap active root pages; the page itself can't be freed,
28056b82ef2cSSean Christopherson 		 * and zapping it will just force vCPUs to realloc and reload.
28066b82ef2cSSean Christopherson 		 */
28076b82ef2cSSean Christopherson 		if (sp->root_count)
28086b82ef2cSSean Christopherson 			continue;
28096b82ef2cSSean Christopherson 
28106b82ef2cSSean Christopherson 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
28116b82ef2cSSean Christopherson 						      &nr_zapped);
28126b82ef2cSSean Christopherson 		total_zapped += nr_zapped;
28136b82ef2cSSean Christopherson 		if (total_zapped >= nr_to_zap)
2814ba7888ddSSean Christopherson 			break;
2815ba7888ddSSean Christopherson 
28166b82ef2cSSean Christopherson 		if (unstable)
28176b82ef2cSSean Christopherson 			goto restart;
2818ba7888ddSSean Christopherson 	}
28196b82ef2cSSean Christopherson 
28206b82ef2cSSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
28216b82ef2cSSean Christopherson 
28226b82ef2cSSean Christopherson 	kvm->stat.mmu_recycled += total_zapped;
28236b82ef2cSSean Christopherson 	return total_zapped;
28246b82ef2cSSean Christopherson }
28256b82ef2cSSean Christopherson 
2826afe8d7e6SSean Christopherson static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2827afe8d7e6SSean Christopherson {
2828afe8d7e6SSean Christopherson 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2829afe8d7e6SSean Christopherson 		return kvm->arch.n_max_mmu_pages -
2830afe8d7e6SSean Christopherson 			kvm->arch.n_used_mmu_pages;
2831afe8d7e6SSean Christopherson 
2832afe8d7e6SSean Christopherson 	return 0;
2833c50d8ae3SPaolo Bonzini }
2834c50d8ae3SPaolo Bonzini 
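/*
 * Ensure the vCPU has room to allocate new shadow pages.  If fewer than
 * KVM_MIN_FREE_MMU_PAGES pages are available, recycle enough of the oldest
 * pages to bring the pool back up to roughly KVM_REFILL_PAGES.
 */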
2835ba7888ddSSean Christopherson static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2836ba7888ddSSean Christopherson {
28376b82ef2cSSean Christopherson 	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2838ba7888ddSSean Christopherson 
28396b82ef2cSSean Christopherson 	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2840ba7888ddSSean Christopherson 		return 0;
2841ba7888ddSSean Christopherson 
28426b82ef2cSSean Christopherson 	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2843ba7888ddSSean Christopherson 
2844ba7888ddSSean Christopherson 	if (!kvm_mmu_available_pages(vcpu->kvm))
2845ba7888ddSSean Christopherson 		return -ENOSPC;
2846ba7888ddSSean Christopherson 	return 0;
2847ba7888ddSSean Christopherson }
2848ba7888ddSSean Christopherson 
2849c50d8ae3SPaolo Bonzini /*
2850c50d8ae3SPaolo Bonzini  * Change the number of MMU pages allocated to the VM.
2851c50d8ae3SPaolo Bonzini  * Note: if goal_nr_mmu_pages is too small, the VM can deadlock.
2852c50d8ae3SPaolo Bonzini  */
2853c50d8ae3SPaolo Bonzini void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2854c50d8ae3SPaolo Bonzini {
2855c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
2856c50d8ae3SPaolo Bonzini 
2857c50d8ae3SPaolo Bonzini 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
28586b82ef2cSSean Christopherson 		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
28596b82ef2cSSean Christopherson 						  goal_nr_mmu_pages);
2860c50d8ae3SPaolo Bonzini 
2861c50d8ae3SPaolo Bonzini 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2862c50d8ae3SPaolo Bonzini 	}
2863c50d8ae3SPaolo Bonzini 
2864c50d8ae3SPaolo Bonzini 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2865c50d8ae3SPaolo Bonzini 
2866c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
2867c50d8ae3SPaolo Bonzini }
2868c50d8ae3SPaolo Bonzini 
2869c50d8ae3SPaolo Bonzini int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2870c50d8ae3SPaolo Bonzini {
2871c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2872c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
2873c50d8ae3SPaolo Bonzini 	int r;
2874c50d8ae3SPaolo Bonzini 
2875c50d8ae3SPaolo Bonzini 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2876c50d8ae3SPaolo Bonzini 	r = 0;
2877c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
2878c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2879c50d8ae3SPaolo Bonzini 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2880c50d8ae3SPaolo Bonzini 			 sp->role.word);
2881c50d8ae3SPaolo Bonzini 		r = 1;
2882c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2883c50d8ae3SPaolo Bonzini 	}
2884c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2885c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
2886c50d8ae3SPaolo Bonzini 
2887c50d8ae3SPaolo Bonzini 	return r;
2888c50d8ae3SPaolo Bonzini }
2889c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
2890c50d8ae3SPaolo Bonzini 
2891c50d8ae3SPaolo Bonzini static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2892c50d8ae3SPaolo Bonzini {
2893c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_unsync_page(sp);
2894c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_unsync;
2895c50d8ae3SPaolo Bonzini 	sp->unsync = 1;
2896c50d8ae3SPaolo Bonzini 
2897c50d8ae3SPaolo Bonzini 	kvm_mmu_mark_parents_unsync(sp);
2898c50d8ae3SPaolo Bonzini }
2899c50d8ae3SPaolo Bonzini 
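/*
 * Returns true if @gfn must remain write-protected: either the gfn is
 * write-tracked, or it backs shadow page tables that may not be allowed
 * to go unsync.  Otherwise marks the relevant shadow pages unsync and
 * returns false, permitting a writable SPTE.
 */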
2900c50d8ae3SPaolo Bonzini static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
2901c50d8ae3SPaolo Bonzini 				   bool can_unsync)
2902c50d8ae3SPaolo Bonzini {
2903c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2904c50d8ae3SPaolo Bonzini 
2905c50d8ae3SPaolo Bonzini 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
2906c50d8ae3SPaolo Bonzini 		return true;
2907c50d8ae3SPaolo Bonzini 
2908c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2909c50d8ae3SPaolo Bonzini 		if (!can_unsync)
2910c50d8ae3SPaolo Bonzini 			return true;
2911c50d8ae3SPaolo Bonzini 
2912c50d8ae3SPaolo Bonzini 		if (sp->unsync)
2913c50d8ae3SPaolo Bonzini 			continue;
2914c50d8ae3SPaolo Bonzini 
29153bae0459SSean Christopherson 		WARN_ON(sp->role.level != PG_LEVEL_4K);
2916c50d8ae3SPaolo Bonzini 		kvm_unsync_page(vcpu, sp);
2917c50d8ae3SPaolo Bonzini 	}
2918c50d8ae3SPaolo Bonzini 
2919c50d8ae3SPaolo Bonzini 	/*
2920c50d8ae3SPaolo Bonzini 	 * We need to ensure that the marking of unsync pages is visible
2921c50d8ae3SPaolo Bonzini 	 * before the SPTE is updated to allow writes because
2922c50d8ae3SPaolo Bonzini 	 * kvm_mmu_sync_roots() checks the unsync flags without holding
2923c50d8ae3SPaolo Bonzini 	 * the MMU lock and so can race with this. If the SPTE was updated
2924c50d8ae3SPaolo Bonzini 	 * before the page had been marked as unsync-ed, something like the
2925c50d8ae3SPaolo Bonzini 	 * following could happen:
2926c50d8ae3SPaolo Bonzini 	 *
2927c50d8ae3SPaolo Bonzini 	 * CPU 1                    CPU 2
2928c50d8ae3SPaolo Bonzini 	 * ---------------------------------------------------------------------
2929c50d8ae3SPaolo Bonzini 	 * 1.2 Host updates SPTE
2930c50d8ae3SPaolo Bonzini 	 *     to be writable
2931c50d8ae3SPaolo Bonzini 	 *                      2.1 Guest writes a GPTE for GVA X.
2932c50d8ae3SPaolo Bonzini 	 *                          (GPTE being in the guest page table shadowed
2933c50d8ae3SPaolo Bonzini 	 *                           by the SP from CPU 1.)
2934c50d8ae3SPaolo Bonzini 	 *                          This reads SPTE during the page table walk.
2935c50d8ae3SPaolo Bonzini 	 *                          Since SPTE.W is read as 1, there is no
2936c50d8ae3SPaolo Bonzini 	 *                          fault.
2937c50d8ae3SPaolo Bonzini 	 *
2938c50d8ae3SPaolo Bonzini 	 *                      2.2 Guest issues TLB flush.
2939c50d8ae3SPaolo Bonzini 	 *                          That causes a VM Exit.
2940c50d8ae3SPaolo Bonzini 	 *
2941c50d8ae3SPaolo Bonzini 	 *                      2.3 kvm_mmu_sync_pages() reads sp->unsync.
2942c50d8ae3SPaolo Bonzini 	 *                          Since it is false, it just returns.
2943c50d8ae3SPaolo Bonzini 	 *
2944c50d8ae3SPaolo Bonzini 	 *                      2.4 Guest accesses GVA X.
2945c50d8ae3SPaolo Bonzini 	 *                          Because the mapping in the SP was not
2946c50d8ae3SPaolo Bonzini 	 *                          updated, the old mapping for GVA X is
2947c50d8ae3SPaolo Bonzini 	 *                          incorrectly used.
2948c50d8ae3SPaolo Bonzini 	 * 1.1 Host marks SP
2949c50d8ae3SPaolo Bonzini 	 *     as unsync
2950c50d8ae3SPaolo Bonzini 	 *     (sp->unsync = true)
2951c50d8ae3SPaolo Bonzini 	 *
2952c50d8ae3SPaolo Bonzini 	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
2953c50d8ae3SPaolo Bonzini 	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
2954c50d8ae3SPaolo Bonzini 	 * pairs with this write barrier.
2955c50d8ae3SPaolo Bonzini 	 */
2956c50d8ae3SPaolo Bonzini 	smp_wmb();
2957c50d8ae3SPaolo Bonzini 
2958c50d8ae3SPaolo Bonzini 	return false;
2959c50d8ae3SPaolo Bonzini }
2960c50d8ae3SPaolo Bonzini 
2961c50d8ae3SPaolo Bonzini static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
2962c50d8ae3SPaolo Bonzini {
2963c50d8ae3SPaolo Bonzini 	if (pfn_valid(pfn))
2964c50d8ae3SPaolo Bonzini 		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
2965c50d8ae3SPaolo Bonzini 			/*
2966c50d8ae3SPaolo Bonzini 			 * Some reserved pages, such as those from NVDIMM
2967c50d8ae3SPaolo Bonzini 			 * DAX devices, are not for MMIO, and can be mapped
2968c50d8ae3SPaolo Bonzini 			 * with cached memory type for better performance.
2969c50d8ae3SPaolo Bonzini 			 * However, the above check misidentifies those pages
2970c50d8ae3SPaolo Bonzini 			 * as MMIO, and results in KVM mapping them with the UC
2971c50d8ae3SPaolo Bonzini 			 * memory type, which would hurt performance.
2972c50d8ae3SPaolo Bonzini 			 * Therefore, also check the host memory type and only
2973c50d8ae3SPaolo Bonzini 			 * treat UC/UC-/WC pages as MMIO.
2974c50d8ae3SPaolo Bonzini 			 */
2975c50d8ae3SPaolo Bonzini 			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
2976c50d8ae3SPaolo Bonzini 
2977c50d8ae3SPaolo Bonzini 	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
2978c50d8ae3SPaolo Bonzini 				     pfn_to_hpa(pfn + 1) - 1,
2979c50d8ae3SPaolo Bonzini 				     E820_TYPE_RAM);
2980c50d8ae3SPaolo Bonzini }
2981c50d8ae3SPaolo Bonzini 
2982c50d8ae3SPaolo Bonzini /* Bits which may be returned by set_spte() */
2983c50d8ae3SPaolo Bonzini #define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
2984c50d8ae3SPaolo Bonzini #define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
2985c50d8ae3SPaolo Bonzini 
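/*
 * Compute a leaf SPTE for @gfn/@pfn and install it via mmu_spte_update().
 * Returns a mask of SET_SPTE_* bits telling the caller whether the gfn had
 * to remain write-protected and whether remote TLBs need to be flushed.
 */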
2986c50d8ae3SPaolo Bonzini static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
29870a2b64c5SBen Gardon 		    unsigned int pte_access, int level,
2988c50d8ae3SPaolo Bonzini 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2989c50d8ae3SPaolo Bonzini 		    bool can_unsync, bool host_writable)
2990c50d8ae3SPaolo Bonzini {
2991c50d8ae3SPaolo Bonzini 	u64 spte = 0;
2992c50d8ae3SPaolo Bonzini 	int ret = 0;
2993c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
2994c50d8ae3SPaolo Bonzini 
2995c50d8ae3SPaolo Bonzini 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
2996c50d8ae3SPaolo Bonzini 		return 0;
2997c50d8ae3SPaolo Bonzini 
299857354682SSean Christopherson 	sp = sptep_to_sp(sptep);
2999c50d8ae3SPaolo Bonzini 	if (sp_ad_disabled(sp))
3000c50d8ae3SPaolo Bonzini 		spte |= SPTE_AD_DISABLED_MASK;
3001c50d8ae3SPaolo Bonzini 	else if (kvm_vcpu_ad_need_write_protect(vcpu))
3002c50d8ae3SPaolo Bonzini 		spte |= SPTE_AD_WRPROT_ONLY_MASK;
3003c50d8ae3SPaolo Bonzini 
3004c50d8ae3SPaolo Bonzini 	/*
3005c50d8ae3SPaolo Bonzini 	 * For the EPT case, shadow_present_mask is 0 if hardware
3006c50d8ae3SPaolo Bonzini 	 * supports exec-only page table entries.  In that case,
3007c50d8ae3SPaolo Bonzini 	 * ACC_USER_MASK and shadow_user_mask are used to represent
3008c50d8ae3SPaolo Bonzini 	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
3009c50d8ae3SPaolo Bonzini 	 */
3010c50d8ae3SPaolo Bonzini 	spte |= shadow_present_mask;
3011c50d8ae3SPaolo Bonzini 	if (!speculative)
3012c50d8ae3SPaolo Bonzini 		spte |= spte_shadow_accessed_mask(spte);
3013c50d8ae3SPaolo Bonzini 
30143bae0459SSean Christopherson 	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
3015c50d8ae3SPaolo Bonzini 	    is_nx_huge_page_enabled()) {
3016c50d8ae3SPaolo Bonzini 		pte_access &= ~ACC_EXEC_MASK;
3017c50d8ae3SPaolo Bonzini 	}
3018c50d8ae3SPaolo Bonzini 
3019c50d8ae3SPaolo Bonzini 	if (pte_access & ACC_EXEC_MASK)
3020c50d8ae3SPaolo Bonzini 		spte |= shadow_x_mask;
3021c50d8ae3SPaolo Bonzini 	else
3022c50d8ae3SPaolo Bonzini 		spte |= shadow_nx_mask;
3023c50d8ae3SPaolo Bonzini 
3024c50d8ae3SPaolo Bonzini 	if (pte_access & ACC_USER_MASK)
3025c50d8ae3SPaolo Bonzini 		spte |= shadow_user_mask;
3026c50d8ae3SPaolo Bonzini 
30273bae0459SSean Christopherson 	if (level > PG_LEVEL_4K)
3028c50d8ae3SPaolo Bonzini 		spte |= PT_PAGE_SIZE_MASK;
3029c50d8ae3SPaolo Bonzini 	if (tdp_enabled)
3030afaf0b2fSSean Christopherson 		spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn,
3031c50d8ae3SPaolo Bonzini 			kvm_is_mmio_pfn(pfn));
3032c50d8ae3SPaolo Bonzini 
3033c50d8ae3SPaolo Bonzini 	if (host_writable)
3034c50d8ae3SPaolo Bonzini 		spte |= SPTE_HOST_WRITEABLE;
3035c50d8ae3SPaolo Bonzini 	else
3036c50d8ae3SPaolo Bonzini 		pte_access &= ~ACC_WRITE_MASK;
3037c50d8ae3SPaolo Bonzini 
3038c50d8ae3SPaolo Bonzini 	if (!kvm_is_mmio_pfn(pfn))
3039c50d8ae3SPaolo Bonzini 		spte |= shadow_me_mask;
3040c50d8ae3SPaolo Bonzini 
3041c50d8ae3SPaolo Bonzini 	spte |= (u64)pfn << PAGE_SHIFT;
3042c50d8ae3SPaolo Bonzini 
3043c50d8ae3SPaolo Bonzini 	if (pte_access & ACC_WRITE_MASK) {
3044c50d8ae3SPaolo Bonzini 		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
3045c50d8ae3SPaolo Bonzini 
3046c50d8ae3SPaolo Bonzini 		/*
3047c50d8ae3SPaolo Bonzini 		 * Optimization: for pte sync, if spte was writable the hash
3048c50d8ae3SPaolo Bonzini 		 * lookup is unnecessary (and expensive).  Write protection
3049c50d8ae3SPaolo Bonzini 		 * is the responsibility of mmu_get_page / kvm_sync_page.
3050c50d8ae3SPaolo Bonzini 		 * The same reasoning applies to dirty page accounting.
3051c50d8ae3SPaolo Bonzini 		 */
3052c50d8ae3SPaolo Bonzini 		if (!can_unsync && is_writable_pte(*sptep))
3053c50d8ae3SPaolo Bonzini 			goto set_pte;
3054c50d8ae3SPaolo Bonzini 
3055c50d8ae3SPaolo Bonzini 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
3056c50d8ae3SPaolo Bonzini 			pgprintk("%s: found shadow page for %llx, marking ro\n",
3057c50d8ae3SPaolo Bonzini 				 __func__, gfn);
3058c50d8ae3SPaolo Bonzini 			ret |= SET_SPTE_WRITE_PROTECTED_PT;
3059c50d8ae3SPaolo Bonzini 			pte_access &= ~ACC_WRITE_MASK;
3060c50d8ae3SPaolo Bonzini 			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
3061c50d8ae3SPaolo Bonzini 		}
3062c50d8ae3SPaolo Bonzini 	}
3063c50d8ae3SPaolo Bonzini 
3064c50d8ae3SPaolo Bonzini 	if (pte_access & ACC_WRITE_MASK) {
3065c50d8ae3SPaolo Bonzini 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
3066c50d8ae3SPaolo Bonzini 		spte |= spte_shadow_dirty_mask(spte);
3067c50d8ae3SPaolo Bonzini 	}
3068c50d8ae3SPaolo Bonzini 
3069c50d8ae3SPaolo Bonzini 	if (speculative)
3070c50d8ae3SPaolo Bonzini 		spte = mark_spte_for_access_track(spte);
3071c50d8ae3SPaolo Bonzini 
3072c50d8ae3SPaolo Bonzini set_pte:
3073c50d8ae3SPaolo Bonzini 	if (mmu_spte_update(sptep, spte))
3074c50d8ae3SPaolo Bonzini 		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
3075c50d8ae3SPaolo Bonzini 	return ret;
3076c50d8ae3SPaolo Bonzini }
3077c50d8ae3SPaolo Bonzini 
30780a2b64c5SBen Gardon static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
30790a2b64c5SBen Gardon 			unsigned int pte_access, int write_fault, int level,
30800a2b64c5SBen Gardon 			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
30810a2b64c5SBen Gardon 			bool host_writable)
3082c50d8ae3SPaolo Bonzini {
3083c50d8ae3SPaolo Bonzini 	int was_rmapped = 0;
3084c50d8ae3SPaolo Bonzini 	int rmap_count;
3085c50d8ae3SPaolo Bonzini 	int set_spte_ret;
3086c50d8ae3SPaolo Bonzini 	int ret = RET_PF_RETRY;
3087c50d8ae3SPaolo Bonzini 	bool flush = false;
3088c50d8ae3SPaolo Bonzini 
3089c50d8ae3SPaolo Bonzini 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
3090c50d8ae3SPaolo Bonzini 		 *sptep, write_fault, gfn);
3091c50d8ae3SPaolo Bonzini 
3092c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep)) {
3093c50d8ae3SPaolo Bonzini 		/*
3094c50d8ae3SPaolo Bonzini 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
3095c50d8ae3SPaolo Bonzini 		 * the parent of the now unreachable PTE.
3096c50d8ae3SPaolo Bonzini 		 */
30973bae0459SSean Christopherson 		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
3098c50d8ae3SPaolo Bonzini 			struct kvm_mmu_page *child;
3099c50d8ae3SPaolo Bonzini 			u64 pte = *sptep;
3100c50d8ae3SPaolo Bonzini 
3101e47c4aeeSSean Christopherson 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
3102c50d8ae3SPaolo Bonzini 			drop_parent_pte(child, sptep);
3103c50d8ae3SPaolo Bonzini 			flush = true;
3104c50d8ae3SPaolo Bonzini 		} else if (pfn != spte_to_pfn(*sptep)) {
3105c50d8ae3SPaolo Bonzini 			pgprintk("hfn old %llx new %llx\n",
3106c50d8ae3SPaolo Bonzini 				 spte_to_pfn(*sptep), pfn);
3107c50d8ae3SPaolo Bonzini 			drop_spte(vcpu->kvm, sptep);
3108c50d8ae3SPaolo Bonzini 			flush = true;
3109c50d8ae3SPaolo Bonzini 		} else
3110c50d8ae3SPaolo Bonzini 			was_rmapped = 1;
3111c50d8ae3SPaolo Bonzini 	}
3112c50d8ae3SPaolo Bonzini 
3113c50d8ae3SPaolo Bonzini 	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
3114c50d8ae3SPaolo Bonzini 				speculative, true, host_writable);
3115c50d8ae3SPaolo Bonzini 	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
3116c50d8ae3SPaolo Bonzini 		if (write_fault)
3117c50d8ae3SPaolo Bonzini 			ret = RET_PF_EMULATE;
31188c8560b8SSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
3119c50d8ae3SPaolo Bonzini 	}
3120c50d8ae3SPaolo Bonzini 
3121c50d8ae3SPaolo Bonzini 	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
3122c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
3123c50d8ae3SPaolo Bonzini 				KVM_PAGES_PER_HPAGE(level));
3124c50d8ae3SPaolo Bonzini 
3125c50d8ae3SPaolo Bonzini 	if (unlikely(is_mmio_spte(*sptep)))
3126c50d8ae3SPaolo Bonzini 		ret = RET_PF_EMULATE;
3127c50d8ae3SPaolo Bonzini 
3128c50d8ae3SPaolo Bonzini 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
3129c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_set_spte(level, gfn, sptep);
3130c50d8ae3SPaolo Bonzini 	if (!was_rmapped && is_large_pte(*sptep))
3131c50d8ae3SPaolo Bonzini 		++vcpu->kvm->stat.lpages;
3132c50d8ae3SPaolo Bonzini 
3133c50d8ae3SPaolo Bonzini 	if (is_shadow_present_pte(*sptep)) {
3134c50d8ae3SPaolo Bonzini 		if (!was_rmapped) {
3135c50d8ae3SPaolo Bonzini 			rmap_count = rmap_add(vcpu, sptep, gfn);
3136c50d8ae3SPaolo Bonzini 			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
3137c50d8ae3SPaolo Bonzini 				rmap_recycle(vcpu, sptep, gfn);
3138c50d8ae3SPaolo Bonzini 		}
3139c50d8ae3SPaolo Bonzini 	}
3140c50d8ae3SPaolo Bonzini 
3141c50d8ae3SPaolo Bonzini 	return ret;
3142c50d8ae3SPaolo Bonzini }
3143c50d8ae3SPaolo Bonzini 
3144c50d8ae3SPaolo Bonzini static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
3145c50d8ae3SPaolo Bonzini 				     bool no_dirty_log)
3146c50d8ae3SPaolo Bonzini {
3147c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
3148c50d8ae3SPaolo Bonzini 
3149c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
3150c50d8ae3SPaolo Bonzini 	if (!slot)
3151c50d8ae3SPaolo Bonzini 		return KVM_PFN_ERR_FAULT;
3152c50d8ae3SPaolo Bonzini 
3153c50d8ae3SPaolo Bonzini 	return gfn_to_pfn_memslot_atomic(slot, gfn);
3154c50d8ae3SPaolo Bonzini }
3155c50d8ae3SPaolo Bonzini 
3156c50d8ae3SPaolo Bonzini static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
3157c50d8ae3SPaolo Bonzini 				    struct kvm_mmu_page *sp,
3158c50d8ae3SPaolo Bonzini 				    u64 *start, u64 *end)
3159c50d8ae3SPaolo Bonzini {
3160c50d8ae3SPaolo Bonzini 	struct page *pages[PTE_PREFETCH_NUM];
3161c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *slot;
31620a2b64c5SBen Gardon 	unsigned int access = sp->role.access;
3163c50d8ae3SPaolo Bonzini 	int i, ret;
3164c50d8ae3SPaolo Bonzini 	gfn_t gfn;
3165c50d8ae3SPaolo Bonzini 
3166c50d8ae3SPaolo Bonzini 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
3167c50d8ae3SPaolo Bonzini 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
3168c50d8ae3SPaolo Bonzini 	if (!slot)
3169c50d8ae3SPaolo Bonzini 		return -1;
3170c50d8ae3SPaolo Bonzini 
3171c50d8ae3SPaolo Bonzini 	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
3172c50d8ae3SPaolo Bonzini 	if (ret <= 0)
3173c50d8ae3SPaolo Bonzini 		return -1;
3174c50d8ae3SPaolo Bonzini 
3175c50d8ae3SPaolo Bonzini 	for (i = 0; i < ret; i++, gfn++, start++) {
3176c50d8ae3SPaolo Bonzini 		mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
3177c50d8ae3SPaolo Bonzini 			     page_to_pfn(pages[i]), true, true);
3178c50d8ae3SPaolo Bonzini 		put_page(pages[i]);
3179c50d8ae3SPaolo Bonzini 	}
3180c50d8ae3SPaolo Bonzini 
3181c50d8ae3SPaolo Bonzini 	return 0;
3182c50d8ae3SPaolo Bonzini }
3183c50d8ae3SPaolo Bonzini 
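/*
 * Prefetch the PTE_PREFETCH_NUM-aligned window of sptes around @sptep,
 * mapping each contiguous run of not-yet-present sptes with a single
 * gfn_to_page_many_atomic() batch.
 */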
3184c50d8ae3SPaolo Bonzini static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
3185c50d8ae3SPaolo Bonzini 				  struct kvm_mmu_page *sp, u64 *sptep)
3186c50d8ae3SPaolo Bonzini {
3187c50d8ae3SPaolo Bonzini 	u64 *spte, *start = NULL;
3188c50d8ae3SPaolo Bonzini 	int i;
3189c50d8ae3SPaolo Bonzini 
3190c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
3191c50d8ae3SPaolo Bonzini 
3192c50d8ae3SPaolo Bonzini 	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
3193c50d8ae3SPaolo Bonzini 	spte = sp->spt + i;
3194c50d8ae3SPaolo Bonzini 
3195c50d8ae3SPaolo Bonzini 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
3196c50d8ae3SPaolo Bonzini 		if (is_shadow_present_pte(*spte) || spte == sptep) {
3197c50d8ae3SPaolo Bonzini 			if (!start)
3198c50d8ae3SPaolo Bonzini 				continue;
3199c50d8ae3SPaolo Bonzini 			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
3200c50d8ae3SPaolo Bonzini 				break;
3201c50d8ae3SPaolo Bonzini 			start = NULL;
3202c50d8ae3SPaolo Bonzini 		} else if (!start)
3203c50d8ae3SPaolo Bonzini 			start = spte;
3204c50d8ae3SPaolo Bonzini 	}
3205c50d8ae3SPaolo Bonzini }
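
/*
 * Editor's sketch, not part of the original source: the index math in
 * __direct_pte_prefetch() rounds sptep down to the start of a
 * PTE_PREFETCH_NUM-entry window, assuming PTE_PREFETCH_NUM is a power
 * of two (8 elsewhere in this file).  A standalone model of that step,
 * with the constant name prefixed to mark it as hypothetical:
 */
#define SKETCH_PTE_PREFETCH_NUM 8UL	/* assumed value of PTE_PREFETCH_NUM */

static unsigned long sketch_prefetch_window_start(unsigned long spte_index)
{
	/* Clear the low bits: 13 -> 8, 8 -> 8, 7 -> 0. */
	return spte_index & ~(SKETCH_PTE_PREFETCH_NUM - 1);
}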
3206c50d8ae3SPaolo Bonzini 
3207c50d8ae3SPaolo Bonzini static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
3208c50d8ae3SPaolo Bonzini {
3209c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3210c50d8ae3SPaolo Bonzini 
321157354682SSean Christopherson 	sp = sptep_to_sp(sptep);
3212c50d8ae3SPaolo Bonzini 
3213c50d8ae3SPaolo Bonzini 	/*
3214c50d8ae3SPaolo Bonzini 	 * Without accessed bits, there's no way to distinguish between
3215c50d8ae3SPaolo Bonzini 	 * actually accessed translations and prefetched ones, so disable
3216c50d8ae3SPaolo Bonzini 	 * PTE prefetch if accessed bits aren't available.
3217c50d8ae3SPaolo Bonzini 	 */
3218c50d8ae3SPaolo Bonzini 	if (sp_ad_disabled(sp))
3219c50d8ae3SPaolo Bonzini 		return;
3220c50d8ae3SPaolo Bonzini 
32213bae0459SSean Christopherson 	if (sp->role.level > PG_LEVEL_4K)
3222c50d8ae3SPaolo Bonzini 		return;
3223c50d8ae3SPaolo Bonzini 
3224c50d8ae3SPaolo Bonzini 	__direct_pte_prefetch(vcpu, sp, sptep);
3225c50d8ae3SPaolo Bonzini }
3226c50d8ae3SPaolo Bonzini 
3227db543216SSean Christopherson static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
3228293e306eSSean Christopherson 				  kvm_pfn_t pfn, struct kvm_memory_slot *slot)
3229db543216SSean Christopherson {
3230db543216SSean Christopherson 	unsigned long hva;
3231db543216SSean Christopherson 	pte_t *pte;
3232db543216SSean Christopherson 	int level;
3233db543216SSean Christopherson 
3234e851265aSSean Christopherson 	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
32353bae0459SSean Christopherson 		return PG_LEVEL_4K;
3236db543216SSean Christopherson 
3237293e306eSSean Christopherson 	/*
3238293e306eSSean Christopherson 	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
3239293e306eSSean Christopherson 	 * is not solely for performance, it's also necessary to avoid the
3240293e306eSSean Christopherson 	 * "writable" check in __gfn_to_hva_many(), which will always fail on
3241293e306eSSean Christopherson 	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
3242293e306eSSean Christopherson 	 * page fault steps have already verified the guest isn't writing a
3243293e306eSSean Christopherson 	 * read-only memslot.
3244293e306eSSean Christopherson 	 */
3245db543216SSean Christopherson 	hva = __gfn_to_hva_memslot(slot, gfn);
3246db543216SSean Christopherson 
3247db543216SSean Christopherson 	pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level);
3248db543216SSean Christopherson 	if (unlikely(!pte))
32493bae0459SSean Christopherson 		return PG_LEVEL_4K;
3250db543216SSean Christopherson 
3251db543216SSean Christopherson 	return level;
3252db543216SSean Christopherson }
3253db543216SSean Christopherson 
325483f06fa7SSean Christopherson static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
325583f06fa7SSean Christopherson 				   int max_level, kvm_pfn_t *pfnp)
32560885904dSSean Christopherson {
3257293e306eSSean Christopherson 	struct kvm_memory_slot *slot;
32582c0629f4SSean Christopherson 	struct kvm_lpage_info *linfo;
32590885904dSSean Christopherson 	kvm_pfn_t pfn = *pfnp;
326017eff019SSean Christopherson 	kvm_pfn_t mask;
326183f06fa7SSean Christopherson 	int level;
32620885904dSSean Christopherson 
32633bae0459SSean Christopherson 	if (unlikely(max_level == PG_LEVEL_4K))
32643bae0459SSean Christopherson 		return PG_LEVEL_4K;
326517eff019SSean Christopherson 
3266e851265aSSean Christopherson 	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
32673bae0459SSean Christopherson 		return PG_LEVEL_4K;
326817eff019SSean Christopherson 
3269293e306eSSean Christopherson 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
3270293e306eSSean Christopherson 	if (!slot)
32713bae0459SSean Christopherson 		return PG_LEVEL_4K;
3272293e306eSSean Christopherson 
32731d92d2e8SSean Christopherson 	max_level = min(max_level, max_huge_page_level);
32743bae0459SSean Christopherson 	for ( ; max_level > PG_LEVEL_4K; max_level--) {
32752c0629f4SSean Christopherson 		linfo = lpage_info_slot(gfn, slot, max_level);
32762c0629f4SSean Christopherson 		if (!linfo->disallow_lpage)
3277293e306eSSean Christopherson 			break;
3278293e306eSSean Christopherson 	}
3279293e306eSSean Christopherson 
32803bae0459SSean Christopherson 	if (max_level == PG_LEVEL_4K)
32813bae0459SSean Christopherson 		return PG_LEVEL_4K;
3282293e306eSSean Christopherson 
3283293e306eSSean Christopherson 	level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
32843bae0459SSean Christopherson 	if (level == PG_LEVEL_4K)
328583f06fa7SSean Christopherson 		return level;
328617eff019SSean Christopherson 
3287db543216SSean Christopherson 	level = min(level, max_level);
32884cd071d1SSean Christopherson 
32890885904dSSean Christopherson 	/*
32904cd071d1SSean Christopherson 	 * mmu_notifier_retry() was successful and mmu_lock is held, so
32914cd071d1SSean Christopherson 	 * the pmd can't be split out from under us.
32920885904dSSean Christopherson 	 */
32930885904dSSean Christopherson 	mask = KVM_PAGES_PER_HPAGE(level) - 1;
32940885904dSSean Christopherson 	VM_BUG_ON((gfn & mask) != (pfn & mask));
32954cd071d1SSean Christopherson 	*pfnp = pfn & ~mask;
329683f06fa7SSean Christopherson 
329783f06fa7SSean Christopherson 	return level;
32980885904dSSean Christopherson }
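
/*
 * Editor's sketch, not part of the original source: the tail of
 * kvm_mmu_hugepage_adjust() aligns the pfn to the chosen huge-page
 * size.  For a 2M mapping (512 4K pages), mask == 0x1ff, the VM_BUG_ON
 * requires gfn and pfn to agree in their low 9 bits, and the pfn is
 * rounded down to the start of the huge frame:
 */
#define SKETCH_PAGES_PER_2M 512UL	/* assumed: KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) */

static unsigned long sketch_align_pfn_to_2m(unsigned long pfn)
{
	/* e.g. 0x1234 & ~0x1ff == 0x1200 */
	return pfn & ~(SKETCH_PAGES_PER_2M - 1);
}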
32990885904dSSean Christopherson 
3300c50d8ae3SPaolo Bonzini static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
3301c50d8ae3SPaolo Bonzini 				       gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
3302c50d8ae3SPaolo Bonzini {
3303c50d8ae3SPaolo Bonzini 	int level = *levelp;
3304c50d8ae3SPaolo Bonzini 	u64 spte = *it.sptep;
3305c50d8ae3SPaolo Bonzini 
33063bae0459SSean Christopherson 	if (it.level == level && level > PG_LEVEL_4K &&
3307c50d8ae3SPaolo Bonzini 	    is_nx_huge_page_enabled() &&
3308c50d8ae3SPaolo Bonzini 	    is_shadow_present_pte(spte) &&
3309c50d8ae3SPaolo Bonzini 	    !is_large_pte(spte)) {
3310c50d8ae3SPaolo Bonzini 		/*
3311c50d8ae3SPaolo Bonzini 		 * A small SPTE exists for this pfn, but FNAME(fetch)
3312c50d8ae3SPaolo Bonzini 		 * and __direct_map would like to create a large PTE
3313c50d8ae3SPaolo Bonzini 		 * instead: just force them to go down another level,
3314c50d8ae3SPaolo Bonzini 		 * patching the next 9 bits of the address back into
3315c50d8ae3SPaolo Bonzini 		 * pfn for them.
3316c50d8ae3SPaolo Bonzini 		 */
3317c50d8ae3SPaolo Bonzini 		u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1);
3318c50d8ae3SPaolo Bonzini 		*pfnp |= gfn & page_mask;
3319c50d8ae3SPaolo Bonzini 		(*levelp)--;
3320c50d8ae3SPaolo Bonzini 	}
3321c50d8ae3SPaolo Bonzini }
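
/*
 * Editor's sketch, not part of the original source: the page_mask in
 * disallowed_hugepage_adjust() isolates exactly the 9 gfn bits that
 * select the next-smaller page within the disallowed huge page.
 * Stepping from 1G (level 3) down to 2M (level 2), with 512 4K pages
 * per 2M page:
 *
 *	KVM_PAGES_PER_HPAGE(3) - KVM_PAGES_PER_HPAGE(2)
 *		= 262144 - 512 = 0x3fe00	(gfn bits 17:9)
 *
 * so "*pfnp |= gfn & 0x3fe00" records which 2M chunk of the 1G range
 * the fault hit, and the walk continues one level down.
 */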
3322c50d8ae3SPaolo Bonzini 
3323c50d8ae3SPaolo Bonzini static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
332483f06fa7SSean Christopherson 			int map_writable, int max_level, kvm_pfn_t pfn,
332583f06fa7SSean Christopherson 			bool prefault, bool account_disallowed_nx_lpage)
3326c50d8ae3SPaolo Bonzini {
3327c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator it;
3328c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
332983f06fa7SSean Christopherson 	int level, ret;
3330c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
3331c50d8ae3SPaolo Bonzini 	gfn_t base_gfn = gfn;
3332c50d8ae3SPaolo Bonzini 
33330c7a98e3SSean Christopherson 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
3334c50d8ae3SPaolo Bonzini 		return RET_PF_RETRY;
3335c50d8ae3SPaolo Bonzini 
333683f06fa7SSean Christopherson 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn);
33374cd071d1SSean Christopherson 
3338c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
3339c50d8ae3SPaolo Bonzini 	for_each_shadow_entry(vcpu, gpa, it) {
3340c50d8ae3SPaolo Bonzini 		/*
3341c50d8ae3SPaolo Bonzini 		 * We cannot overwrite existing page tables with an NX
3342c50d8ae3SPaolo Bonzini 		 * large page, as the leaf could be executable.
3343c50d8ae3SPaolo Bonzini 		 */
3344c50d8ae3SPaolo Bonzini 		disallowed_hugepage_adjust(it, gfn, &pfn, &level);
3345c50d8ae3SPaolo Bonzini 
3346c50d8ae3SPaolo Bonzini 		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
3347c50d8ae3SPaolo Bonzini 		if (it.level == level)
3348c50d8ae3SPaolo Bonzini 			break;
3349c50d8ae3SPaolo Bonzini 
3350c50d8ae3SPaolo Bonzini 		drop_large_spte(vcpu, it.sptep);
3351c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(*it.sptep)) {
3352c50d8ae3SPaolo Bonzini 			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
3353c50d8ae3SPaolo Bonzini 					      it.level - 1, true, ACC_ALL);
3354c50d8ae3SPaolo Bonzini 
3355c50d8ae3SPaolo Bonzini 			link_shadow_page(vcpu, it.sptep, sp);
33562cb70fd4SSean Christopherson 			if (account_disallowed_nx_lpage)
3357c50d8ae3SPaolo Bonzini 				account_huge_nx_page(vcpu->kvm, sp);
3358c50d8ae3SPaolo Bonzini 		}
3359c50d8ae3SPaolo Bonzini 	}
3360c50d8ae3SPaolo Bonzini 
3361c50d8ae3SPaolo Bonzini 	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
3362c50d8ae3SPaolo Bonzini 			   write, level, base_gfn, pfn, prefault,
3363c50d8ae3SPaolo Bonzini 			   map_writable);
3364c50d8ae3SPaolo Bonzini 	direct_pte_prefetch(vcpu, it.sptep);
3365c50d8ae3SPaolo Bonzini 	++vcpu->stat.pf_fixed;
3366c50d8ae3SPaolo Bonzini 	return ret;
3367c50d8ae3SPaolo Bonzini }
3368c50d8ae3SPaolo Bonzini 
3369c50d8ae3SPaolo Bonzini static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
3370c50d8ae3SPaolo Bonzini {
3371c50d8ae3SPaolo Bonzini 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
3372c50d8ae3SPaolo Bonzini }
3373c50d8ae3SPaolo Bonzini 
3374c50d8ae3SPaolo Bonzini static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
3375c50d8ae3SPaolo Bonzini {
3376c50d8ae3SPaolo Bonzini 	/*
3377c50d8ae3SPaolo Bonzini 	 * Do not cache the mmio info caused by writing the readonly gfn
3378c50d8ae3SPaolo Bonzini 	 * into the spte; otherwise a read access on the readonly gfn can
3379c50d8ae3SPaolo Bonzini 	 * also cause an mmio page fault and be treated as an mmio access.
3380c50d8ae3SPaolo Bonzini 	 */
3381c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_RO_FAULT)
3382c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3383c50d8ae3SPaolo Bonzini 
3384c50d8ae3SPaolo Bonzini 	if (pfn == KVM_PFN_ERR_HWPOISON) {
3385c50d8ae3SPaolo Bonzini 		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
3386c50d8ae3SPaolo Bonzini 		return RET_PF_RETRY;
3387c50d8ae3SPaolo Bonzini 	}
3388c50d8ae3SPaolo Bonzini 
3389c50d8ae3SPaolo Bonzini 	return -EFAULT;
3390c50d8ae3SPaolo Bonzini }
3391c50d8ae3SPaolo Bonzini 
3392c50d8ae3SPaolo Bonzini static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
33930a2b64c5SBen Gardon 				kvm_pfn_t pfn, unsigned int access,
33940a2b64c5SBen Gardon 				int *ret_val)
3395c50d8ae3SPaolo Bonzini {
3396c50d8ae3SPaolo Bonzini 	/* The pfn is invalid; report the error! */
3397c50d8ae3SPaolo Bonzini 	if (unlikely(is_error_pfn(pfn))) {
3398c50d8ae3SPaolo Bonzini 		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
3399c50d8ae3SPaolo Bonzini 		return true;
3400c50d8ae3SPaolo Bonzini 	}
3401c50d8ae3SPaolo Bonzini 
3402c50d8ae3SPaolo Bonzini 	if (unlikely(is_noslot_pfn(pfn)))
3403c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, gva, gfn,
3404c50d8ae3SPaolo Bonzini 				     access & shadow_mmio_access_mask);
3405c50d8ae3SPaolo Bonzini 
3406c50d8ae3SPaolo Bonzini 	return false;
3407c50d8ae3SPaolo Bonzini }
3408c50d8ae3SPaolo Bonzini 
3409c50d8ae3SPaolo Bonzini static bool page_fault_can_be_fast(u32 error_code)
3410c50d8ae3SPaolo Bonzini {
3411c50d8ae3SPaolo Bonzini 	/*
3412c50d8ae3SPaolo Bonzini 	 * Do not fix an mmio spte with an invalid generation number; it
3413c50d8ae3SPaolo Bonzini 	 * needs to be updated by the slow page fault path.
3414c50d8ae3SPaolo Bonzini 	 */
3415c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK))
3416c50d8ae3SPaolo Bonzini 		return false;
3417c50d8ae3SPaolo Bonzini 
3418c50d8ae3SPaolo Bonzini 	/* See if the page fault is due to an NX violation */
3419c50d8ae3SPaolo Bonzini 	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
3420c50d8ae3SPaolo Bonzini 		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
3421c50d8ae3SPaolo Bonzini 		return false;
3422c50d8ae3SPaolo Bonzini 
3423c50d8ae3SPaolo Bonzini 	/*
3424c50d8ae3SPaolo Bonzini 	 * #PF can be fast if:
3425c50d8ae3SPaolo Bonzini 	 * 1. The shadow page table entry is not present, which could mean that
3426c50d8ae3SPaolo Bonzini 	 *    the fault is potentially caused by access tracking (if enabled).
3427c50d8ae3SPaolo Bonzini 	 * 2. The shadow page table entry is present and the fault
3428c50d8ae3SPaolo Bonzini 	 *    is caused by write-protect, which means we just need to change
3429c50d8ae3SPaolo Bonzini 	 *    the W bit of the spte; this can be done outside of mmu-lock.
3430c50d8ae3SPaolo Bonzini 	 *
3431c50d8ae3SPaolo Bonzini 	 * However, if access tracking is disabled we know that a non-present
3432c50d8ae3SPaolo Bonzini 	 * page must be a genuine page fault where we have to create a new SPTE.
3433c50d8ae3SPaolo Bonzini 	 * So, if access tracking is disabled, we return true only for write
3434c50d8ae3SPaolo Bonzini 	 * accesses to a present page.
3435c50d8ae3SPaolo Bonzini 	 */
3436c50d8ae3SPaolo Bonzini 
3437c50d8ae3SPaolo Bonzini 	return shadow_acc_track_mask != 0 ||
3438c50d8ae3SPaolo Bonzini 	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
3439c50d8ae3SPaolo Bonzini 		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3440c50d8ae3SPaolo Bonzini }
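
/*
 * Editor's sketch, not part of the original source: the checks in
 * page_fault_can_be_fast() decoded for common error codes (P = present,
 * W = write, F = fetch, RSVD = reserved bit):
 *
 *	RSVD set	-> no: stale MMIO spte, slow path must refresh it
 *	F | P		-> no: an NX violation is never fast
 *	W | P		-> yes: write-protect fault, just flip the W bit
 *	P only (read)	-> only if access tracking is enabled
 *	not present	-> only if access tracking is enabled
 */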
3441c50d8ae3SPaolo Bonzini 
3442c50d8ae3SPaolo Bonzini /*
3443c50d8ae3SPaolo Bonzini  * Returns true if the SPTE was fixed successfully. Otherwise,
3444c50d8ae3SPaolo Bonzini  * someone else modified the SPTE from its original value.
3445c50d8ae3SPaolo Bonzini  */
3446c50d8ae3SPaolo Bonzini static bool
3447c50d8ae3SPaolo Bonzini fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3448c50d8ae3SPaolo Bonzini 			u64 *sptep, u64 old_spte, u64 new_spte)
3449c50d8ae3SPaolo Bonzini {
3450c50d8ae3SPaolo Bonzini 	gfn_t gfn;
3451c50d8ae3SPaolo Bonzini 
3452c50d8ae3SPaolo Bonzini 	WARN_ON(!sp->role.direct);
3453c50d8ae3SPaolo Bonzini 
3454c50d8ae3SPaolo Bonzini 	/*
3455c50d8ae3SPaolo Bonzini 	 * Theoretically we could also set the dirty bit (and flush the TLB)
3456c50d8ae3SPaolo Bonzini 	 * here in order to eliminate unnecessary PML logging. See comments
3457c50d8ae3SPaolo Bonzini 	 * in set_spte. But fast_page_fault is very unlikely to happen with
3458c50d8ae3SPaolo Bonzini 	 * PML enabled, so we do not do this. This might result in the same
3459c50d8ae3SPaolo Bonzini 	 * GPA being logged in the PML buffer again when the write really
3460c50d8ae3SPaolo Bonzini 	 * happens, and eventually in mark_page_dirty being called twice for
3461c50d8ae3SPaolo Bonzini 	 * it. But that is harmless. This also avoids the TLB flush needed
3462c50d8ae3SPaolo Bonzini 	 * after setting the dirty bit, so non-PML cases won't be impacted.
3463c50d8ae3SPaolo Bonzini 	 *
3464c50d8ae3SPaolo Bonzini 	 * Compare with set_spte where instead shadow_dirty_mask is set.
3465c50d8ae3SPaolo Bonzini 	 */
3466c50d8ae3SPaolo Bonzini 	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3467c50d8ae3SPaolo Bonzini 		return false;
3468c50d8ae3SPaolo Bonzini 
3469c50d8ae3SPaolo Bonzini 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3470c50d8ae3SPaolo Bonzini 		/*
3471c50d8ae3SPaolo Bonzini 		 * The gfn of a direct spte is stable since it is
3472c50d8ae3SPaolo Bonzini 		 * calculated from sp->gfn.
3473c50d8ae3SPaolo Bonzini 		 */
3474c50d8ae3SPaolo Bonzini 		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
3475c50d8ae3SPaolo Bonzini 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
3476c50d8ae3SPaolo Bonzini 	}
3477c50d8ae3SPaolo Bonzini 
3478c50d8ae3SPaolo Bonzini 	return true;
3479c50d8ae3SPaolo Bonzini }
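
/*
 * Editor's sketch, not part of the original source: the fix above is a
 * single compare-and-swap; the retry loop lives in the caller,
 * fast_page_fault().  A standalone model of that step using the
 * GCC/Clang builtin instead of the kernel's cmpxchg64():
 */
static int sketch_fix_spte(unsigned long long *sptep,
			   unsigned long long old_spte,
			   unsigned long long new_spte)
{
	/* Succeeds only if nobody changed the SPTE underneath us. */
	return __atomic_compare_exchange_n(sptep, &old_spte, new_spte,
					   0, __ATOMIC_SEQ_CST,
					   __ATOMIC_SEQ_CST);
}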
3480c50d8ae3SPaolo Bonzini 
3481c50d8ae3SPaolo Bonzini static bool is_access_allowed(u32 fault_err_code, u64 spte)
3482c50d8ae3SPaolo Bonzini {
3483c50d8ae3SPaolo Bonzini 	if (fault_err_code & PFERR_FETCH_MASK)
3484c50d8ae3SPaolo Bonzini 		return is_executable_pte(spte);
3485c50d8ae3SPaolo Bonzini 
3486c50d8ae3SPaolo Bonzini 	if (fault_err_code & PFERR_WRITE_MASK)
3487c50d8ae3SPaolo Bonzini 		return is_writable_pte(spte);
3488c50d8ae3SPaolo Bonzini 
3489c50d8ae3SPaolo Bonzini 	/* Fault was on Read access */
3490c50d8ae3SPaolo Bonzini 	return spte & PT_PRESENT_MASK;
3491c50d8ae3SPaolo Bonzini }
3492c50d8ae3SPaolo Bonzini 
3493c50d8ae3SPaolo Bonzini /*
3494c50d8ae3SPaolo Bonzini  * Return value:
3495c50d8ae3SPaolo Bonzini  * - true: let the vcpu access the same address again.
3496c50d8ae3SPaolo Bonzini  * - false: let the real page fault path fix it.
3497c50d8ae3SPaolo Bonzini  */
3498f9fa2509SSean Christopherson static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3499c50d8ae3SPaolo Bonzini 			    u32 error_code)
3500c50d8ae3SPaolo Bonzini {
3501c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
3502c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3503c50d8ae3SPaolo Bonzini 	bool fault_handled = false;
3504c50d8ae3SPaolo Bonzini 	u64 spte = 0ull;
3505c50d8ae3SPaolo Bonzini 	uint retry_count = 0;
3506c50d8ae3SPaolo Bonzini 
3507c50d8ae3SPaolo Bonzini 	if (!page_fault_can_be_fast(error_code))
3508c50d8ae3SPaolo Bonzini 		return false;
3509c50d8ae3SPaolo Bonzini 
3510c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3511c50d8ae3SPaolo Bonzini 
3512c50d8ae3SPaolo Bonzini 	do {
3513c50d8ae3SPaolo Bonzini 		u64 new_spte;
3514c50d8ae3SPaolo Bonzini 
3515736c291cSSean Christopherson 		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
3516f9fa2509SSean Christopherson 			if (!is_shadow_present_pte(spte))
3517c50d8ae3SPaolo Bonzini 				break;
3518c50d8ae3SPaolo Bonzini 
351957354682SSean Christopherson 		sp = sptep_to_sp(iterator.sptep);
3520c50d8ae3SPaolo Bonzini 		if (!is_last_spte(spte, sp->role.level))
3521c50d8ae3SPaolo Bonzini 			break;
3522c50d8ae3SPaolo Bonzini 
3523c50d8ae3SPaolo Bonzini 		/*
3524c50d8ae3SPaolo Bonzini 		 * Check whether the memory access that caused the fault would
3525c50d8ae3SPaolo Bonzini 		 * still cause it if it were to be performed right now. If not,
3526c50d8ae3SPaolo Bonzini 		 * then this is a spurious fault caused by a lazily flushed TLB,
3527c50d8ae3SPaolo Bonzini 		 * or some other CPU has already fixed the PTE after the
3528c50d8ae3SPaolo Bonzini 		 * current CPU took the fault.
3529c50d8ae3SPaolo Bonzini 		 *
3530c50d8ae3SPaolo Bonzini 		 * There is no need to check the access of upper level table
3531c50d8ae3SPaolo Bonzini 		 * entries since they are always ACC_ALL.
3532c50d8ae3SPaolo Bonzini 		 */
3533c50d8ae3SPaolo Bonzini 		if (is_access_allowed(error_code, spte)) {
3534c50d8ae3SPaolo Bonzini 			fault_handled = true;
3535c50d8ae3SPaolo Bonzini 			break;
3536c50d8ae3SPaolo Bonzini 		}
3537c50d8ae3SPaolo Bonzini 
3538c50d8ae3SPaolo Bonzini 		new_spte = spte;
3539c50d8ae3SPaolo Bonzini 
3540c50d8ae3SPaolo Bonzini 		if (is_access_track_spte(spte))
3541c50d8ae3SPaolo Bonzini 			new_spte = restore_acc_track_spte(new_spte);
3542c50d8ae3SPaolo Bonzini 
3543c50d8ae3SPaolo Bonzini 		/*
3544c50d8ae3SPaolo Bonzini 		 * Currently, to simplify the code, write-protection can
3545c50d8ae3SPaolo Bonzini 		 * be removed in the fast path only if the SPTE was
3546c50d8ae3SPaolo Bonzini 		 * write-protected for dirty-logging or access tracking.
3547c50d8ae3SPaolo Bonzini 		 */
3548c50d8ae3SPaolo Bonzini 		if ((error_code & PFERR_WRITE_MASK) &&
3549e6302698SMiaohe Lin 		    spte_can_locklessly_be_made_writable(spte)) {
3550c50d8ae3SPaolo Bonzini 			new_spte |= PT_WRITABLE_MASK;
3551c50d8ae3SPaolo Bonzini 
3552c50d8ae3SPaolo Bonzini 			/*
3553c50d8ae3SPaolo Bonzini 			 * Do not fix write permission on a large spte.  Since
3554c50d8ae3SPaolo Bonzini 			 * we only mark the first page dirty in the dirty bitmap
3555c50d8ae3SPaolo Bonzini 			 * in fast_pf_fix_direct_spte(), the other pages would
3556c50d8ae3SPaolo Bonzini 			 * be missed if the slot has dirty logging enabled.
3557c50d8ae3SPaolo Bonzini 			 *
3558c50d8ae3SPaolo Bonzini 			 * Instead, we let the slow page fault path create a
3559c50d8ae3SPaolo Bonzini 			 * normal spte to fix the access.
3560c50d8ae3SPaolo Bonzini 			 *
3561c50d8ae3SPaolo Bonzini 			 * See the comments in kvm_arch_commit_memory_region().
3562c50d8ae3SPaolo Bonzini 			 */
35633bae0459SSean Christopherson 			if (sp->role.level > PG_LEVEL_4K)
3564c50d8ae3SPaolo Bonzini 				break;
3565c50d8ae3SPaolo Bonzini 		}
3566c50d8ae3SPaolo Bonzini 
3567c50d8ae3SPaolo Bonzini 		/* Verify that the fault can be handled in the fast path */
3568c50d8ae3SPaolo Bonzini 		if (new_spte == spte ||
3569c50d8ae3SPaolo Bonzini 		    !is_access_allowed(error_code, new_spte))
3570c50d8ae3SPaolo Bonzini 			break;
3571c50d8ae3SPaolo Bonzini 
3572c50d8ae3SPaolo Bonzini 		/*
3573c50d8ae3SPaolo Bonzini 		 * Currently, fast page fault only works for direct mapping
3574c50d8ae3SPaolo Bonzini 		 * since the gfn is not stable for indirect shadow pages. See
35753ecad8c2SMauro Carvalho Chehab 		 * Documentation/virt/kvm/locking.rst for more detail.
3576c50d8ae3SPaolo Bonzini 		 */
3577c50d8ae3SPaolo Bonzini 		fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
3578c50d8ae3SPaolo Bonzini 							iterator.sptep, spte,
3579c50d8ae3SPaolo Bonzini 							new_spte);
3580c50d8ae3SPaolo Bonzini 		if (fault_handled)
3581c50d8ae3SPaolo Bonzini 			break;
3582c50d8ae3SPaolo Bonzini 
3583c50d8ae3SPaolo Bonzini 		if (++retry_count > 4) {
3584c50d8ae3SPaolo Bonzini 			printk_once(KERN_WARNING
3585c50d8ae3SPaolo Bonzini 				"kvm: Fast #PF retrying more than 4 times.\n");
3586c50d8ae3SPaolo Bonzini 			break;
3587c50d8ae3SPaolo Bonzini 		}
3588c50d8ae3SPaolo Bonzini 
3589c50d8ae3SPaolo Bonzini 	} while (true);
3590c50d8ae3SPaolo Bonzini 
3591736c291cSSean Christopherson 	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
3592c50d8ae3SPaolo Bonzini 			      spte, fault_handled);
3593c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3594c50d8ae3SPaolo Bonzini 
3595c50d8ae3SPaolo Bonzini 	return fault_handled;
3596c50d8ae3SPaolo Bonzini }
3597c50d8ae3SPaolo Bonzini 
3598c50d8ae3SPaolo Bonzini static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3599c50d8ae3SPaolo Bonzini 			       struct list_head *invalid_list)
3600c50d8ae3SPaolo Bonzini {
3601c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3602c50d8ae3SPaolo Bonzini 
3603c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(*root_hpa))
3604c50d8ae3SPaolo Bonzini 		return;
3605c50d8ae3SPaolo Bonzini 
3606e47c4aeeSSean Christopherson 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
3607c50d8ae3SPaolo Bonzini 	--sp->root_count;
3608c50d8ae3SPaolo Bonzini 	if (!sp->root_count && sp->role.invalid)
3609c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3610c50d8ae3SPaolo Bonzini 
3611c50d8ae3SPaolo Bonzini 	*root_hpa = INVALID_PAGE;
3612c50d8ae3SPaolo Bonzini }
3613c50d8ae3SPaolo Bonzini 
3614c50d8ae3SPaolo Bonzini /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3615c50d8ae3SPaolo Bonzini void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3616c50d8ae3SPaolo Bonzini 			ulong roots_to_free)
3617c50d8ae3SPaolo Bonzini {
36184d710de9SSean Christopherson 	struct kvm *kvm = vcpu->kvm;
3619c50d8ae3SPaolo Bonzini 	int i;
3620c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
3621c50d8ae3SPaolo Bonzini 	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3622c50d8ae3SPaolo Bonzini 
3623c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3624c50d8ae3SPaolo Bonzini 
3625c50d8ae3SPaolo Bonzini 	/* Before acquiring the MMU lock, see if we need to do any real work. */
3626c50d8ae3SPaolo Bonzini 	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
3627c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3628c50d8ae3SPaolo Bonzini 			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3629c50d8ae3SPaolo Bonzini 			    VALID_PAGE(mmu->prev_roots[i].hpa))
3630c50d8ae3SPaolo Bonzini 				break;
3631c50d8ae3SPaolo Bonzini 
3632c50d8ae3SPaolo Bonzini 		if (i == KVM_MMU_NUM_PREV_ROOTS)
3633c50d8ae3SPaolo Bonzini 			return;
3634c50d8ae3SPaolo Bonzini 	}
3635c50d8ae3SPaolo Bonzini 
36364d710de9SSean Christopherson 	spin_lock(&kvm->mmu_lock);
3637c50d8ae3SPaolo Bonzini 
3638c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3639c50d8ae3SPaolo Bonzini 		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
36404d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3641c50d8ae3SPaolo Bonzini 					   &invalid_list);
3642c50d8ae3SPaolo Bonzini 
3643c50d8ae3SPaolo Bonzini 	if (free_active_root) {
3644c50d8ae3SPaolo Bonzini 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3645c50d8ae3SPaolo Bonzini 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
36464d710de9SSean Christopherson 			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
3647c50d8ae3SPaolo Bonzini 		} else {
3648c50d8ae3SPaolo Bonzini 			for (i = 0; i < 4; ++i)
3649c50d8ae3SPaolo Bonzini 				if (mmu->pae_root[i] != 0)
36504d710de9SSean Christopherson 					mmu_free_root_page(kvm,
3651c50d8ae3SPaolo Bonzini 							   &mmu->pae_root[i],
3652c50d8ae3SPaolo Bonzini 							   &invalid_list);
3653c50d8ae3SPaolo Bonzini 			mmu->root_hpa = INVALID_PAGE;
3654c50d8ae3SPaolo Bonzini 		}
3655be01e8e2SSean Christopherson 		mmu->root_pgd = 0;
3656c50d8ae3SPaolo Bonzini 	}
3657c50d8ae3SPaolo Bonzini 
36584d710de9SSean Christopherson 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
36594d710de9SSean Christopherson 	spin_unlock(&kvm->mmu_lock);
3660c50d8ae3SPaolo Bonzini }
3661c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
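
/*
 * Editor's note, not part of the original source: roots_to_free is a
 * bitmask.  For example, __kvm_mmu_new_pgd() later in this file drops
 * only the active root with
 *
 *	kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
 *
 * while KVM_MMU_ROOT_PREVIOUS(i) selects cached previous root i.
 */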
3662c50d8ae3SPaolo Bonzini 
3663c50d8ae3SPaolo Bonzini static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3664c50d8ae3SPaolo Bonzini {
3665c50d8ae3SPaolo Bonzini 	int ret = 0;
3666c50d8ae3SPaolo Bonzini 
3667995decb6SVitaly Kuznetsov 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3668c50d8ae3SPaolo Bonzini 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3669c50d8ae3SPaolo Bonzini 		ret = 1;
3670c50d8ae3SPaolo Bonzini 	}
3671c50d8ae3SPaolo Bonzini 
3672c50d8ae3SPaolo Bonzini 	return ret;
3673c50d8ae3SPaolo Bonzini }
3674c50d8ae3SPaolo Bonzini 
36758123f265SSean Christopherson static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
36768123f265SSean Christopherson 			    u8 level, bool direct)
3677c50d8ae3SPaolo Bonzini {
3678c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
36798123f265SSean Christopherson 
36808123f265SSean Christopherson 	spin_lock(&vcpu->kvm->mmu_lock);
36818123f265SSean Christopherson 
36828123f265SSean Christopherson 	if (make_mmu_pages_available(vcpu)) {
36838123f265SSean Christopherson 		spin_unlock(&vcpu->kvm->mmu_lock);
36848123f265SSean Christopherson 		return INVALID_PAGE;
36858123f265SSean Christopherson 	}
36868123f265SSean Christopherson 	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
36878123f265SSean Christopherson 	++sp->root_count;
36888123f265SSean Christopherson 
36898123f265SSean Christopherson 	spin_unlock(&vcpu->kvm->mmu_lock);
36908123f265SSean Christopherson 	return __pa(sp->spt);
36918123f265SSean Christopherson }
36928123f265SSean Christopherson 
36938123f265SSean Christopherson static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
36948123f265SSean Christopherson {
36958123f265SSean Christopherson 	u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
36968123f265SSean Christopherson 	hpa_t root;
3697c50d8ae3SPaolo Bonzini 	unsigned i;
3698c50d8ae3SPaolo Bonzini 
36998123f265SSean Christopherson 	if (shadow_root_level >= PT64_ROOT_4LEVEL) {
37008123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
37018123f265SSean Christopherson 		if (!VALID_PAGE(root))
3702c50d8ae3SPaolo Bonzini 			return -ENOSPC;
37038123f265SSean Christopherson 		vcpu->arch.mmu->root_hpa = root;
37048123f265SSean Christopherson 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3705c50d8ae3SPaolo Bonzini 		for (i = 0; i < 4; ++i) {
37068123f265SSean Christopherson 			MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
3707c50d8ae3SPaolo Bonzini 
37088123f265SSean Christopherson 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
37098123f265SSean Christopherson 					      i << 30, PT32_ROOT_LEVEL, true);
37108123f265SSean Christopherson 			if (!VALID_PAGE(root))
3711c50d8ae3SPaolo Bonzini 				return -ENOSPC;
3712c50d8ae3SPaolo Bonzini 			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
3713c50d8ae3SPaolo Bonzini 		}
3714c50d8ae3SPaolo Bonzini 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
3715c50d8ae3SPaolo Bonzini 	} else
3716c50d8ae3SPaolo Bonzini 		BUG();
37173651c7fcSSean Christopherson 
3718be01e8e2SSean Christopherson 	/* root_pgd is ignored for direct MMUs. */
3719be01e8e2SSean Christopherson 	vcpu->arch.mmu->root_pgd = 0;
3720c50d8ae3SPaolo Bonzini 
3721c50d8ae3SPaolo Bonzini 	return 0;
3722c50d8ae3SPaolo Bonzini }
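
/*
 * Editor's sketch, not part of the original source: in the PAE branch
 * above, each of the four roots covers 1GB of guest address space, so
 * the quadrant's base gfn is i << (30 - PAGE_SHIFT) = i << 18 with 4K
 * pages, matching the gva of i << 30.  E.g. quadrant 2 starts at
 * gva 0x80000000, i.e. gfn 0x80000.
 */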
3723c50d8ae3SPaolo Bonzini 
3724c50d8ae3SPaolo Bonzini static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3725c50d8ae3SPaolo Bonzini {
3726c50d8ae3SPaolo Bonzini 	u64 pdptr, pm_mask;
3727be01e8e2SSean Christopherson 	gfn_t root_gfn, root_pgd;
37288123f265SSean Christopherson 	hpa_t root;
3729c50d8ae3SPaolo Bonzini 	int i;
3730c50d8ae3SPaolo Bonzini 
3731be01e8e2SSean Christopherson 	root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
3732be01e8e2SSean Christopherson 	root_gfn = root_pgd >> PAGE_SHIFT;
3733c50d8ae3SPaolo Bonzini 
3734c50d8ae3SPaolo Bonzini 	if (mmu_check_root(vcpu, root_gfn))
3735c50d8ae3SPaolo Bonzini 		return 1;
3736c50d8ae3SPaolo Bonzini 
3737c50d8ae3SPaolo Bonzini 	/*
3738c50d8ae3SPaolo Bonzini 	 * Do we shadow a long mode page table? If so we need to
3739c50d8ae3SPaolo Bonzini 	 * write-protect the guests page table root.
3740c50d8ae3SPaolo Bonzini 	 * write-protect the guest's page table root.
3741c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
37428123f265SSean Christopherson 		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
3743c50d8ae3SPaolo Bonzini 
37448123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, 0,
37458123f265SSean Christopherson 				      vcpu->arch.mmu->shadow_root_level, false);
37468123f265SSean Christopherson 		if (!VALID_PAGE(root))
3747c50d8ae3SPaolo Bonzini 			return -ENOSPC;
3748c50d8ae3SPaolo Bonzini 		vcpu->arch.mmu->root_hpa = root;
3749be01e8e2SSean Christopherson 		goto set_root_pgd;
3750c50d8ae3SPaolo Bonzini 	}
3751c50d8ae3SPaolo Bonzini 
3752c50d8ae3SPaolo Bonzini 	/*
3753c50d8ae3SPaolo Bonzini 	 * We shadow a 32 bit page table. This may be a legacy 2-level
3754c50d8ae3SPaolo Bonzini 	 * or a PAE 3-level page table. In either case we need to be aware that
3755c50d8ae3SPaolo Bonzini 	 * the shadow page table may be a PAE or a long mode page table.
3756c50d8ae3SPaolo Bonzini 	 */
3757c50d8ae3SPaolo Bonzini 	pm_mask = PT_PRESENT_MASK;
3758c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
3759c50d8ae3SPaolo Bonzini 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3760c50d8ae3SPaolo Bonzini 
3761c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
37628123f265SSean Christopherson 		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
3763c50d8ae3SPaolo Bonzini 		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
3764c50d8ae3SPaolo Bonzini 			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
3765c50d8ae3SPaolo Bonzini 			if (!(pdptr & PT_PRESENT_MASK)) {
3766c50d8ae3SPaolo Bonzini 				vcpu->arch.mmu->pae_root[i] = 0;
3767c50d8ae3SPaolo Bonzini 				continue;
3768c50d8ae3SPaolo Bonzini 			}
3769c50d8ae3SPaolo Bonzini 			root_gfn = pdptr >> PAGE_SHIFT;
3770c50d8ae3SPaolo Bonzini 			if (mmu_check_root(vcpu, root_gfn))
3771c50d8ae3SPaolo Bonzini 				return 1;
3772c50d8ae3SPaolo Bonzini 		}
3773c50d8ae3SPaolo Bonzini 
37748123f265SSean Christopherson 		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
37758123f265SSean Christopherson 				      PT32_ROOT_LEVEL, false);
37768123f265SSean Christopherson 		if (!VALID_PAGE(root))
37778123f265SSean Christopherson 			return -ENOSPC;
3778c50d8ae3SPaolo Bonzini 		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
3779c50d8ae3SPaolo Bonzini 	}
3780c50d8ae3SPaolo Bonzini 	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
3781c50d8ae3SPaolo Bonzini 
3782c50d8ae3SPaolo Bonzini 	/*
3783c50d8ae3SPaolo Bonzini 	 * If we shadow a 32 bit page table with a long mode page
3784c50d8ae3SPaolo Bonzini 	 * table we enter this path.
3785c50d8ae3SPaolo Bonzini 	 */
3786c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
3787c50d8ae3SPaolo Bonzini 		if (vcpu->arch.mmu->lm_root == NULL) {
3788c50d8ae3SPaolo Bonzini 			/*
3789c50d8ae3SPaolo Bonzini 			 * The additional page necessary for this is only
3790c50d8ae3SPaolo Bonzini 			 * allocated on demand.
3791c50d8ae3SPaolo Bonzini 			 */
3792c50d8ae3SPaolo Bonzini 
3793c50d8ae3SPaolo Bonzini 			u64 *lm_root;
3794c50d8ae3SPaolo Bonzini 
3795c50d8ae3SPaolo Bonzini 			lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3796c50d8ae3SPaolo Bonzini 			if (lm_root == NULL)
3797c50d8ae3SPaolo Bonzini 				return 1;
3798c50d8ae3SPaolo Bonzini 
3799c50d8ae3SPaolo Bonzini 			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
3800c50d8ae3SPaolo Bonzini 
3801c50d8ae3SPaolo Bonzini 			vcpu->arch.mmu->lm_root = lm_root;
3802c50d8ae3SPaolo Bonzini 		}
3803c50d8ae3SPaolo Bonzini 
3804c50d8ae3SPaolo Bonzini 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
3805c50d8ae3SPaolo Bonzini 	}
3806c50d8ae3SPaolo Bonzini 
3807be01e8e2SSean Christopherson set_root_pgd:
3808be01e8e2SSean Christopherson 	vcpu->arch.mmu->root_pgd = root_pgd;
3809c50d8ae3SPaolo Bonzini 
3810c50d8ae3SPaolo Bonzini 	return 0;
3811c50d8ae3SPaolo Bonzini }
3812c50d8ae3SPaolo Bonzini 
3813c50d8ae3SPaolo Bonzini static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
3814c50d8ae3SPaolo Bonzini {
3815c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
3816c50d8ae3SPaolo Bonzini 		return mmu_alloc_direct_roots(vcpu);
3817c50d8ae3SPaolo Bonzini 	else
3818c50d8ae3SPaolo Bonzini 		return mmu_alloc_shadow_roots(vcpu);
3819c50d8ae3SPaolo Bonzini }
3820c50d8ae3SPaolo Bonzini 
3821c50d8ae3SPaolo Bonzini void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3822c50d8ae3SPaolo Bonzini {
3823c50d8ae3SPaolo Bonzini 	int i;
3824c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
3825c50d8ae3SPaolo Bonzini 
3826c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
3827c50d8ae3SPaolo Bonzini 		return;
3828c50d8ae3SPaolo Bonzini 
3829c50d8ae3SPaolo Bonzini 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3830c50d8ae3SPaolo Bonzini 		return;
3831c50d8ae3SPaolo Bonzini 
3832c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3833c50d8ae3SPaolo Bonzini 
3834c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3835c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->root_hpa;
3836e47c4aeeSSean Christopherson 		sp = to_shadow_page(root);
3837c50d8ae3SPaolo Bonzini 
3838c50d8ae3SPaolo Bonzini 		/*
3839c50d8ae3SPaolo Bonzini 		 * Even if another CPU was marking the SP as unsync-ed
3840c50d8ae3SPaolo Bonzini 		 * simultaneously, any guest page table changes are not
3841c50d8ae3SPaolo Bonzini 		 * guaranteed to be visible anyway until this VCPU issues a TLB
3842c50d8ae3SPaolo Bonzini 		 * flush strictly after those changes are made. We only need to
3843c50d8ae3SPaolo Bonzini 		 * ensure that the other CPU sets these flags before any actual
3844c50d8ae3SPaolo Bonzini 		 * changes to the page tables are made. The comments in
3845c50d8ae3SPaolo Bonzini 		 * mmu_need_write_protect() describe what could go wrong if this
3846c50d8ae3SPaolo Bonzini 		 * requirement isn't satisfied.
3847c50d8ae3SPaolo Bonzini 		 */
3848c50d8ae3SPaolo Bonzini 		if (!smp_load_acquire(&sp->unsync) &&
3849c50d8ae3SPaolo Bonzini 		    !smp_load_acquire(&sp->unsync_children))
3850c50d8ae3SPaolo Bonzini 			return;
3851c50d8ae3SPaolo Bonzini 
3852c50d8ae3SPaolo Bonzini 		spin_lock(&vcpu->kvm->mmu_lock);
3853c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3854c50d8ae3SPaolo Bonzini 
3855c50d8ae3SPaolo Bonzini 		mmu_sync_children(vcpu, sp);
3856c50d8ae3SPaolo Bonzini 
3857c50d8ae3SPaolo Bonzini 		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3858c50d8ae3SPaolo Bonzini 		spin_unlock(&vcpu->kvm->mmu_lock);
3859c50d8ae3SPaolo Bonzini 		return;
3860c50d8ae3SPaolo Bonzini 	}
3861c50d8ae3SPaolo Bonzini 
3862c50d8ae3SPaolo Bonzini 	spin_lock(&vcpu->kvm->mmu_lock);
3863c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3864c50d8ae3SPaolo Bonzini 
3865c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i) {
3866c50d8ae3SPaolo Bonzini 		hpa_t root = vcpu->arch.mmu->pae_root[i];
3867c50d8ae3SPaolo Bonzini 
3868c50d8ae3SPaolo Bonzini 		if (root && VALID_PAGE(root)) {
3869c50d8ae3SPaolo Bonzini 			root &= PT64_BASE_ADDR_MASK;
3870e47c4aeeSSean Christopherson 			sp = to_shadow_page(root);
3871c50d8ae3SPaolo Bonzini 			mmu_sync_children(vcpu, sp);
3872c50d8ae3SPaolo Bonzini 		}
3873c50d8ae3SPaolo Bonzini 	}
3874c50d8ae3SPaolo Bonzini 
3875c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3876c50d8ae3SPaolo Bonzini 	spin_unlock(&vcpu->kvm->mmu_lock);
3877c50d8ae3SPaolo Bonzini }
3878c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
3879c50d8ae3SPaolo Bonzini 
3880736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3881c50d8ae3SPaolo Bonzini 				  u32 access, struct x86_exception *exception)
3882c50d8ae3SPaolo Bonzini {
3883c50d8ae3SPaolo Bonzini 	if (exception)
3884c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3885c50d8ae3SPaolo Bonzini 	return vaddr;
3886c50d8ae3SPaolo Bonzini }
3887c50d8ae3SPaolo Bonzini 
3888736c291cSSean Christopherson static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3889c50d8ae3SPaolo Bonzini 					 u32 access,
3890c50d8ae3SPaolo Bonzini 					 struct x86_exception *exception)
3891c50d8ae3SPaolo Bonzini {
3892c50d8ae3SPaolo Bonzini 	if (exception)
3893c50d8ae3SPaolo Bonzini 		exception->error_code = 0;
3894c50d8ae3SPaolo Bonzini 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3895c50d8ae3SPaolo Bonzini }
3896c50d8ae3SPaolo Bonzini 
3897c50d8ae3SPaolo Bonzini static bool
3898c50d8ae3SPaolo Bonzini __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
3899c50d8ae3SPaolo Bonzini {
3900b5c3c1b3SSean Christopherson 	int bit7 = (pte >> 7) & 1;
3901c50d8ae3SPaolo Bonzini 
3902b5c3c1b3SSean Christopherson 	return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
3903c50d8ae3SPaolo Bonzini }
3904c50d8ae3SPaolo Bonzini 
3905b5c3c1b3SSean Christopherson static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
3906c50d8ae3SPaolo Bonzini {
3907b5c3c1b3SSean Christopherson 	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
3908c50d8ae3SPaolo Bonzini }
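
/*
 * Editor's sketch, not part of the original source: __is_rsvd_bits_set()
 * indexes the reserved-bit masks by PTE bit 7 so that non-leaf entries
 * and huge-page leaves can have different reserved bits at the same
 * level.  With hypothetical masks at level 2:
 *
 *	rsvd_bits_mask[0][1]: bits reserved in a level-2 table entry
 *	rsvd_bits_mask[1][1]: bits reserved in a level-2 huge-page leaf
 *
 *	pte with bit 7 set   -> checked against rsvd_bits_mask[1][1]
 *	pte with bit 7 clear -> checked against rsvd_bits_mask[0][1]
 */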
3909c50d8ae3SPaolo Bonzini 
3910c50d8ae3SPaolo Bonzini static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3911c50d8ae3SPaolo Bonzini {
3912c50d8ae3SPaolo Bonzini 	/*
3913c50d8ae3SPaolo Bonzini 	 * A nested guest cannot use the MMIO cache if it is using nested
3914c50d8ae3SPaolo Bonzini 	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
3915c50d8ae3SPaolo Bonzini 	 */
3916c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
3917c50d8ae3SPaolo Bonzini 		return false;
3918c50d8ae3SPaolo Bonzini 
3919c50d8ae3SPaolo Bonzini 	if (direct)
3920c50d8ae3SPaolo Bonzini 		return vcpu_match_mmio_gpa(vcpu, addr);
3921c50d8ae3SPaolo Bonzini 
3922c50d8ae3SPaolo Bonzini 	return vcpu_match_mmio_gva(vcpu, addr);
3923c50d8ae3SPaolo Bonzini }
3924c50d8ae3SPaolo Bonzini 
3925c50d8ae3SPaolo Bonzini /* Return true if a reserved bit is detected in the spte. */
3926c50d8ae3SPaolo Bonzini static bool
3927c50d8ae3SPaolo Bonzini walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3928c50d8ae3SPaolo Bonzini {
3929c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
3930c50d8ae3SPaolo Bonzini 	u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
3931b5c3c1b3SSean Christopherson 	struct rsvd_bits_validate *rsvd_check;
3932c50d8ae3SPaolo Bonzini 	int root, leaf;
3933c50d8ae3SPaolo Bonzini 	bool reserved = false;
3934c50d8ae3SPaolo Bonzini 
3935b5c3c1b3SSean Christopherson 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
3936c50d8ae3SPaolo Bonzini 
3937c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
3938c50d8ae3SPaolo Bonzini 
3939c50d8ae3SPaolo Bonzini 	for (shadow_walk_init(&iterator, vcpu, addr),
3940c50d8ae3SPaolo Bonzini 		 leaf = root = iterator.level;
3941c50d8ae3SPaolo Bonzini 	     shadow_walk_okay(&iterator);
3942c50d8ae3SPaolo Bonzini 	     __shadow_walk_next(&iterator, spte)) {
3943c50d8ae3SPaolo Bonzini 		spte = mmu_spte_get_lockless(iterator.sptep);
3944c50d8ae3SPaolo Bonzini 
3945c50d8ae3SPaolo Bonzini 		sptes[leaf - 1] = spte;
3946c50d8ae3SPaolo Bonzini 		leaf--;
3947c50d8ae3SPaolo Bonzini 
3948c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(spte))
3949c50d8ae3SPaolo Bonzini 			break;
3950c50d8ae3SPaolo Bonzini 
3951b5c3c1b3SSean Christopherson 		/*
3952b5c3c1b3SSean Christopherson 		 * Use a bitwise-OR instead of a logical-OR to aggregate the
3953b5c3c1b3SSean Christopherson 		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
3954b5c3c1b3SSean Christopherson 		 * adding a Jcc in the loop.
3955b5c3c1b3SSean Christopherson 		 */
3956b5c3c1b3SSean Christopherson 		reserved |= __is_bad_mt_xwr(rsvd_check, spte) |
3957b5c3c1b3SSean Christopherson 			    __is_rsvd_bits_set(rsvd_check, spte, iterator.level);
3958c50d8ae3SPaolo Bonzini 	}
3959c50d8ae3SPaolo Bonzini 
3960c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
3961c50d8ae3SPaolo Bonzini 
3962c50d8ae3SPaolo Bonzini 	if (reserved) {
3963c50d8ae3SPaolo Bonzini 		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
3964c50d8ae3SPaolo Bonzini 		       __func__, addr);
3965c50d8ae3SPaolo Bonzini 		while (root > leaf) {
3966c50d8ae3SPaolo Bonzini 			pr_err("------ spte 0x%llx level %d.\n",
3967c50d8ae3SPaolo Bonzini 			       sptes[root - 1], root);
3968c50d8ae3SPaolo Bonzini 			root--;
3969c50d8ae3SPaolo Bonzini 		}
3970c50d8ae3SPaolo Bonzini 	}
3971ddce6208SSean Christopherson 
3972c50d8ae3SPaolo Bonzini 	*sptep = spte;
3973c50d8ae3SPaolo Bonzini 	return reserved;
3974c50d8ae3SPaolo Bonzini }
3975c50d8ae3SPaolo Bonzini 
3976c50d8ae3SPaolo Bonzini static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3977c50d8ae3SPaolo Bonzini {
3978c50d8ae3SPaolo Bonzini 	u64 spte;
3979c50d8ae3SPaolo Bonzini 	bool reserved;
3980c50d8ae3SPaolo Bonzini 
3981c50d8ae3SPaolo Bonzini 	if (mmio_info_in_cache(vcpu, addr, direct))
3982c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
3983c50d8ae3SPaolo Bonzini 
3984c50d8ae3SPaolo Bonzini 	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
3985c50d8ae3SPaolo Bonzini 	if (WARN_ON(reserved))
3986c50d8ae3SPaolo Bonzini 		return -EINVAL;
3987c50d8ae3SPaolo Bonzini 
3988c50d8ae3SPaolo Bonzini 	if (is_mmio_spte(spte)) {
3989c50d8ae3SPaolo Bonzini 		gfn_t gfn = get_mmio_spte_gfn(spte);
39900a2b64c5SBen Gardon 		unsigned int access = get_mmio_spte_access(spte);
3991c50d8ae3SPaolo Bonzini 
3992c50d8ae3SPaolo Bonzini 		if (!check_mmio_spte(vcpu, spte))
3993c50d8ae3SPaolo Bonzini 			return RET_PF_INVALID;
3994c50d8ae3SPaolo Bonzini 
3995c50d8ae3SPaolo Bonzini 		if (direct)
3996c50d8ae3SPaolo Bonzini 			addr = 0;
3997c50d8ae3SPaolo Bonzini 
3998c50d8ae3SPaolo Bonzini 		trace_handle_mmio_page_fault(addr, gfn, access);
3999c50d8ae3SPaolo Bonzini 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4000c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
4001c50d8ae3SPaolo Bonzini 	}
4002c50d8ae3SPaolo Bonzini 
4003c50d8ae3SPaolo Bonzini 	/*
4004c50d8ae3SPaolo Bonzini 	 * If the page table has been zapped by another CPU, let the CPU
4005c50d8ae3SPaolo Bonzini 	 * fault again on the address.
4006c50d8ae3SPaolo Bonzini 	 */
4007c50d8ae3SPaolo Bonzini 	return RET_PF_RETRY;
4008c50d8ae3SPaolo Bonzini }
4009c50d8ae3SPaolo Bonzini 
4010c50d8ae3SPaolo Bonzini static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
4011c50d8ae3SPaolo Bonzini 					 u32 error_code, gfn_t gfn)
4012c50d8ae3SPaolo Bonzini {
4013c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK))
4014c50d8ae3SPaolo Bonzini 		return false;
4015c50d8ae3SPaolo Bonzini 
4016c50d8ae3SPaolo Bonzini 	if (!(error_code & PFERR_PRESENT_MASK) ||
4017c50d8ae3SPaolo Bonzini 	      !(error_code & PFERR_WRITE_MASK))
4018c50d8ae3SPaolo Bonzini 		return false;
4019c50d8ae3SPaolo Bonzini 
4020c50d8ae3SPaolo Bonzini 	/*
4021c50d8ae3SPaolo Bonzini 	 * The guest is writing a page that is write-tracked, which cannot
4022c50d8ae3SPaolo Bonzini 	 * be fixed by the page fault handler.
4023c50d8ae3SPaolo Bonzini 	 */
4024c50d8ae3SPaolo Bonzini 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
4025c50d8ae3SPaolo Bonzini 		return true;
4026c50d8ae3SPaolo Bonzini 
4027c50d8ae3SPaolo Bonzini 	return false;
4028c50d8ae3SPaolo Bonzini }
4029c50d8ae3SPaolo Bonzini 
4030c50d8ae3SPaolo Bonzini static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
4031c50d8ae3SPaolo Bonzini {
4032c50d8ae3SPaolo Bonzini 	struct kvm_shadow_walk_iterator iterator;
4033c50d8ae3SPaolo Bonzini 	u64 spte;
4034c50d8ae3SPaolo Bonzini 
4035c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_begin(vcpu);
4036c50d8ae3SPaolo Bonzini 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
4037c50d8ae3SPaolo Bonzini 		clear_sp_write_flooding_count(iterator.sptep);
4038c50d8ae3SPaolo Bonzini 		if (!is_shadow_present_pte(spte))
4039c50d8ae3SPaolo Bonzini 			break;
4040c50d8ae3SPaolo Bonzini 	}
4041c50d8ae3SPaolo Bonzini 	walk_shadow_page_lockless_end(vcpu);
4042c50d8ae3SPaolo Bonzini }
4043c50d8ae3SPaolo Bonzini 
4044e8c22266SVitaly Kuznetsov static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
40459f1a8526SSean Christopherson 				    gfn_t gfn)
4046c50d8ae3SPaolo Bonzini {
4047c50d8ae3SPaolo Bonzini 	struct kvm_arch_async_pf arch;
4048c50d8ae3SPaolo Bonzini 
4049c50d8ae3SPaolo Bonzini 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
4050c50d8ae3SPaolo Bonzini 	arch.gfn = gfn;
4051c50d8ae3SPaolo Bonzini 	arch.direct_map = vcpu->arch.mmu->direct_map;
4052d8dd54e0SSean Christopherson 	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
4053c50d8ae3SPaolo Bonzini 
40549f1a8526SSean Christopherson 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
40559f1a8526SSean Christopherson 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
4056c50d8ae3SPaolo Bonzini }
4057c50d8ae3SPaolo Bonzini 
4058c50d8ae3SPaolo Bonzini static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
40599f1a8526SSean Christopherson 			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
40609f1a8526SSean Christopherson 			 bool *writable)
4061c50d8ae3SPaolo Bonzini {
4062c36b7150SPaolo Bonzini 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
4063c50d8ae3SPaolo Bonzini 	bool async;
4064c50d8ae3SPaolo Bonzini 
4065c36b7150SPaolo Bonzini 	/* Don't expose private memslots to L2. */
4066c36b7150SPaolo Bonzini 	if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
4067c50d8ae3SPaolo Bonzini 		*pfn = KVM_PFN_NOSLOT;
4068c583eed6SSean Christopherson 		*writable = false;
4069c50d8ae3SPaolo Bonzini 		return false;
4070c50d8ae3SPaolo Bonzini 	}
4071c50d8ae3SPaolo Bonzini 
4072c50d8ae3SPaolo Bonzini 	async = false;
4073c50d8ae3SPaolo Bonzini 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
4074c50d8ae3SPaolo Bonzini 	if (!async)
4075c50d8ae3SPaolo Bonzini 		return false; /* *pfn has correct page already */
4076c50d8ae3SPaolo Bonzini 
4077c50d8ae3SPaolo Bonzini 	if (!prefault && kvm_can_do_async_pf(vcpu)) {
40789f1a8526SSean Christopherson 		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
4079c50d8ae3SPaolo Bonzini 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
40809f1a8526SSean Christopherson 			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
4081c50d8ae3SPaolo Bonzini 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
4082c50d8ae3SPaolo Bonzini 			return true;
40839f1a8526SSean Christopherson 		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
4084c50d8ae3SPaolo Bonzini 			return true;
4085c50d8ae3SPaolo Bonzini 	}
4086c50d8ae3SPaolo Bonzini 
4087c50d8ae3SPaolo Bonzini 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
4088c50d8ae3SPaolo Bonzini 	return false;
4089c50d8ae3SPaolo Bonzini }
4090c50d8ae3SPaolo Bonzini 
40910f90e1c1SSean Christopherson static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
40920f90e1c1SSean Christopherson 			     bool prefault, int max_level, bool is_tdp)
4093c50d8ae3SPaolo Bonzini {
4094367fd790SSean Christopherson 	bool write = error_code & PFERR_WRITE_MASK;
4095367fd790SSean Christopherson 	bool exec = error_code & PFERR_FETCH_MASK;
4096367fd790SSean Christopherson 	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
40970f90e1c1SSean Christopherson 	bool map_writable;
4098c50d8ae3SPaolo Bonzini 
40990f90e1c1SSean Christopherson 	gfn_t gfn = gpa >> PAGE_SHIFT;
41000f90e1c1SSean Christopherson 	unsigned long mmu_seq;
41010f90e1c1SSean Christopherson 	kvm_pfn_t pfn;
410283f06fa7SSean Christopherson 	int r;
4103c50d8ae3SPaolo Bonzini 
4104c50d8ae3SPaolo Bonzini 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
4105c50d8ae3SPaolo Bonzini 		return RET_PF_EMULATE;
4106c50d8ae3SPaolo Bonzini 
410783291445SSean Christopherson 	if (fast_page_fault(vcpu, gpa, error_code))
410883291445SSean Christopherson 		return RET_PF_RETRY;
410983291445SSean Christopherson 
4110378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, false);
4111c50d8ae3SPaolo Bonzini 	if (r)
4112c50d8ae3SPaolo Bonzini 		return r;
4113c50d8ae3SPaolo Bonzini 
41140f90e1c1SSean Christopherson 	if (lpage_disallowed)
41153bae0459SSean Christopherson 		max_level = PG_LEVEL_4K;
4116c50d8ae3SPaolo Bonzini 
4117367fd790SSean Christopherson 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
4118367fd790SSean Christopherson 	smp_rmb();
4119367fd790SSean Christopherson 
4120367fd790SSean Christopherson 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
4121367fd790SSean Christopherson 		return RET_PF_RETRY;
4122367fd790SSean Christopherson 
41230f90e1c1SSean Christopherson 	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
4124367fd790SSean Christopherson 		return r;
4125367fd790SSean Christopherson 
4126367fd790SSean Christopherson 	r = RET_PF_RETRY;
4127367fd790SSean Christopherson 	spin_lock(&vcpu->kvm->mmu_lock);
4128367fd790SSean Christopherson 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
4129367fd790SSean Christopherson 		goto out_unlock;
41307bd7ded6SSean Christopherson 	r = make_mmu_pages_available(vcpu);
41317bd7ded6SSean Christopherson 	if (r)
4132367fd790SSean Christopherson 		goto out_unlock;
413383f06fa7SSean Christopherson 	r = __direct_map(vcpu, gpa, write, map_writable, max_level, pfn,
41344cd071d1SSean Christopherson 			 prefault, is_tdp && lpage_disallowed);
41350f90e1c1SSean Christopherson 
4136367fd790SSean Christopherson out_unlock:
4137367fd790SSean Christopherson 	spin_unlock(&vcpu->kvm->mmu_lock);
4138367fd790SSean Christopherson 	kvm_release_pfn_clean(pfn);
4139367fd790SSean Christopherson 	return r;
4140c50d8ae3SPaolo Bonzini }
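
/*
 * Editor's sketch, not part of the original source: the mmu_seq dance
 * in direct_page_fault() is a sequence-count pattern: sample the
 * notifier count before the sleepable pfn lookup, then recheck it under
 * mmu_lock; if an invalidation ran in between, drop the pfn and retry.
 * Schematically:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = <gup/async lookup, may sleep>;
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;		(-> RET_PF_RETRY)
 *	<install SPTE>;
 */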
4141c50d8ae3SPaolo Bonzini 
41420f90e1c1SSean Christopherson static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
41430f90e1c1SSean Christopherson 				u32 error_code, bool prefault)
41440f90e1c1SSean Christopherson {
41450f90e1c1SSean Christopherson 	pgprintk("%s: gpa %llx error %x\n", __func__, gpa, error_code);
41460f90e1c1SSean Christopherson 
41470f90e1c1SSean Christopherson 	/* This path builds a PAE page table, so 2MB is the largest mapping. */
41480f90e1c1SSean Christopherson 	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
41493bae0459SSean Christopherson 				 PG_LEVEL_2M, false);
41500f90e1c1SSean Christopherson }
41510f90e1c1SSean Christopherson 
4152c50d8ae3SPaolo Bonzini int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4153c50d8ae3SPaolo Bonzini 				u64 fault_address, char *insn, int insn_len)
4154c50d8ae3SPaolo Bonzini {
4155c50d8ae3SPaolo Bonzini 	int r = 1;
41569ce372b3SVitaly Kuznetsov 	u32 flags = vcpu->arch.apf.host_apf_flags;
4157c50d8ae3SPaolo Bonzini 
4158736c291cSSean Christopherson #ifndef CONFIG_X86_64
4159736c291cSSean Christopherson 	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
4160736c291cSSean Christopherson 	if (WARN_ON_ONCE(fault_address >> 32))
4161736c291cSSean Christopherson 		return -EFAULT;
4162736c291cSSean Christopherson #endif
4163736c291cSSean Christopherson 
4164c50d8ae3SPaolo Bonzini 	vcpu->arch.l1tf_flush_l1d = true;
41659ce372b3SVitaly Kuznetsov 	if (!flags) {
4166c50d8ae3SPaolo Bonzini 		trace_kvm_page_fault(fault_address, error_code);
4167c50d8ae3SPaolo Bonzini 
4168c50d8ae3SPaolo Bonzini 		if (kvm_event_needs_reinjection(vcpu))
4169c50d8ae3SPaolo Bonzini 			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
4170c50d8ae3SPaolo Bonzini 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4171c50d8ae3SPaolo Bonzini 				insn_len);
41729ce372b3SVitaly Kuznetsov 	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
417368fd66f1SVitaly Kuznetsov 		vcpu->arch.apf.host_apf_flags = 0;
4174c50d8ae3SPaolo Bonzini 		local_irq_disable();
41756bca69adSThomas Gleixner 		kvm_async_pf_task_wait_schedule(fault_address);
4176c50d8ae3SPaolo Bonzini 		local_irq_enable();
41779ce372b3SVitaly Kuznetsov 	} else {
41789ce372b3SVitaly Kuznetsov 		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4179c50d8ae3SPaolo Bonzini 	}
41809ce372b3SVitaly Kuznetsov 
4181c50d8ae3SPaolo Bonzini 	return r;
4182c50d8ae3SPaolo Bonzini }
4183c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
4184c50d8ae3SPaolo Bonzini 
41857a02674dSSean Christopherson int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
4186c50d8ae3SPaolo Bonzini 		       bool prefault)
4187c50d8ae3SPaolo Bonzini {
4188cb9b88c6SSean Christopherson 	int max_level;
4189c50d8ae3SPaolo Bonzini 
4190e662ec3eSSean Christopherson 	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
41913bae0459SSean Christopherson 	     max_level > PG_LEVEL_4K;
4192cb9b88c6SSean Christopherson 	     max_level--) {
4193cb9b88c6SSean Christopherson 		int page_num = KVM_PAGES_PER_HPAGE(max_level);
41940f90e1c1SSean Christopherson 		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
4195c50d8ae3SPaolo Bonzini 
4196cb9b88c6SSean Christopherson 		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
4197cb9b88c6SSean Christopherson 			break;
4198c50d8ae3SPaolo Bonzini 	}
4199c50d8ae3SPaolo Bonzini 
42000f90e1c1SSean Christopherson 	return direct_page_fault(vcpu, gpa, error_code, prefault,
42010f90e1c1SSean Christopherson 				 max_level, true);
4202c50d8ae3SPaolo Bonzini }
4203c50d8ae3SPaolo Bonzini 
4204c50d8ae3SPaolo Bonzini static void nonpaging_init_context(struct kvm_vcpu *vcpu,
4205c50d8ae3SPaolo Bonzini 				   struct kvm_mmu *context)
4206c50d8ae3SPaolo Bonzini {
4207c50d8ae3SPaolo Bonzini 	context->page_fault = nonpaging_page_fault;
4208c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = nonpaging_gva_to_gpa;
4209c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
42105efac074SPaolo Bonzini 	context->invlpg = NULL;
4211c50d8ae3SPaolo Bonzini 	context->update_pte = nonpaging_update_pte;
4212c50d8ae3SPaolo Bonzini 	context->root_level = 0;
4213c50d8ae3SPaolo Bonzini 	context->shadow_root_level = PT32E_ROOT_LEVEL;
4214c50d8ae3SPaolo Bonzini 	context->direct_map = true;
4215c50d8ae3SPaolo Bonzini 	context->nx = false;
4216c50d8ae3SPaolo Bonzini }
4217c50d8ae3SPaolo Bonzini 
4218be01e8e2SSean Christopherson static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
42190be44352SSean Christopherson 				  union kvm_mmu_page_role role)
42200be44352SSean Christopherson {
4221be01e8e2SSean Christopherson 	return (role.direct || pgd == root->pgd) &&
4222e47c4aeeSSean Christopherson 	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
4223e47c4aeeSSean Christopherson 	       role.word == to_shadow_page(root->hpa)->role.word;
42240be44352SSean Christopherson }
42250be44352SSean Christopherson 
4226c50d8ae3SPaolo Bonzini /*
4227be01e8e2SSean Christopherson  * Find out if a previously cached root matching the new pgd/role is available.
4228c50d8ae3SPaolo Bonzini  * The current root is also inserted into the cache.
4229c50d8ae3SPaolo Bonzini  * If a matching root is found, it is assigned to kvm_mmu->root_hpa and true is
4230c50d8ae3SPaolo Bonzini  * returned.
4231c50d8ae3SPaolo Bonzini  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
4232c50d8ae3SPaolo Bonzini  * false is returned; the caller should then free that root.
4233c50d8ae3SPaolo Bonzini  */
4234be01e8e2SSean Christopherson static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4235c50d8ae3SPaolo Bonzini 				  union kvm_mmu_page_role new_role)
4236c50d8ae3SPaolo Bonzini {
4237c50d8ae3SPaolo Bonzini 	uint i;
4238c50d8ae3SPaolo Bonzini 	struct kvm_mmu_root_info root;
4239c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4240c50d8ae3SPaolo Bonzini 
4241be01e8e2SSean Christopherson 	root.pgd = mmu->root_pgd;
4242c50d8ae3SPaolo Bonzini 	root.hpa = mmu->root_hpa;
4243c50d8ae3SPaolo Bonzini 
4244be01e8e2SSean Christopherson 	if (is_root_usable(&root, new_pgd, new_role))
42450be44352SSean Christopherson 		return true;
42460be44352SSean Christopherson 
4247c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4248c50d8ae3SPaolo Bonzini 		swap(root, mmu->prev_roots[i]);
4249c50d8ae3SPaolo Bonzini 
4250be01e8e2SSean Christopherson 		if (is_root_usable(&root, new_pgd, new_role))
4251c50d8ae3SPaolo Bonzini 			break;
4252c50d8ae3SPaolo Bonzini 	}
4253c50d8ae3SPaolo Bonzini 
4254c50d8ae3SPaolo Bonzini 	mmu->root_hpa = root.hpa;
4255be01e8e2SSean Christopherson 	mmu->root_pgd = root.pgd;
4256c50d8ae3SPaolo Bonzini 
4257c50d8ae3SPaolo Bonzini 	return i < KVM_MMU_NUM_PREV_ROOTS;
4258c50d8ae3SPaolo Bonzini }
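
/*
 * Illustration, assuming KVM_MMU_NUM_PREV_ROOTS == 3: the swap() loop
 * above rotates the current root through prev_roots[].  With
 * prev_roots == [A, B, C] and a non-matching current root R, the
 * successive swaps leave prev_roots == [R, A, B] and root == C, so on
 * a complete miss the LRU entry C is what ends up in root_hpa for the
 * caller to free.
 */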
4259c50d8ae3SPaolo Bonzini 
4260be01e8e2SSean Christopherson static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4261b869855bSSean Christopherson 			    union kvm_mmu_page_role new_role)
4262c50d8ae3SPaolo Bonzini {
4263c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4264c50d8ae3SPaolo Bonzini 
4265c50d8ae3SPaolo Bonzini 	/*
4266c50d8ae3SPaolo Bonzini 	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
4267c50d8ae3SPaolo Bonzini 	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
4268c50d8ae3SPaolo Bonzini 	 * later if necessary.
4269c50d8ae3SPaolo Bonzini 	 */
4270c50d8ae3SPaolo Bonzini 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
4271b869855bSSean Christopherson 	    mmu->root_level >= PT64_ROOT_4LEVEL)
4272fe9304d3SVitaly Kuznetsov 		return cached_root_available(vcpu, new_pgd, new_role);
4273c50d8ae3SPaolo Bonzini 
4274c50d8ae3SPaolo Bonzini 	return false;
4275c50d8ae3SPaolo Bonzini }
4276c50d8ae3SPaolo Bonzini 
4277be01e8e2SSean Christopherson static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4278c50d8ae3SPaolo Bonzini 			      union kvm_mmu_page_role new_role,
42794a632ac6SSean Christopherson 			      bool skip_tlb_flush, bool skip_mmu_sync)
4280c50d8ae3SPaolo Bonzini {
4281be01e8e2SSean Christopherson 	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
4282b869855bSSean Christopherson 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
4283b869855bSSean Christopherson 		return;
4284c50d8ae3SPaolo Bonzini 	}
4285c50d8ae3SPaolo Bonzini 
4286c50d8ae3SPaolo Bonzini 	/*
4287b869855bSSean Christopherson 	 * It's possible that the cached previous root page is obsolete because
4288b869855bSSean Christopherson 	 * of a change in the MMU generation number. However, changing the
4289b869855bSSean Christopherson 	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
4290b869855bSSean Christopherson 	 * free the root set here and allocate a new one.
4291b869855bSSean Christopherson 	 */
4292b869855bSSean Christopherson 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4293b869855bSSean Christopherson 
429471fe7013SSean Christopherson 	if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
4295b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
429671fe7013SSean Christopherson 	if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
4297b869855bSSean Christopherson 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4298b869855bSSean Christopherson 
4299b869855bSSean Christopherson 	/*
4300b869855bSSean Christopherson 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4301b869855bSSean Christopherson 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
4302b869855bSSean Christopherson 	 * valid. So clear any cached MMIO info even when we don't need to sync
4303b869855bSSean Christopherson 	 * the shadow page tables.
4304c50d8ae3SPaolo Bonzini 	 */
4305c50d8ae3SPaolo Bonzini 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4306c50d8ae3SPaolo Bonzini 
4307e47c4aeeSSean Christopherson 	__clear_sp_write_flooding_count(to_shadow_page(vcpu->arch.mmu->root_hpa));
4308c50d8ae3SPaolo Bonzini }
4309c50d8ae3SPaolo Bonzini 
4310be01e8e2SSean Christopherson void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
43114a632ac6SSean Christopherson 		     bool skip_mmu_sync)
4312c50d8ae3SPaolo Bonzini {
4313be01e8e2SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
43144a632ac6SSean Christopherson 			  skip_tlb_flush, skip_mmu_sync);
4315c50d8ae3SPaolo Bonzini }
4316be01e8e2SSean Christopherson EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4317c50d8ae3SPaolo Bonzini 
4318c50d8ae3SPaolo Bonzini static unsigned long get_cr3(struct kvm_vcpu *vcpu)
4319c50d8ae3SPaolo Bonzini {
4320c50d8ae3SPaolo Bonzini 	return kvm_read_cr3(vcpu);
4321c50d8ae3SPaolo Bonzini }
4322c50d8ae3SPaolo Bonzini 
4323c50d8ae3SPaolo Bonzini static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
43240a2b64c5SBen Gardon 			   unsigned int access, int *nr_present)
4325c50d8ae3SPaolo Bonzini {
4326c50d8ae3SPaolo Bonzini 	if (unlikely(is_mmio_spte(*sptep))) {
4327c50d8ae3SPaolo Bonzini 		if (gfn != get_mmio_spte_gfn(*sptep)) {
4328c50d8ae3SPaolo Bonzini 			mmu_spte_clear_no_track(sptep);
4329c50d8ae3SPaolo Bonzini 			return true;
4330c50d8ae3SPaolo Bonzini 		}
4331c50d8ae3SPaolo Bonzini 
4332c50d8ae3SPaolo Bonzini 		(*nr_present)++;
4333c50d8ae3SPaolo Bonzini 		mark_mmio_spte(vcpu, sptep, gfn, access);
4334c50d8ae3SPaolo Bonzini 		return true;
4335c50d8ae3SPaolo Bonzini 	}
4336c50d8ae3SPaolo Bonzini 
4337c50d8ae3SPaolo Bonzini 	return false;
4338c50d8ae3SPaolo Bonzini }
4339c50d8ae3SPaolo Bonzini 
4340c50d8ae3SPaolo Bonzini static inline bool is_last_gpte(struct kvm_mmu *mmu,
4341c50d8ae3SPaolo Bonzini 				unsigned level, unsigned gpte)
4342c50d8ae3SPaolo Bonzini {
4343c50d8ae3SPaolo Bonzini 	/*
4344c50d8ae3SPaolo Bonzini 	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
4345c50d8ae3SPaolo Bonzini 	 * If it is clear, there are no large pages at this level, so clear
4346c50d8ae3SPaolo Bonzini 	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
4347c50d8ae3SPaolo Bonzini 	 */
4348c50d8ae3SPaolo Bonzini 	gpte &= level - mmu->last_nonleaf_level;
4349c50d8ae3SPaolo Bonzini 
4350c50d8ae3SPaolo Bonzini 	/*
43513bae0459SSean Christopherson 	 * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
43523bae0459SSean Christopherson 	 * iff level <= PG_LEVEL_4K, which for our purpose means
43533bae0459SSean Christopherson 	 * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
4354c50d8ae3SPaolo Bonzini 	 */
43553bae0459SSean Christopherson 	gpte |= level - PG_LEVEL_4K - 1;
4356c50d8ae3SPaolo Bonzini 
4357c50d8ae3SPaolo Bonzini 	return gpte & PT_PAGE_SIZE_MASK;
4358c50d8ae3SPaolo Bonzini }
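
/*
 * Worked example for is_last_gpte(), assuming a 4-level guest
 * (mmu->last_nonleaf_level == 4): for level == 4 the first RHS is 0,
 * so gpte is cleared and a PML4E is never treated as a leaf; for
 * level == 3 or 2 the unsigned subtraction wraps with bit 7 set, so
 * PT_PAGE_SIZE_MASK in gpte decides the result (1GB/2MB pages); for
 * level == PG_LEVEL_4K the second RHS is (unsigned)-1, which forces
 * bit 7 on, so a 4K PTE always terminates the walk.
 */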
4359c50d8ae3SPaolo Bonzini 
4360c50d8ae3SPaolo Bonzini #define PTTYPE_EPT 18 /* arbitrary */
4361c50d8ae3SPaolo Bonzini #define PTTYPE PTTYPE_EPT
4362c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4363c50d8ae3SPaolo Bonzini #undef PTTYPE
4364c50d8ae3SPaolo Bonzini 
4365c50d8ae3SPaolo Bonzini #define PTTYPE 64
4366c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4367c50d8ae3SPaolo Bonzini #undef PTTYPE
4368c50d8ae3SPaolo Bonzini 
4369c50d8ae3SPaolo Bonzini #define PTTYPE 32
4370c50d8ae3SPaolo Bonzini #include "paging_tmpl.h"
4371c50d8ae3SPaolo Bonzini #undef PTTYPE
4372c50d8ae3SPaolo Bonzini 
4373c50d8ae3SPaolo Bonzini static void
4374c50d8ae3SPaolo Bonzini __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4375c50d8ae3SPaolo Bonzini 			struct rsvd_bits_validate *rsvd_check,
4376c50d8ae3SPaolo Bonzini 			int maxphyaddr, int level, bool nx, bool gbpages,
4377c50d8ae3SPaolo Bonzini 			bool pse, bool amd)
4378c50d8ae3SPaolo Bonzini {
4379c50d8ae3SPaolo Bonzini 	u64 exb_bit_rsvd = 0;
4380c50d8ae3SPaolo Bonzini 	u64 gbpages_bit_rsvd = 0;
4381c50d8ae3SPaolo Bonzini 	u64 nonleaf_bit8_rsvd = 0;
4382c50d8ae3SPaolo Bonzini 
4383c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = 0;
4384c50d8ae3SPaolo Bonzini 
4385c50d8ae3SPaolo Bonzini 	if (!nx)
4386c50d8ae3SPaolo Bonzini 		exb_bit_rsvd = rsvd_bits(63, 63);
4387c50d8ae3SPaolo Bonzini 	if (!gbpages)
4388c50d8ae3SPaolo Bonzini 		gbpages_bit_rsvd = rsvd_bits(7, 7);
4389c50d8ae3SPaolo Bonzini 
4390c50d8ae3SPaolo Bonzini 	/*
4391c50d8ae3SPaolo Bonzini 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4392c50d8ae3SPaolo Bonzini 	 * leaf entries) on AMD CPUs only.
4393c50d8ae3SPaolo Bonzini 	 */
4394c50d8ae3SPaolo Bonzini 	if (amd)
4395c50d8ae3SPaolo Bonzini 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4396c50d8ae3SPaolo Bonzini 
4397c50d8ae3SPaolo Bonzini 	switch (level) {
4398c50d8ae3SPaolo Bonzini 	case PT32_ROOT_LEVEL:
4399c50d8ae3SPaolo Bonzini 		/* no rsvd bits for 2-level 4K page table entries */
4400c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][1] = 0;
4401c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][0] = 0;
4402c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4403c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4404c50d8ae3SPaolo Bonzini 
4405c50d8ae3SPaolo Bonzini 		if (!pse) {
4406c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = 0;
4407c50d8ae3SPaolo Bonzini 			break;
4408c50d8ae3SPaolo Bonzini 		}
4409c50d8ae3SPaolo Bonzini 
4410c50d8ae3SPaolo Bonzini 		if (is_cpuid_PSE36())
4411c50d8ae3SPaolo Bonzini 			/* 36-bit PSE 4MB page */
4412c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4413c50d8ae3SPaolo Bonzini 		else
4414c50d8ae3SPaolo Bonzini 			/* 32-bit PSE 4MB page */
4415c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4416c50d8ae3SPaolo Bonzini 		break;
4417c50d8ae3SPaolo Bonzini 	case PT32E_ROOT_LEVEL:
4418c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][2] =
4419c50d8ae3SPaolo Bonzini 			rsvd_bits(maxphyaddr, 63) |
4420c50d8ae3SPaolo Bonzini 			rsvd_bits(5, 8) | rsvd_bits(1, 2);	/* PDPTE */
4421c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
4422c50d8ae3SPaolo Bonzini 			rsvd_bits(maxphyaddr, 62);	/* PDE */
4423c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
4424c50d8ae3SPaolo Bonzini 			rsvd_bits(maxphyaddr, 62);	/* PTE */
4425c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
4426c50d8ae3SPaolo Bonzini 			rsvd_bits(maxphyaddr, 62) |
4427c50d8ae3SPaolo Bonzini 			rsvd_bits(13, 20);		/* large page */
4428c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4429c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4430c50d8ae3SPaolo Bonzini 		break;
4431c50d8ae3SPaolo Bonzini 	case PT64_ROOT_5LEVEL:
4432c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd |
4433c50d8ae3SPaolo Bonzini 			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
4434c50d8ae3SPaolo Bonzini 			rsvd_bits(maxphyaddr, 51);
4435c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][4] =
4436c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][4];
4437df561f66SGustavo A. R. Silva 		fallthrough;
4438c50d8ae3SPaolo Bonzini 	case PT64_ROOT_4LEVEL:
4439c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
4440c50d8ae3SPaolo Bonzini 			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
4441c50d8ae3SPaolo Bonzini 			rsvd_bits(maxphyaddr, 51);
4442c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
44435ecad245SPaolo Bonzini 			gbpages_bit_rsvd |
4444c50d8ae3SPaolo Bonzini 			rsvd_bits(maxphyaddr, 51);
4445c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
4446c50d8ae3SPaolo Bonzini 			rsvd_bits(maxphyaddr, 51);
4447c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
4448c50d8ae3SPaolo Bonzini 			rsvd_bits(maxphyaddr, 51);
4449c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][3] =
4450c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][3];
4451c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
4452c50d8ae3SPaolo Bonzini 			gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
4453c50d8ae3SPaolo Bonzini 			rsvd_bits(13, 29);
4454c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
4455c50d8ae3SPaolo Bonzini 			rsvd_bits(maxphyaddr, 51) |
4456c50d8ae3SPaolo Bonzini 			rsvd_bits(13, 20);		/* large page */
4457c50d8ae3SPaolo Bonzini 		rsvd_check->rsvd_bits_mask[1][0] =
4458c50d8ae3SPaolo Bonzini 			rsvd_check->rsvd_bits_mask[0][0];
4459c50d8ae3SPaolo Bonzini 		break;
4460c50d8ae3SPaolo Bonzini 	}
4461c50d8ae3SPaolo Bonzini }
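
/*
 * Example, assuming rsvd_bits(s, e) builds the inclusive mask of bits
 * e:s (as defined in mmu.h): with maxphyaddr == 40 and NX disabled, a
 * 4-level PDE gets rsvd_bits_mask[0][1] == rsvd_bits(63, 63) |
 * rsvd_bits(40, 51), i.e. the execute-disable bit plus the physical
 * address bits 51:40 that such a CPU can never produce.
 */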
4462c50d8ae3SPaolo Bonzini 
4463c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4464c50d8ae3SPaolo Bonzini 				  struct kvm_mmu *context)
4465c50d8ae3SPaolo Bonzini {
4466c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
4467c50d8ae3SPaolo Bonzini 				cpuid_maxphyaddr(vcpu), context->root_level,
4468c50d8ae3SPaolo Bonzini 				context->nx,
4469c50d8ae3SPaolo Bonzini 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
447023493d0aSSean Christopherson 				is_pse(vcpu),
447123493d0aSSean Christopherson 				guest_cpuid_is_amd_or_hygon(vcpu));
4472c50d8ae3SPaolo Bonzini }
4473c50d8ae3SPaolo Bonzini 
4474c50d8ae3SPaolo Bonzini static void
4475c50d8ae3SPaolo Bonzini __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
4476c50d8ae3SPaolo Bonzini 			    int maxphyaddr, bool execonly)
4477c50d8ae3SPaolo Bonzini {
4478c50d8ae3SPaolo Bonzini 	u64 bad_mt_xwr;
4479c50d8ae3SPaolo Bonzini 
4480c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[0][4] =
4481c50d8ae3SPaolo Bonzini 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
4482c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[0][3] =
4483c50d8ae3SPaolo Bonzini 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
4484c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[0][2] =
4485c50d8ae3SPaolo Bonzini 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
4486c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[0][1] =
4487c50d8ae3SPaolo Bonzini 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
4488c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
4489c50d8ae3SPaolo Bonzini 
4490c50d8ae3SPaolo Bonzini 	/* large page */
4491c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4492c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4493c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][2] =
4494c50d8ae3SPaolo Bonzini 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
4495c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][1] =
4496c50d8ae3SPaolo Bonzini 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
4497c50d8ae3SPaolo Bonzini 	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4498c50d8ae3SPaolo Bonzini 
4499c50d8ae3SPaolo Bonzini 	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
4500c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
4501c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
4502c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
4503c50d8ae3SPaolo Bonzini 	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
4504c50d8ae3SPaolo Bonzini 	if (!execonly) {
4505c50d8ae3SPaolo Bonzini 		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
4506c50d8ae3SPaolo Bonzini 		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4507c50d8ae3SPaolo Bonzini 	}
4508c50d8ae3SPaolo Bonzini 	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4509c50d8ae3SPaolo Bonzini }
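
/*
 * Note: bad_mt_xwr is a 64-bit bitmap indexed by the low 6 bits of an
 * EPT PTE (XWR in bits 2:0, memory type in bits 5:3); a set bit marks
 * an illegal combination.  For instance, 0xFFull << (2 * 8) covers all
 * eight XWR values whose memory type is 2, and REPEAT_BYTE(1ull << 4)
 * covers XWR == 100b (execute-only) for every memory type.
 */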
4510c50d8ae3SPaolo Bonzini 
4511c50d8ae3SPaolo Bonzini static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4512c50d8ae3SPaolo Bonzini 		struct kvm_mmu *context, bool execonly)
4513c50d8ae3SPaolo Bonzini {
4514c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4515c50d8ae3SPaolo Bonzini 				    cpuid_maxphyaddr(vcpu), execonly);
4516c50d8ae3SPaolo Bonzini }
4517c50d8ae3SPaolo Bonzini 
4518c50d8ae3SPaolo Bonzini /*
4519c50d8ae3SPaolo Bonzini  * The host-side page table is the shadow page table for the page
4520c50d8ae3SPaolo Bonzini  * table in the guest (or in an AMD nested guest); its MMU features
4521c50d8ae3SPaolo Bonzini  * completely follow the guest's features.
4522c50d8ae3SPaolo Bonzini  */
4523c50d8ae3SPaolo Bonzini void
4524c50d8ae3SPaolo Bonzini reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
4525c50d8ae3SPaolo Bonzini {
4526c50d8ae3SPaolo Bonzini 	bool uses_nx = context->nx ||
4527c50d8ae3SPaolo Bonzini 		context->mmu_role.base.smep_andnot_wp;
4528c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4529c50d8ae3SPaolo Bonzini 	int i;
4530c50d8ae3SPaolo Bonzini 
4531c50d8ae3SPaolo Bonzini 	/*
4532c50d8ae3SPaolo Bonzini 	 * Passing "true" to the last argument is okay; it adds a check
4533c50d8ae3SPaolo Bonzini 	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
4534c50d8ae3SPaolo Bonzini 	 */
4535c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4536c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
4537c50d8ae3SPaolo Bonzini 				shadow_phys_bits,
4538c50d8ae3SPaolo Bonzini 				context->shadow_root_level, uses_nx,
4539c50d8ae3SPaolo Bonzini 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4540c50d8ae3SPaolo Bonzini 				is_pse(vcpu), true);
4541c50d8ae3SPaolo Bonzini 
4542c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4543c50d8ae3SPaolo Bonzini 		return;
4544c50d8ae3SPaolo Bonzini 
4545c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4546c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4547c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4548c50d8ae3SPaolo Bonzini 	}
4549c50d8ae3SPaolo Bonzini 
4550c50d8ae3SPaolo Bonzini }
4551c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
4552c50d8ae3SPaolo Bonzini 
4553c50d8ae3SPaolo Bonzini static inline bool boot_cpu_is_amd(void)
4554c50d8ae3SPaolo Bonzini {
4555c50d8ae3SPaolo Bonzini 	WARN_ON_ONCE(!tdp_enabled);
4556c50d8ae3SPaolo Bonzini 	return shadow_x_mask == 0;
4557c50d8ae3SPaolo Bonzini }
4558c50d8ae3SPaolo Bonzini 
4559c50d8ae3SPaolo Bonzini /*
4560c50d8ae3SPaolo Bonzini  * The direct page table on the host uses as many MMU features as
4561c50d8ae3SPaolo Bonzini  * possible; however, KVM currently does not do execution-protection.
4562c50d8ae3SPaolo Bonzini  */
4563c50d8ae3SPaolo Bonzini static void
4564c50d8ae3SPaolo Bonzini reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4565c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context)
4566c50d8ae3SPaolo Bonzini {
4567c50d8ae3SPaolo Bonzini 	struct rsvd_bits_validate *shadow_zero_check;
4568c50d8ae3SPaolo Bonzini 	int i;
4569c50d8ae3SPaolo Bonzini 
4570c50d8ae3SPaolo Bonzini 	shadow_zero_check = &context->shadow_zero_check;
4571c50d8ae3SPaolo Bonzini 
4572c50d8ae3SPaolo Bonzini 	if (boot_cpu_is_amd())
4573c50d8ae3SPaolo Bonzini 		__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
4574c50d8ae3SPaolo Bonzini 					shadow_phys_bits,
4575c50d8ae3SPaolo Bonzini 					context->shadow_root_level, false,
4576c50d8ae3SPaolo Bonzini 					boot_cpu_has(X86_FEATURE_GBPAGES),
4577c50d8ae3SPaolo Bonzini 					true, true);
4578c50d8ae3SPaolo Bonzini 	else
4579c50d8ae3SPaolo Bonzini 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
4580c50d8ae3SPaolo Bonzini 					    shadow_phys_bits,
4581c50d8ae3SPaolo Bonzini 					    false);
4582c50d8ae3SPaolo Bonzini 
4583c50d8ae3SPaolo Bonzini 	if (!shadow_me_mask)
4584c50d8ae3SPaolo Bonzini 		return;
4585c50d8ae3SPaolo Bonzini 
4586c50d8ae3SPaolo Bonzini 	for (i = context->shadow_root_level; --i >= 0;) {
4587c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4588c50d8ae3SPaolo Bonzini 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4589c50d8ae3SPaolo Bonzini 	}
4590c50d8ae3SPaolo Bonzini }
4591c50d8ae3SPaolo Bonzini 
4592c50d8ae3SPaolo Bonzini /*
4593c50d8ae3SPaolo Bonzini  * Same as the comment on reset_shadow_zero_bits_mask(), except that
4594c50d8ae3SPaolo Bonzini  * this is the shadow page table for an Intel nested guest.
4595c50d8ae3SPaolo Bonzini  */
4596c50d8ae3SPaolo Bonzini static void
4597c50d8ae3SPaolo Bonzini reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4598c50d8ae3SPaolo Bonzini 				struct kvm_mmu *context, bool execonly)
4599c50d8ae3SPaolo Bonzini {
4600c50d8ae3SPaolo Bonzini 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
4601c50d8ae3SPaolo Bonzini 				    shadow_phys_bits, execonly);
4602c50d8ae3SPaolo Bonzini }
4603c50d8ae3SPaolo Bonzini 
4604c50d8ae3SPaolo Bonzini #define BYTE_MASK(access) \
4605c50d8ae3SPaolo Bonzini 	((1 & (access) ? 2 : 0) | \
4606c50d8ae3SPaolo Bonzini 	 (2 & (access) ? 4 : 0) | \
4607c50d8ae3SPaolo Bonzini 	 (3 & (access) ? 8 : 0) | \
4608c50d8ae3SPaolo Bonzini 	 (4 & (access) ? 16 : 0) | \
4609c50d8ae3SPaolo Bonzini 	 (5 & (access) ? 32 : 0) | \
4610c50d8ae3SPaolo Bonzini 	 (6 & (access) ? 64 : 0) | \
4611c50d8ae3SPaolo Bonzini 	 (7 & (access) ? 128 : 0))
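
/*
 * Worked expansion: BYTE_MASK(access) sets bit i (1 <= i <= 7) iff
 * (i & access) is non-zero.  Assuming ACC_EXEC_MASK == 1,
 * ACC_WRITE_MASK == 2 and ACC_USER_MASK == 4, the constants below are
 * x == 0xaa, w == 0xcc and u == 0xf0: each has a 1 for every 3-bit
 * UWX combination that includes the corresponding access right.
 */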
4612c50d8ae3SPaolo Bonzini 
4613c50d8ae3SPaolo Bonzini 
4614c50d8ae3SPaolo Bonzini static void update_permission_bitmask(struct kvm_vcpu *vcpu,
4615c50d8ae3SPaolo Bonzini 				      struct kvm_mmu *mmu, bool ept)
4616c50d8ae3SPaolo Bonzini {
4617c50d8ae3SPaolo Bonzini 	unsigned byte;
4618c50d8ae3SPaolo Bonzini 
4619c50d8ae3SPaolo Bonzini 	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4620c50d8ae3SPaolo Bonzini 	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4621c50d8ae3SPaolo Bonzini 	const u8 u = BYTE_MASK(ACC_USER_MASK);
4622c50d8ae3SPaolo Bonzini 
4623c50d8ae3SPaolo Bonzini 	bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
4624c50d8ae3SPaolo Bonzini 	bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
4625c50d8ae3SPaolo Bonzini 	bool cr0_wp = is_write_protection(vcpu);
4626c50d8ae3SPaolo Bonzini 
4627c50d8ae3SPaolo Bonzini 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4628c50d8ae3SPaolo Bonzini 		unsigned pfec = byte << 1;
4629c50d8ae3SPaolo Bonzini 
4630c50d8ae3SPaolo Bonzini 		/*
4631c50d8ae3SPaolo Bonzini 		 * Each "*f" variable has a 1 bit for each UWX value
4632c50d8ae3SPaolo Bonzini 		 * that causes a fault with the given PFEC.
4633c50d8ae3SPaolo Bonzini 		 */
4634c50d8ae3SPaolo Bonzini 
4635c50d8ae3SPaolo Bonzini 		/* Faults from writes to non-writable pages */
4636c50d8ae3SPaolo Bonzini 		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4637c50d8ae3SPaolo Bonzini 		/* Faults from user mode accesses to supervisor pages */
4638c50d8ae3SPaolo Bonzini 		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4639c50d8ae3SPaolo Bonzini 		/* Faults from fetches of non-executable pages */
4640c50d8ae3SPaolo Bonzini 		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4641c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode fetches of user pages */
4642c50d8ae3SPaolo Bonzini 		u8 smepf = 0;
4643c50d8ae3SPaolo Bonzini 		/* Faults from kernel mode accesses of user pages */
4644c50d8ae3SPaolo Bonzini 		u8 smapf = 0;
4645c50d8ae3SPaolo Bonzini 
4646c50d8ae3SPaolo Bonzini 		if (!ept) {
4647c50d8ae3SPaolo Bonzini 			/* Faults from kernel mode accesses to user pages */
4648c50d8ae3SPaolo Bonzini 			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4649c50d8ae3SPaolo Bonzini 
4650c50d8ae3SPaolo Bonzini 			/* Not really needed: !nx will cause pte.nx to fault */
4651c50d8ae3SPaolo Bonzini 			if (!mmu->nx)
4652c50d8ae3SPaolo Bonzini 				ff = 0;
4653c50d8ae3SPaolo Bonzini 
4654c50d8ae3SPaolo Bonzini 			/* Allow supervisor writes if !cr0.wp */
4655c50d8ae3SPaolo Bonzini 			if (!cr0_wp)
4656c50d8ae3SPaolo Bonzini 				wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4657c50d8ae3SPaolo Bonzini 
4658c50d8ae3SPaolo Bonzini 			/* Disallow supervisor fetches of user code if cr4.smep */
4659c50d8ae3SPaolo Bonzini 			if (cr4_smep)
4660c50d8ae3SPaolo Bonzini 				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4661c50d8ae3SPaolo Bonzini 
4662c50d8ae3SPaolo Bonzini 			/*
4663c50d8ae3SPaolo Bonzini 			 * SMAP: kernel-mode data accesses through user-mode
4664c50d8ae3SPaolo Bonzini 			 * mappings should fault. A fault is considered
4665c50d8ae3SPaolo Bonzini 			 * a SMAP violation if all of the following
4666c50d8ae3SPaolo Bonzini 			 * conditions are true:
4667c50d8ae3SPaolo Bonzini 			 *   - X86_CR4_SMAP is set in CR4
4668c50d8ae3SPaolo Bonzini 			 *   - A user page is accessed
4669c50d8ae3SPaolo Bonzini 			 *   - The access is not a fetch
4670c50d8ae3SPaolo Bonzini 			 *   - The page fault is in kernel mode
4671c50d8ae3SPaolo Bonzini 			 *   - CPL = 3 or X86_EFLAGS_AC is clear
4672c50d8ae3SPaolo Bonzini 			 *
4673c50d8ae3SPaolo Bonzini 			 * Here, we cover the first four conditions.
4674c50d8ae3SPaolo Bonzini 			 * The fifth is computed dynamically in permission_fault();
4675c50d8ae3SPaolo Bonzini 			 * the PFERR_RSVD_MASK bit will be set in PFEC if the access is
4676c50d8ae3SPaolo Bonzini 			 * *not* subject to SMAP restrictions.
4677c50d8ae3SPaolo Bonzini 			 */
4678c50d8ae3SPaolo Bonzini 			if (cr4_smap)
4679c50d8ae3SPaolo Bonzini 				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4680c50d8ae3SPaolo Bonzini 		}
4681c50d8ae3SPaolo Bonzini 
4682c50d8ae3SPaolo Bonzini 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4683c50d8ae3SPaolo Bonzini 	}
4684c50d8ae3SPaolo Bonzini }
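
/*
 * Sketch of the consumer: permission_fault() roughly indexes this
 * table with the error code shifted right by one and then tests the
 * bit selected by the pte's UWX access bits, i.e.
 * (mmu->permissions[pfec >> 1] >> pte_access) & 1; a set bit means
 * the access faults.
 */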
4685c50d8ae3SPaolo Bonzini 
4686c50d8ae3SPaolo Bonzini /*
4687c50d8ae3SPaolo Bonzini  * PKU is an additional mechanism by which paging controls access to
4688c50d8ae3SPaolo Bonzini  * user-mode addresses based on the value in the PKRU register.  Protection
4689c50d8ae3SPaolo Bonzini  * key violations are reported through a bit in the page fault error code.
4690c50d8ae3SPaolo Bonzini  * Unlike other bits of the error code, the PK bit is not known at the
4691c50d8ae3SPaolo Bonzini  * call site of e.g. gva_to_gpa; it must be computed directly in
4692c50d8ae3SPaolo Bonzini  * permission_fault based on two bits of PKRU, on some machine state (CR4,
4693c50d8ae3SPaolo Bonzini  * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4694c50d8ae3SPaolo Bonzini  *
4695c50d8ae3SPaolo Bonzini  * In particular the following conditions come from the error code, the
4696c50d8ae3SPaolo Bonzini  * page tables and the machine state:
4697c50d8ae3SPaolo Bonzini  * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4698c50d8ae3SPaolo Bonzini  * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4699c50d8ae3SPaolo Bonzini  * - PK is always zero if U=0 in the page tables
4700c50d8ae3SPaolo Bonzini  * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4701c50d8ae3SPaolo Bonzini  *
4702c50d8ae3SPaolo Bonzini  * The PKRU bitmask caches the result of these four conditions.  The error
4703c50d8ae3SPaolo Bonzini  * code (minus the P bit) and the page table's U bit form an index into the
4704c50d8ae3SPaolo Bonzini  * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4705c50d8ae3SPaolo Bonzini  * with the two bits of the PKRU register corresponding to the protection key.
4706c50d8ae3SPaolo Bonzini  * For the first three conditions above the bits will be 00, thus masking
4707c50d8ae3SPaolo Bonzini  * away both AD and WD.  For all reads or if the last condition holds, WD
4708c50d8ae3SPaolo Bonzini  * only will be masked away.
4709c50d8ae3SPaolo Bonzini  */
4710c50d8ae3SPaolo Bonzini static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4711c50d8ae3SPaolo Bonzini 				bool ept)
4712c50d8ae3SPaolo Bonzini {
4713c50d8ae3SPaolo Bonzini 	unsigned bit;
4714c50d8ae3SPaolo Bonzini 	bool wp;
4715c50d8ae3SPaolo Bonzini 
4716c50d8ae3SPaolo Bonzini 	if (ept) {
4717c50d8ae3SPaolo Bonzini 		mmu->pkru_mask = 0;
4718c50d8ae3SPaolo Bonzini 		return;
4719c50d8ae3SPaolo Bonzini 	}
4720c50d8ae3SPaolo Bonzini 
4721c50d8ae3SPaolo Bonzini 	/* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
4722c50d8ae3SPaolo Bonzini 	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
4723c50d8ae3SPaolo Bonzini 		mmu->pkru_mask = 0;
4724c50d8ae3SPaolo Bonzini 		return;
4725c50d8ae3SPaolo Bonzini 	}
4726c50d8ae3SPaolo Bonzini 
4727c50d8ae3SPaolo Bonzini 	wp = is_write_protection(vcpu);
4728c50d8ae3SPaolo Bonzini 
4729c50d8ae3SPaolo Bonzini 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4730c50d8ae3SPaolo Bonzini 		unsigned pfec, pkey_bits;
4731c50d8ae3SPaolo Bonzini 		bool check_pkey, check_write, ff, uf, wf, pte_user;
4732c50d8ae3SPaolo Bonzini 
4733c50d8ae3SPaolo Bonzini 		pfec = bit << 1;
4734c50d8ae3SPaolo Bonzini 		ff = pfec & PFERR_FETCH_MASK;
4735c50d8ae3SPaolo Bonzini 		uf = pfec & PFERR_USER_MASK;
4736c50d8ae3SPaolo Bonzini 		wf = pfec & PFERR_WRITE_MASK;
4737c50d8ae3SPaolo Bonzini 
4738c50d8ae3SPaolo Bonzini 		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
4739c50d8ae3SPaolo Bonzini 		pte_user = pfec & PFERR_RSVD_MASK;
4740c50d8ae3SPaolo Bonzini 
4741c50d8ae3SPaolo Bonzini 		/*
4742c50d8ae3SPaolo Bonzini 		 * Protection keys only need to be checked for accesses
4743c50d8ae3SPaolo Bonzini 		 * that are not instruction fetches and are to user pages.
4744c50d8ae3SPaolo Bonzini 		 */
4745c50d8ae3SPaolo Bonzini 		check_pkey = (!ff && pte_user);
4746c50d8ae3SPaolo Bonzini 		/*
4747c50d8ae3SPaolo Bonzini 		 * Write access is controlled by PKRU if it is a
4748c50d8ae3SPaolo Bonzini 		 * user access or CR0.WP = 1.
4749c50d8ae3SPaolo Bonzini 		 */
4750c50d8ae3SPaolo Bonzini 		check_write = check_pkey && wf && (uf || wp);
4751c50d8ae3SPaolo Bonzini 
4752c50d8ae3SPaolo Bonzini 		/* PKRU.AD stops both read and write access. */
4753c50d8ae3SPaolo Bonzini 		pkey_bits = !!check_pkey;
4754c50d8ae3SPaolo Bonzini 		/* PKRU.WD stops write access. */
4755c50d8ae3SPaolo Bonzini 		pkey_bits |= (!!check_write) << 1;
4756c50d8ae3SPaolo Bonzini 
4757c50d8ae3SPaolo Bonzini 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4758c50d8ae3SPaolo Bonzini 	}
4759c50d8ae3SPaolo Bonzini }
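
/*
 * Example: for a supervisor write (PFERR_WRITE_MASK set, user bit
 * clear) to a user page with CR0.WP=1, check_pkey and check_write are
 * both true, so pkey_bits == 3 and either PKRU.AD or PKRU.WD can deny
 * the access; for a read, check_write is false and only PKRU.AD
 * (pkey_bits == 1) can deny it.
 */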
4760c50d8ae3SPaolo Bonzini 
4761c50d8ae3SPaolo Bonzini static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
4762c50d8ae3SPaolo Bonzini {
4763c50d8ae3SPaolo Bonzini 	unsigned root_level = mmu->root_level;
4764c50d8ae3SPaolo Bonzini 
4765c50d8ae3SPaolo Bonzini 	mmu->last_nonleaf_level = root_level;
4766c50d8ae3SPaolo Bonzini 	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
4767c50d8ae3SPaolo Bonzini 		mmu->last_nonleaf_level++;
4768c50d8ae3SPaolo Bonzini }
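
/*
 * Note: the PSE bump means that for a 32-bit non-PAE guest with
 * CR4.PSE set, a PDE's PS bit is honored (4MB pages exist at level 2);
 * without PSE, is_last_gpte() treats every PDE as a non-leaf entry.
 */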
4769c50d8ae3SPaolo Bonzini 
4770c50d8ae3SPaolo Bonzini static void paging64_init_context_common(struct kvm_vcpu *vcpu,
4771c50d8ae3SPaolo Bonzini 					 struct kvm_mmu *context,
4772c50d8ae3SPaolo Bonzini 					 int level)
4773c50d8ae3SPaolo Bonzini {
4774c50d8ae3SPaolo Bonzini 	context->nx = is_nx(vcpu);
4775c50d8ae3SPaolo Bonzini 	context->root_level = level;
4776c50d8ae3SPaolo Bonzini 
4777c50d8ae3SPaolo Bonzini 	reset_rsvds_bits_mask(vcpu, context);
4778c50d8ae3SPaolo Bonzini 	update_permission_bitmask(vcpu, context, false);
4779c50d8ae3SPaolo Bonzini 	update_pkru_bitmask(vcpu, context, false);
4780c50d8ae3SPaolo Bonzini 	update_last_nonleaf_level(vcpu, context);
4781c50d8ae3SPaolo Bonzini 
4782c50d8ae3SPaolo Bonzini 	MMU_WARN_ON(!is_pae(vcpu));
4783c50d8ae3SPaolo Bonzini 	context->page_fault = paging64_page_fault;
4784c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging64_gva_to_gpa;
4785c50d8ae3SPaolo Bonzini 	context->sync_page = paging64_sync_page;
4786c50d8ae3SPaolo Bonzini 	context->invlpg = paging64_invlpg;
4787c50d8ae3SPaolo Bonzini 	context->update_pte = paging64_update_pte;
4788c50d8ae3SPaolo Bonzini 	context->shadow_root_level = level;
4789c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4790c50d8ae3SPaolo Bonzini }
4791c50d8ae3SPaolo Bonzini 
4792c50d8ae3SPaolo Bonzini static void paging64_init_context(struct kvm_vcpu *vcpu,
4793c50d8ae3SPaolo Bonzini 				  struct kvm_mmu *context)
4794c50d8ae3SPaolo Bonzini {
4795c50d8ae3SPaolo Bonzini 	int root_level = is_la57_mode(vcpu) ?
4796c50d8ae3SPaolo Bonzini 			 PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4797c50d8ae3SPaolo Bonzini 
4798c50d8ae3SPaolo Bonzini 	paging64_init_context_common(vcpu, context, root_level);
4799c50d8ae3SPaolo Bonzini }
4800c50d8ae3SPaolo Bonzini 
4801c50d8ae3SPaolo Bonzini static void paging32_init_context(struct kvm_vcpu *vcpu,
4802c50d8ae3SPaolo Bonzini 				  struct kvm_mmu *context)
4803c50d8ae3SPaolo Bonzini {
4804c50d8ae3SPaolo Bonzini 	context->nx = false;
4805c50d8ae3SPaolo Bonzini 	context->root_level = PT32_ROOT_LEVEL;
4806c50d8ae3SPaolo Bonzini 
4807c50d8ae3SPaolo Bonzini 	reset_rsvds_bits_mask(vcpu, context);
4808c50d8ae3SPaolo Bonzini 	update_permission_bitmask(vcpu, context, false);
4809c50d8ae3SPaolo Bonzini 	update_pkru_bitmask(vcpu, context, false);
4810c50d8ae3SPaolo Bonzini 	update_last_nonleaf_level(vcpu, context);
4811c50d8ae3SPaolo Bonzini 
4812c50d8ae3SPaolo Bonzini 	context->page_fault = paging32_page_fault;
4813c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = paging32_gva_to_gpa;
4814c50d8ae3SPaolo Bonzini 	context->sync_page = paging32_sync_page;
4815c50d8ae3SPaolo Bonzini 	context->invlpg = paging32_invlpg;
4816c50d8ae3SPaolo Bonzini 	context->update_pte = paging32_update_pte;
4817c50d8ae3SPaolo Bonzini 	context->shadow_root_level = PT32E_ROOT_LEVEL;
4818c50d8ae3SPaolo Bonzini 	context->direct_map = false;
4819c50d8ae3SPaolo Bonzini }
4820c50d8ae3SPaolo Bonzini 
4821c50d8ae3SPaolo Bonzini static void paging32E_init_context(struct kvm_vcpu *vcpu,
4822c50d8ae3SPaolo Bonzini 				   struct kvm_mmu *context)
4823c50d8ae3SPaolo Bonzini {
4824c50d8ae3SPaolo Bonzini 	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
4825c50d8ae3SPaolo Bonzini }
4826c50d8ae3SPaolo Bonzini 
4827c50d8ae3SPaolo Bonzini static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
4828c50d8ae3SPaolo Bonzini {
4829c50d8ae3SPaolo Bonzini 	union kvm_mmu_extended_role ext = {0};
4830c50d8ae3SPaolo Bonzini 
4831c50d8ae3SPaolo Bonzini 	ext.cr0_pg = !!is_paging(vcpu);
4832c50d8ae3SPaolo Bonzini 	ext.cr4_pae = !!is_pae(vcpu);
4833c50d8ae3SPaolo Bonzini 	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
4834c50d8ae3SPaolo Bonzini 	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
4835c50d8ae3SPaolo Bonzini 	ext.cr4_pse = !!is_pse(vcpu);
4836c50d8ae3SPaolo Bonzini 	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
4837c50d8ae3SPaolo Bonzini 	ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
4838c50d8ae3SPaolo Bonzini 
4839c50d8ae3SPaolo Bonzini 	ext.valid = 1;
4840c50d8ae3SPaolo Bonzini 
4841c50d8ae3SPaolo Bonzini 	return ext;
4842c50d8ae3SPaolo Bonzini }
4843c50d8ae3SPaolo Bonzini 
4844c50d8ae3SPaolo Bonzini static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
4845c50d8ae3SPaolo Bonzini 						   bool base_only)
4846c50d8ae3SPaolo Bonzini {
4847c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
4848c50d8ae3SPaolo Bonzini 
4849c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
4850c50d8ae3SPaolo Bonzini 	role.base.nxe = !!is_nx(vcpu);
4851c50d8ae3SPaolo Bonzini 	role.base.cr0_wp = is_write_protection(vcpu);
4852c50d8ae3SPaolo Bonzini 	role.base.smm = is_smm(vcpu);
4853c50d8ae3SPaolo Bonzini 	role.base.guest_mode = is_guest_mode(vcpu);
4854c50d8ae3SPaolo Bonzini 
4855c50d8ae3SPaolo Bonzini 	if (base_only)
4856c50d8ae3SPaolo Bonzini 		return role;
4857c50d8ae3SPaolo Bonzini 
4858c50d8ae3SPaolo Bonzini 	role.ext = kvm_calc_mmu_role_ext(vcpu);
4859c50d8ae3SPaolo Bonzini 
4860c50d8ae3SPaolo Bonzini 	return role;
4861c50d8ae3SPaolo Bonzini }
4862c50d8ae3SPaolo Bonzini 
4863d468d94bSSean Christopherson static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4864d468d94bSSean Christopherson {
4865d468d94bSSean Christopherson 	/* Use 5-level TDP if and only if it's useful/necessary. */
486683013059SSean Christopherson 	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4867d468d94bSSean Christopherson 		return 4;
4868d468d94bSSean Christopherson 
486983013059SSean Christopherson 	return max_tdp_level;
4870d468d94bSSean Christopherson }
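
/*
 * Rationale: four TDP levels already cover a 48-bit guest physical
 * address space, so when the guest's MAXPHYADDR is 48 or less the
 * fifth level would only add an extra step to every page walk.
 */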
4871d468d94bSSean Christopherson 
4872c50d8ae3SPaolo Bonzini static union kvm_mmu_role
4873c50d8ae3SPaolo Bonzini kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
4874c50d8ae3SPaolo Bonzini {
4875c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
4876c50d8ae3SPaolo Bonzini 
4877c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = (shadow_accessed_mask == 0);
4878d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4879c50d8ae3SPaolo Bonzini 	role.base.direct = true;
4880c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
4881c50d8ae3SPaolo Bonzini 
4882c50d8ae3SPaolo Bonzini 	return role;
4883c50d8ae3SPaolo Bonzini }
4884c50d8ae3SPaolo Bonzini 
4885c50d8ae3SPaolo Bonzini static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4886c50d8ae3SPaolo Bonzini {
48878c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4888c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
4889c50d8ae3SPaolo Bonzini 		kvm_calc_tdp_mmu_root_page_role(vcpu, false);
4890c50d8ae3SPaolo Bonzini 
4891c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
4892c50d8ae3SPaolo Bonzini 		return;
4893c50d8ae3SPaolo Bonzini 
4894c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
48957a02674dSSean Christopherson 	context->page_fault = kvm_tdp_page_fault;
4896c50d8ae3SPaolo Bonzini 	context->sync_page = nonpaging_sync_page;
48975efac074SPaolo Bonzini 	context->invlpg = NULL;
4898c50d8ae3SPaolo Bonzini 	context->update_pte = nonpaging_update_pte;
4899d468d94bSSean Christopherson 	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4900c50d8ae3SPaolo Bonzini 	context->direct_map = true;
4901d8dd54e0SSean Christopherson 	context->get_guest_pgd = get_cr3;
4902c50d8ae3SPaolo Bonzini 	context->get_pdptr = kvm_pdptr_read;
4903c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
4904c50d8ae3SPaolo Bonzini 
4905c50d8ae3SPaolo Bonzini 	if (!is_paging(vcpu)) {
4906c50d8ae3SPaolo Bonzini 		context->nx = false;
4907c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = nonpaging_gva_to_gpa;
4908c50d8ae3SPaolo Bonzini 		context->root_level = 0;
4909c50d8ae3SPaolo Bonzini 	} else if (is_long_mode(vcpu)) {
4910c50d8ae3SPaolo Bonzini 		context->nx = is_nx(vcpu);
4911c50d8ae3SPaolo Bonzini 		context->root_level = is_la57_mode(vcpu) ?
4912c50d8ae3SPaolo Bonzini 				PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4913c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, context);
4914c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging64_gva_to_gpa;
4915c50d8ae3SPaolo Bonzini 	} else if (is_pae(vcpu)) {
4916c50d8ae3SPaolo Bonzini 		context->nx = is_nx(vcpu);
4917c50d8ae3SPaolo Bonzini 		context->root_level = PT32E_ROOT_LEVEL;
4918c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, context);
4919c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging64_gva_to_gpa;
4920c50d8ae3SPaolo Bonzini 	} else {
4921c50d8ae3SPaolo Bonzini 		context->nx = false;
4922c50d8ae3SPaolo Bonzini 		context->root_level = PT32_ROOT_LEVEL;
4923c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, context);
4924c50d8ae3SPaolo Bonzini 		context->gva_to_gpa = paging32_gva_to_gpa;
4925c50d8ae3SPaolo Bonzini 	}
4926c50d8ae3SPaolo Bonzini 
4927c50d8ae3SPaolo Bonzini 	update_permission_bitmask(vcpu, context, false);
4928c50d8ae3SPaolo Bonzini 	update_pkru_bitmask(vcpu, context, false);
4929c50d8ae3SPaolo Bonzini 	update_last_nonleaf_level(vcpu, context);
4930c50d8ae3SPaolo Bonzini 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
4931c50d8ae3SPaolo Bonzini }
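
/*
 * Note: with TDP the hardware walks the guest's own page tables, so
 * the guest paging mode above only affects software walks
 * (gva_to_gpa) and the reserved-bit checks used while emulating the
 * guest walk; TDP faults themselves always go to kvm_tdp_page_fault().
 */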
4932c50d8ae3SPaolo Bonzini 
4933c50d8ae3SPaolo Bonzini static union kvm_mmu_role
493459505b55SSean Christopherson kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
4935c50d8ae3SPaolo Bonzini {
4936c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
4937c50d8ae3SPaolo Bonzini 
4938c50d8ae3SPaolo Bonzini 	role.base.smep_andnot_wp = role.ext.cr4_smep &&
4939c50d8ae3SPaolo Bonzini 		!is_write_protection(vcpu);
4940c50d8ae3SPaolo Bonzini 	role.base.smap_andnot_wp = role.ext.cr4_smap &&
4941c50d8ae3SPaolo Bonzini 		!is_write_protection(vcpu);
4942c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = !!is_pae(vcpu);
4943c50d8ae3SPaolo Bonzini 
494459505b55SSean Christopherson 	return role;
494559505b55SSean Christopherson }
494659505b55SSean Christopherson 
494759505b55SSean Christopherson static union kvm_mmu_role
494859505b55SSean Christopherson kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
494959505b55SSean Christopherson {
495059505b55SSean Christopherson 	union kvm_mmu_role role =
495159505b55SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, base_only);
495259505b55SSean Christopherson 
495359505b55SSean Christopherson 	role.base.direct = !is_paging(vcpu);
495459505b55SSean Christopherson 
4955c50d8ae3SPaolo Bonzini 	if (!is_long_mode(vcpu))
4956c50d8ae3SPaolo Bonzini 		role.base.level = PT32E_ROOT_LEVEL;
4957c50d8ae3SPaolo Bonzini 	else if (is_la57_mode(vcpu))
4958c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_5LEVEL;
4959c50d8ae3SPaolo Bonzini 	else
4960c50d8ae3SPaolo Bonzini 		role.base.level = PT64_ROOT_4LEVEL;
4961c50d8ae3SPaolo Bonzini 
4962c50d8ae3SPaolo Bonzini 	return role;
4963c50d8ae3SPaolo Bonzini }
4964c50d8ae3SPaolo Bonzini 
49658c008659SPaolo Bonzini static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
49668c008659SPaolo Bonzini 				    u32 cr0, u32 cr4, u32 efer,
49678c008659SPaolo Bonzini 				    union kvm_mmu_role new_role)
4968c50d8ae3SPaolo Bonzini {
4969929d1cfaSPaolo Bonzini 	if (!(cr0 & X86_CR0_PG))
4970c50d8ae3SPaolo Bonzini 		nonpaging_init_context(vcpu, context);
4971929d1cfaSPaolo Bonzini 	else if (efer & EFER_LMA)
4972c50d8ae3SPaolo Bonzini 		paging64_init_context(vcpu, context);
4973929d1cfaSPaolo Bonzini 	else if (cr4 & X86_CR4_PAE)
4974c50d8ae3SPaolo Bonzini 		paging32E_init_context(vcpu, context);
4975c50d8ae3SPaolo Bonzini 	else
4976c50d8ae3SPaolo Bonzini 		paging32_init_context(vcpu, context);
4977c50d8ae3SPaolo Bonzini 
4978c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
4979c50d8ae3SPaolo Bonzini 	reset_shadow_zero_bits_mask(vcpu, context);
4980c50d8ae3SPaolo Bonzini }
49810f04a2acSVitaly Kuznetsov 
49820f04a2acSVitaly Kuznetsov static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
49830f04a2acSVitaly Kuznetsov {
49848c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
49850f04a2acSVitaly Kuznetsov 	union kvm_mmu_role new_role =
49860f04a2acSVitaly Kuznetsov 		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
49870f04a2acSVitaly Kuznetsov 
49880f04a2acSVitaly Kuznetsov 	if (new_role.as_u64 != context->mmu_role.as_u64)
49898c008659SPaolo Bonzini 		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
49900f04a2acSVitaly Kuznetsov }
49910f04a2acSVitaly Kuznetsov 
499259505b55SSean Christopherson static union kvm_mmu_role
499359505b55SSean Christopherson kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
499459505b55SSean Christopherson {
499559505b55SSean Christopherson 	union kvm_mmu_role role =
499659505b55SSean Christopherson 		kvm_calc_shadow_root_page_role_common(vcpu, false);
499759505b55SSean Christopherson 
499859505b55SSean Christopherson 	role.base.direct = false;
4999d468d94bSSean Christopherson 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
500059505b55SSean Christopherson 
500159505b55SSean Christopherson 	return role;
500259505b55SSean Christopherson }
500359505b55SSean Christopherson 
50040f04a2acSVitaly Kuznetsov void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
50050f04a2acSVitaly Kuznetsov 			     gpa_t nested_cr3)
50060f04a2acSVitaly Kuznetsov {
50078c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
500859505b55SSean Christopherson 	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
50090f04a2acSVitaly Kuznetsov 
5010096586fdSSean Christopherson 	context->shadow_root_level = new_role.base.level;
5011096586fdSSean Christopherson 
5012a506fdd2SVitaly Kuznetsov 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
5013a506fdd2SVitaly Kuznetsov 
50140f04a2acSVitaly Kuznetsov 	if (new_role.as_u64 != context->mmu_role.as_u64)
50158c008659SPaolo Bonzini 		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
50160f04a2acSVitaly Kuznetsov }
50170f04a2acSVitaly Kuznetsov EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
5018c50d8ae3SPaolo Bonzini 
5019c50d8ae3SPaolo Bonzini static union kvm_mmu_role
5020c50d8ae3SPaolo Bonzini kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
5021bb1fcc70SSean Christopherson 				   bool execonly, u8 level)
5022c50d8ae3SPaolo Bonzini {
5023c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role = {0};
5024c50d8ae3SPaolo Bonzini 
5025c50d8ae3SPaolo Bonzini 	/* SMM flag is inherited from root_mmu */
5026c50d8ae3SPaolo Bonzini 	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
5027c50d8ae3SPaolo Bonzini 
5028bb1fcc70SSean Christopherson 	role.base.level = level;
5029c50d8ae3SPaolo Bonzini 	role.base.gpte_is_8_bytes = true;
5030c50d8ae3SPaolo Bonzini 	role.base.direct = false;
5031c50d8ae3SPaolo Bonzini 	role.base.ad_disabled = !accessed_dirty;
5032c50d8ae3SPaolo Bonzini 	role.base.guest_mode = true;
5033c50d8ae3SPaolo Bonzini 	role.base.access = ACC_ALL;
5034c50d8ae3SPaolo Bonzini 
5035c50d8ae3SPaolo Bonzini 	/*
5036c50d8ae3SPaolo Bonzini 	 * WP=1 and SMAP-and-not-WP=1 is an impossible combination for a
5037c50d8ae3SPaolo Bonzini 	 * real role, so setting both uniquely denotes shadow EPT entries.
5038c50d8ae3SPaolo Bonzini 	 */
5039c50d8ae3SPaolo Bonzini 	role.base.cr0_wp = true;
5040c50d8ae3SPaolo Bonzini 	role.base.smap_andnot_wp = true;
5041c50d8ae3SPaolo Bonzini 
5042c50d8ae3SPaolo Bonzini 	role.ext = kvm_calc_mmu_role_ext(vcpu);
5043c50d8ae3SPaolo Bonzini 	role.ext.execonly = execonly;
5044c50d8ae3SPaolo Bonzini 
5045c50d8ae3SPaolo Bonzini 	return role;
5046c50d8ae3SPaolo Bonzini }
5047c50d8ae3SPaolo Bonzini 
5048c50d8ae3SPaolo Bonzini void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
5049c50d8ae3SPaolo Bonzini 			     bool accessed_dirty, gpa_t new_eptp)
5050c50d8ae3SPaolo Bonzini {
50518c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5052bb1fcc70SSean Christopherson 	u8 level = vmx_eptp_page_walk_level(new_eptp);
5053c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role =
5054c50d8ae3SPaolo Bonzini 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
5055bb1fcc70SSean Christopherson 						   execonly, level);
5056c50d8ae3SPaolo Bonzini 
5057be01e8e2SSean Christopherson 	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
5058c50d8ae3SPaolo Bonzini 
5059c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == context->mmu_role.as_u64)
5060c50d8ae3SPaolo Bonzini 		return;
5061c50d8ae3SPaolo Bonzini 
5062bb1fcc70SSean Christopherson 	context->shadow_root_level = level;
5063c50d8ae3SPaolo Bonzini 
5064c50d8ae3SPaolo Bonzini 	context->nx = true;
5065c50d8ae3SPaolo Bonzini 	context->ept_ad = accessed_dirty;
5066c50d8ae3SPaolo Bonzini 	context->page_fault = ept_page_fault;
5067c50d8ae3SPaolo Bonzini 	context->gva_to_gpa = ept_gva_to_gpa;
5068c50d8ae3SPaolo Bonzini 	context->sync_page = ept_sync_page;
5069c50d8ae3SPaolo Bonzini 	context->invlpg = ept_invlpg;
5070c50d8ae3SPaolo Bonzini 	context->update_pte = ept_update_pte;
5071bb1fcc70SSean Christopherson 	context->root_level = level;
5072c50d8ae3SPaolo Bonzini 	context->direct_map = false;
5073c50d8ae3SPaolo Bonzini 	context->mmu_role.as_u64 = new_role.as_u64;
5074c50d8ae3SPaolo Bonzini 
5075c50d8ae3SPaolo Bonzini 	update_permission_bitmask(vcpu, context, true);
5076c50d8ae3SPaolo Bonzini 	update_pkru_bitmask(vcpu, context, true);
5077c50d8ae3SPaolo Bonzini 	update_last_nonleaf_level(vcpu, context);
5078c50d8ae3SPaolo Bonzini 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
5079c50d8ae3SPaolo Bonzini 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
5080c50d8ae3SPaolo Bonzini }
5081c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
5082c50d8ae3SPaolo Bonzini 
5083c50d8ae3SPaolo Bonzini static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
5084c50d8ae3SPaolo Bonzini {
50858c008659SPaolo Bonzini 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5086c50d8ae3SPaolo Bonzini 
5087929d1cfaSPaolo Bonzini 	kvm_init_shadow_mmu(vcpu,
5088929d1cfaSPaolo Bonzini 			    kvm_read_cr0_bits(vcpu, X86_CR0_PG),
5089929d1cfaSPaolo Bonzini 			    kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
5090929d1cfaSPaolo Bonzini 			    vcpu->arch.efer);
5091929d1cfaSPaolo Bonzini 
5092d8dd54e0SSean Christopherson 	context->get_guest_pgd     = get_cr3;
5093c50d8ae3SPaolo Bonzini 	context->get_pdptr         = kvm_pdptr_read;
5094c50d8ae3SPaolo Bonzini 	context->inject_page_fault = kvm_inject_page_fault;
5095c50d8ae3SPaolo Bonzini }
5096c50d8ae3SPaolo Bonzini 
5097c50d8ae3SPaolo Bonzini static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
5098c50d8ae3SPaolo Bonzini {
5099c50d8ae3SPaolo Bonzini 	union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
5100c50d8ae3SPaolo Bonzini 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5101c50d8ae3SPaolo Bonzini 
5102c50d8ae3SPaolo Bonzini 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
5103c50d8ae3SPaolo Bonzini 		return;
5104c50d8ae3SPaolo Bonzini 
5105c50d8ae3SPaolo Bonzini 	g_context->mmu_role.as_u64 = new_role.as_u64;
5106d8dd54e0SSean Christopherson 	g_context->get_guest_pgd     = get_cr3;
5107c50d8ae3SPaolo Bonzini 	g_context->get_pdptr         = kvm_pdptr_read;
5108c50d8ae3SPaolo Bonzini 	g_context->inject_page_fault = kvm_inject_page_fault;
5109c50d8ae3SPaolo Bonzini 
5110c50d8ae3SPaolo Bonzini 	/*
51115efac074SPaolo Bonzini 	 * L2 page tables are never shadowed, so there is no need to sync
51125efac074SPaolo Bonzini 	 * SPTEs.
51135efac074SPaolo Bonzini 	 */
51145efac074SPaolo Bonzini 	g_context->invlpg            = NULL;
51155efac074SPaolo Bonzini 
51165efac074SPaolo Bonzini 	/*
5117c50d8ae3SPaolo Bonzini 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5118c50d8ae3SPaolo Bonzini 	 * L1's nested page tables (e.g. EPT12). The nested translation
5119c50d8ae3SPaolo Bonzini 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
5120c50d8ae3SPaolo Bonzini 	 * L2's page tables as the first level of translation and L1's
5121c50d8ae3SPaolo Bonzini 	 * nested page tables as the second level of translation. Basically
5122c50d8ae3SPaolo Bonzini 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
5123c50d8ae3SPaolo Bonzini 	 */
5124c50d8ae3SPaolo Bonzini 	if (!is_paging(vcpu)) {
5125c50d8ae3SPaolo Bonzini 		g_context->nx = false;
5126c50d8ae3SPaolo Bonzini 		g_context->root_level = 0;
5127c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
5128c50d8ae3SPaolo Bonzini 	} else if (is_long_mode(vcpu)) {
5129c50d8ae3SPaolo Bonzini 		g_context->nx = is_nx(vcpu);
5130c50d8ae3SPaolo Bonzini 		g_context->root_level = is_la57_mode(vcpu) ?
5131c50d8ae3SPaolo Bonzini 					PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
5132c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, g_context);
5133c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
5134c50d8ae3SPaolo Bonzini 	} else if (is_pae(vcpu)) {
5135c50d8ae3SPaolo Bonzini 		g_context->nx = is_nx(vcpu);
5136c50d8ae3SPaolo Bonzini 		g_context->root_level = PT32E_ROOT_LEVEL;
5137c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, g_context);
5138c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
5139c50d8ae3SPaolo Bonzini 	} else {
5140c50d8ae3SPaolo Bonzini 		g_context->nx = false;
5141c50d8ae3SPaolo Bonzini 		g_context->root_level = PT32_ROOT_LEVEL;
5142c50d8ae3SPaolo Bonzini 		reset_rsvds_bits_mask(vcpu, g_context);
5143c50d8ae3SPaolo Bonzini 		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
5144c50d8ae3SPaolo Bonzini 	}
5145c50d8ae3SPaolo Bonzini 
5146c50d8ae3SPaolo Bonzini 	update_permission_bitmask(vcpu, g_context, false);
5147c50d8ae3SPaolo Bonzini 	update_pkru_bitmask(vcpu, g_context, false);
5148c50d8ae3SPaolo Bonzini 	update_last_nonleaf_level(vcpu, g_context);
5149c50d8ae3SPaolo Bonzini }
5150c50d8ae3SPaolo Bonzini 
5151c50d8ae3SPaolo Bonzini void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
5152c50d8ae3SPaolo Bonzini {
5153c50d8ae3SPaolo Bonzini 	if (reset_roots) {
5154c50d8ae3SPaolo Bonzini 		uint i;
5155c50d8ae3SPaolo Bonzini 
5156c50d8ae3SPaolo Bonzini 		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
5157c50d8ae3SPaolo Bonzini 
5158c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5159c50d8ae3SPaolo Bonzini 			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5160c50d8ae3SPaolo Bonzini 	}
5161c50d8ae3SPaolo Bonzini 
5162c50d8ae3SPaolo Bonzini 	if (mmu_is_nested(vcpu))
5163c50d8ae3SPaolo Bonzini 		init_kvm_nested_mmu(vcpu);
5164c50d8ae3SPaolo Bonzini 	else if (tdp_enabled)
5165c50d8ae3SPaolo Bonzini 		init_kvm_tdp_mmu(vcpu);
5166c50d8ae3SPaolo Bonzini 	else
5167c50d8ae3SPaolo Bonzini 		init_kvm_softmmu(vcpu);
5168c50d8ae3SPaolo Bonzini }
5169c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_init_mmu);
5170c50d8ae3SPaolo Bonzini 
5171c50d8ae3SPaolo Bonzini static union kvm_mmu_page_role
5172c50d8ae3SPaolo Bonzini kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
5173c50d8ae3SPaolo Bonzini {
5174c50d8ae3SPaolo Bonzini 	union kvm_mmu_role role;
5175c50d8ae3SPaolo Bonzini 
5176c50d8ae3SPaolo Bonzini 	if (tdp_enabled)
5177c50d8ae3SPaolo Bonzini 		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
5178c50d8ae3SPaolo Bonzini 	else
5179c50d8ae3SPaolo Bonzini 		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
5180c50d8ae3SPaolo Bonzini 
5181c50d8ae3SPaolo Bonzini 	return role.base;
5182c50d8ae3SPaolo Bonzini }
5183c50d8ae3SPaolo Bonzini 
5184c50d8ae3SPaolo Bonzini void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
5185c50d8ae3SPaolo Bonzini {
5186c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
5187c50d8ae3SPaolo Bonzini 	kvm_init_mmu(vcpu, true);
5188c50d8ae3SPaolo Bonzini }
5189c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
5190c50d8ae3SPaolo Bonzini 
5191c50d8ae3SPaolo Bonzini int kvm_mmu_load(struct kvm_vcpu *vcpu)
5192c50d8ae3SPaolo Bonzini {
5193c50d8ae3SPaolo Bonzini 	int r;
5194c50d8ae3SPaolo Bonzini 
5195378f5cd6SSean Christopherson 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
5196c50d8ae3SPaolo Bonzini 	if (r)
5197c50d8ae3SPaolo Bonzini 		goto out;
5198c50d8ae3SPaolo Bonzini 	r = mmu_alloc_roots(vcpu);
5199c50d8ae3SPaolo Bonzini 	kvm_mmu_sync_roots(vcpu);
5200c50d8ae3SPaolo Bonzini 	if (r)
5201c50d8ae3SPaolo Bonzini 		goto out;
5202727a7e27SPaolo Bonzini 	kvm_mmu_load_pgd(vcpu);
52038c8560b8SSean Christopherson 	kvm_x86_ops.tlb_flush_current(vcpu);
5204c50d8ae3SPaolo Bonzini out:
5205c50d8ae3SPaolo Bonzini 	return r;
5206c50d8ae3SPaolo Bonzini }
5207c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_load);
5208c50d8ae3SPaolo Bonzini 
5209c50d8ae3SPaolo Bonzini void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5210c50d8ae3SPaolo Bonzini {
5211c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5212c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
5213c50d8ae3SPaolo Bonzini 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5214c50d8ae3SPaolo Bonzini 	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
5215c50d8ae3SPaolo Bonzini }
5216c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_unload);
5217c50d8ae3SPaolo Bonzini 
5218c50d8ae3SPaolo Bonzini static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
5219c50d8ae3SPaolo Bonzini 				  struct kvm_mmu_page *sp, u64 *spte,
5220c50d8ae3SPaolo Bonzini 				  const void *new)
5221c50d8ae3SPaolo Bonzini {
52223bae0459SSean Christopherson 	if (sp->role.level != PG_LEVEL_4K) {
5223c50d8ae3SPaolo Bonzini 		++vcpu->kvm->stat.mmu_pde_zapped;
5224c50d8ae3SPaolo Bonzini 		return;
5225c50d8ae3SPaolo Bonzini 	}
5226c50d8ae3SPaolo Bonzini 
5227c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_pte_updated;
5228c50d8ae3SPaolo Bonzini 	vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
5229c50d8ae3SPaolo Bonzini }
5230c50d8ae3SPaolo Bonzini 
5231c50d8ae3SPaolo Bonzini static bool need_remote_flush(u64 old, u64 new)
5232c50d8ae3SPaolo Bonzini {
5233c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(old))
5234c50d8ae3SPaolo Bonzini 		return false;
5235c50d8ae3SPaolo Bonzini 	if (!is_shadow_present_pte(new))
5236c50d8ae3SPaolo Bonzini 		return true;
5237c50d8ae3SPaolo Bonzini 	if ((old ^ new) & PT64_BASE_ADDR_MASK)
5238c50d8ae3SPaolo Bonzini 		return true;
5239c50d8ae3SPaolo Bonzini 	old ^= shadow_nx_mask;
5240c50d8ae3SPaolo Bonzini 	new ^= shadow_nx_mask;
5241c50d8ae3SPaolo Bonzini 	return (old & ~new & PT64_PERM_MASK) != 0;
5242c50d8ae3SPaolo Bonzini }
5243c50d8ae3SPaolo Bonzini 
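/*
 * A worked illustration of the XOR trick above (a sketch, assuming
 * legacy x86-64 paging where shadow_nx_mask is bit 63 and W is bit 1):
 * XOR-ing both sptes with shadow_nx_mask turns NX, where a *set* bit
 * denies execution, into a positive "may execute" permission, so
 * old & ~new catches any permission the old spte granted and the new
 * spte revokes.
 *
 *	u64 old = PT_PRESENT_MASK | PT_WRITABLE_MASK | BIT_ULL(63);
 *	u64 new = PT_PRESENT_MASK;	// write revoked, NX cleared
 *
 *	// After both are XOR-ed with BIT_ULL(63), old has the exec bit
 *	// clear and new has it set; old & ~new & PT64_PERM_MASK leaves
 *	// only the revoked W bit, so a remote flush is needed.  Gaining
 *	// a permission (here: exec) never forces a flush on its own.
 */
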
5244c50d8ae3SPaolo Bonzini static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5245c50d8ae3SPaolo Bonzini 				    int *bytes)
5246c50d8ae3SPaolo Bonzini {
5247c50d8ae3SPaolo Bonzini 	u64 gentry = 0;
5248c50d8ae3SPaolo Bonzini 	int r;
5249c50d8ae3SPaolo Bonzini 
5250c50d8ae3SPaolo Bonzini 	/*
5251c50d8ae3SPaolo Bonzini 	 * Assume the pte write is on a page table of the same type
5252c50d8ae3SPaolo Bonzini 	 * as the current vcpu paging mode, since we only update sptes
5253c50d8ae3SPaolo Bonzini 	 * when they have the same mode.
5254c50d8ae3SPaolo Bonzini 	 */
5255c50d8ae3SPaolo Bonzini 	if (is_pae(vcpu) && *bytes == 4) {
5256c50d8ae3SPaolo Bonzini 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5257c50d8ae3SPaolo Bonzini 		*gpa &= ~(gpa_t)7;
5258c50d8ae3SPaolo Bonzini 		*bytes = 8;
5259c50d8ae3SPaolo Bonzini 	}
5260c50d8ae3SPaolo Bonzini 
5261c50d8ae3SPaolo Bonzini 	if (*bytes == 4 || *bytes == 8) {
5262c50d8ae3SPaolo Bonzini 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5263c50d8ae3SPaolo Bonzini 		if (r)
5264c50d8ae3SPaolo Bonzini 			gentry = 0;
5265c50d8ae3SPaolo Bonzini 	}
5266c50d8ae3SPaolo Bonzini 
5267c50d8ae3SPaolo Bonzini 	return gentry;
5268c50d8ae3SPaolo Bonzini }
5269c50d8ae3SPaolo Bonzini 
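/*
 * A worked example of the widening above (a sketch): a 32-bit PAE
 * guest updates a 64-bit gpte with two 4-byte writes.  For a 4-byte
 * write at gpa 0x1004:
 *
 *	gpa_t gpa = 0x1004;
 *	int bytes = 4;
 *
 *	gpa &= ~(gpa_t)7;	// gpa == 0x1000, start of the gpte
 *	bytes = 8;		// fetch the whole 64-bit gpte
 *
 * so the atomic read sees a complete gpte rather than half of one, at
 * the cost of possibly observing the other half before it is written.
 */
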
5270c50d8ae3SPaolo Bonzini /*
5271c50d8ae3SPaolo Bonzini  * If we're seeing too many writes to a page, it may no longer be a page table,
5272c50d8ae3SPaolo Bonzini  * or we may be forking, in which case it is better to unmap the page.
5273c50d8ae3SPaolo Bonzini  */
5274c50d8ae3SPaolo Bonzini static bool detect_write_flooding(struct kvm_mmu_page *sp)
5275c50d8ae3SPaolo Bonzini {
5276c50d8ae3SPaolo Bonzini 	/*
5277c50d8ae3SPaolo Bonzini 	 * Skip write-flooding detection for an sp whose level is 1, because
5278c50d8ae3SPaolo Bonzini 	 * it can become unsync, and then the guest page is not write-protected.
5279c50d8ae3SPaolo Bonzini 	 */
52803bae0459SSean Christopherson 	if (sp->role.level == PG_LEVEL_4K)
5281c50d8ae3SPaolo Bonzini 		return false;
5282c50d8ae3SPaolo Bonzini 
5283c50d8ae3SPaolo Bonzini 	atomic_inc(&sp->write_flooding_count);
5284c50d8ae3SPaolo Bonzini 	return atomic_read(&sp->write_flooding_count) >= 3;
5285c50d8ae3SPaolo Bonzini }
5286c50d8ae3SPaolo Bonzini 
5287c50d8ae3SPaolo Bonzini /*
5288c50d8ae3SPaolo Bonzini  * Misaligned accesses are too much trouble to fix up; also, they usually
5289c50d8ae3SPaolo Bonzini  * indicate a page is not used as a page table.
5290c50d8ae3SPaolo Bonzini  */
5291c50d8ae3SPaolo Bonzini static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5292c50d8ae3SPaolo Bonzini 				    int bytes)
5293c50d8ae3SPaolo Bonzini {
5294c50d8ae3SPaolo Bonzini 	unsigned offset, pte_size, misaligned;
5295c50d8ae3SPaolo Bonzini 
5296c50d8ae3SPaolo Bonzini 	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
5297c50d8ae3SPaolo Bonzini 		 gpa, bytes, sp->role.word);
5298c50d8ae3SPaolo Bonzini 
5299c50d8ae3SPaolo Bonzini 	offset = offset_in_page(gpa);
5300c50d8ae3SPaolo Bonzini 	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5301c50d8ae3SPaolo Bonzini 
5302c50d8ae3SPaolo Bonzini 	/*
5303c50d8ae3SPaolo Bonzini 	 * Sometimes the OS writes only the last byte to update status
5304c50d8ae3SPaolo Bonzini 	 * bits; for example, Linux uses the andb instruction in clear_bit().
5305c50d8ae3SPaolo Bonzini 	 */
5306c50d8ae3SPaolo Bonzini 	if (!(offset & (pte_size - 1)) && bytes == 1)
5307c50d8ae3SPaolo Bonzini 		return false;
5308c50d8ae3SPaolo Bonzini 
5309c50d8ae3SPaolo Bonzini 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5310c50d8ae3SPaolo Bonzini 	misaligned |= bytes < 4;
5311c50d8ae3SPaolo Bonzini 
5312c50d8ae3SPaolo Bonzini 	return misaligned;
5313c50d8ae3SPaolo Bonzini }
5314c50d8ae3SPaolo Bonzini 
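/*
 * The misaligned check above amounts to "does the write cross a
 * pte-sized boundary?".  A sketch with 8-byte gptes (pte_size == 8):
 *
 *	// 4-byte write at offset 0x14: (0x14 ^ 0x17) & ~7 == 0
 *	//	-> stays within one gpte, not misaligned.
 *	// 8-byte write at offset 0x14: (0x14 ^ 0x1b) & ~7 == 0x8
 *	//	-> straddles two gptes, treated as misaligned.
 *	// 2-byte write anywhere: bytes < 4 -> misaligned; only the
 *	//	pte-aligned 1-byte status-bit update is exempted above.
 */
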
5315c50d8ae3SPaolo Bonzini static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5316c50d8ae3SPaolo Bonzini {
5317c50d8ae3SPaolo Bonzini 	unsigned page_offset, quadrant;
5318c50d8ae3SPaolo Bonzini 	u64 *spte;
5319c50d8ae3SPaolo Bonzini 	int level;
5320c50d8ae3SPaolo Bonzini 
5321c50d8ae3SPaolo Bonzini 	page_offset = offset_in_page(gpa);
5322c50d8ae3SPaolo Bonzini 	level = sp->role.level;
5323c50d8ae3SPaolo Bonzini 	*nspte = 1;
5324c50d8ae3SPaolo Bonzini 	if (!sp->role.gpte_is_8_bytes) {
5325c50d8ae3SPaolo Bonzini 		page_offset <<= 1;	/* 32->64 */
5326c50d8ae3SPaolo Bonzini 		/*
5327c50d8ae3SPaolo Bonzini 		 * A 32-bit pde maps 4MB while the shadow pdes map
5328c50d8ae3SPaolo Bonzini 		 * only 2MB.  So we need to double the offset again
5329c50d8ae3SPaolo Bonzini 		 * and zap two pdes instead of one.
5330c50d8ae3SPaolo Bonzini 		 */
5331c50d8ae3SPaolo Bonzini 		if (level == PT32_ROOT_LEVEL) {
5332c50d8ae3SPaolo Bonzini 			page_offset &= ~7; /* kill rounding error */
5333c50d8ae3SPaolo Bonzini 			page_offset <<= 1;
5334c50d8ae3SPaolo Bonzini 			*nspte = 2;
5335c50d8ae3SPaolo Bonzini 		}
5336c50d8ae3SPaolo Bonzini 		quadrant = page_offset >> PAGE_SHIFT;
5337c50d8ae3SPaolo Bonzini 		page_offset &= ~PAGE_MASK;
5338c50d8ae3SPaolo Bonzini 		if (quadrant != sp->role.quadrant)
5339c50d8ae3SPaolo Bonzini 			return NULL;
5340c50d8ae3SPaolo Bonzini 	}
5341c50d8ae3SPaolo Bonzini 
5342c50d8ae3SPaolo Bonzini 	spte = &sp->spt[page_offset / sizeof(*spte)];
5343c50d8ae3SPaolo Bonzini 	return spte;
5344c50d8ae3SPaolo Bonzini }
5345c50d8ae3SPaolo Bonzini 
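/*
 * Quadrant arithmetic, worked through (a sketch): a 32-bit non-PAE
 * guest packs 1024 4-byte gptes into a page, while a shadow page holds
 * only 512 8-byte sptes, so one guest page is shadowed by two pages
 * distinguished by role.quadrant.  For a write at page offset 0x900
 * (gpte index 576) in a non-root table:
 *
 *	page_offset <<= 1;			// 0x1200
 *	quadrant = page_offset >> PAGE_SHIFT;	// 1, second shadow page
 *	page_offset &= ~PAGE_MASK;		// 0x200, spte index 64
 *
 * Only the sp whose role.quadrant matches is written; for a PT32 root
 * the offset is doubled once more and two sptes are zapped, since a
 * 32-bit pde covers 4MB but a shadow pde covers only 2MB.
 */
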
5346a102a674SSean Christopherson /*
5347a102a674SSean Christopherson  * Ignore various flags when determining if a SPTE can be immediately
5348a102a674SSean Christopherson  * overwritten for the current MMU.
5349a102a674SSean Christopherson  *  - level: explicitly checked in mmu_pte_write_new_pte(), and will never
5350a102a674SSean Christopherson  *    match the current MMU role, as MMU's level tracks the root level.
5351a102a674SSean Christopherson  *  - access: updated based on the new guest PTE
5352a102a674SSean Christopherson  *  - quadrant: handled by get_written_sptes()
5353a102a674SSean Christopherson  *  - invalid: always false (loop only walks valid shadow pages)
5354a102a674SSean Christopherson  */
5355a102a674SSean Christopherson static const union kvm_mmu_page_role role_ign = {
5356a102a674SSean Christopherson 	.level = 0xf,
5357a102a674SSean Christopherson 	.access = 0x7,
5358a102a674SSean Christopherson 	.quadrant = 0x3,
5359a102a674SSean Christopherson 	.invalid = 0x1,
5360a102a674SSean Christopherson };
5361a102a674SSean Christopherson 
5362c50d8ae3SPaolo Bonzini static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5363c50d8ae3SPaolo Bonzini 			      const u8 *new, int bytes,
5364c50d8ae3SPaolo Bonzini 			      struct kvm_page_track_notifier_node *node)
5365c50d8ae3SPaolo Bonzini {
5366c50d8ae3SPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
5367c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5368c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
5369c50d8ae3SPaolo Bonzini 	u64 entry, gentry, *spte;
5370c50d8ae3SPaolo Bonzini 	int npte;
5371c50d8ae3SPaolo Bonzini 	bool remote_flush, local_flush;
5372c50d8ae3SPaolo Bonzini 
5373c50d8ae3SPaolo Bonzini 	/*
5374c50d8ae3SPaolo Bonzini 	 * If we don't have indirect shadow pages, it means no page is
5375c50d8ae3SPaolo Bonzini 	 * write-protected, so we can simply exit.
5376c50d8ae3SPaolo Bonzini 	 */
5377c50d8ae3SPaolo Bonzini 	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5378c50d8ae3SPaolo Bonzini 		return;
5379c50d8ae3SPaolo Bonzini 
5380c50d8ae3SPaolo Bonzini 	remote_flush = local_flush = false;
5381c50d8ae3SPaolo Bonzini 
5382c50d8ae3SPaolo Bonzini 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5383c50d8ae3SPaolo Bonzini 
5384c50d8ae3SPaolo Bonzini 	/*
5385c50d8ae3SPaolo Bonzini 	 * No need to care whether the memory allocation succeeds, since
5386c50d8ae3SPaolo Bonzini 	 * pte prefetch is skipped if the cache does not have enough
5387c50d8ae3SPaolo Bonzini 	 * objects.
5388c50d8ae3SPaolo Bonzini 	 */
5389378f5cd6SSean Christopherson 	mmu_topup_memory_caches(vcpu, true);
5390c50d8ae3SPaolo Bonzini 
5391c50d8ae3SPaolo Bonzini 	spin_lock(&vcpu->kvm->mmu_lock);
5392c50d8ae3SPaolo Bonzini 
5393c50d8ae3SPaolo Bonzini 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5394c50d8ae3SPaolo Bonzini 
5395c50d8ae3SPaolo Bonzini 	++vcpu->kvm->stat.mmu_pte_write;
5396c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5397c50d8ae3SPaolo Bonzini 
5398c50d8ae3SPaolo Bonzini 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5399c50d8ae3SPaolo Bonzini 		if (detect_write_misaligned(sp, gpa, bytes) ||
5400c50d8ae3SPaolo Bonzini 		      detect_write_flooding(sp)) {
5401c50d8ae3SPaolo Bonzini 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5402c50d8ae3SPaolo Bonzini 			++vcpu->kvm->stat.mmu_flooded;
5403c50d8ae3SPaolo Bonzini 			continue;
5404c50d8ae3SPaolo Bonzini 		}
5405c50d8ae3SPaolo Bonzini 
5406c50d8ae3SPaolo Bonzini 		spte = get_written_sptes(sp, gpa, &npte);
5407c50d8ae3SPaolo Bonzini 		if (!spte)
5408c50d8ae3SPaolo Bonzini 			continue;
5409c50d8ae3SPaolo Bonzini 
5410c50d8ae3SPaolo Bonzini 		local_flush = true;
5411c50d8ae3SPaolo Bonzini 		while (npte--) {
5412c50d8ae3SPaolo Bonzini 			u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
5413c50d8ae3SPaolo Bonzini 
5414c50d8ae3SPaolo Bonzini 			entry = *spte;
54152de4085cSBen Gardon 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5416c50d8ae3SPaolo Bonzini 			if (gentry &&
5417a102a674SSean Christopherson 			    !((sp->role.word ^ base_role) & ~role_ign.word) &&
5418a102a674SSean Christopherson 			    rmap_can_add(vcpu))
5419c50d8ae3SPaolo Bonzini 				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
5420c50d8ae3SPaolo Bonzini 			if (need_remote_flush(entry, *spte))
5421c50d8ae3SPaolo Bonzini 				remote_flush = true;
5422c50d8ae3SPaolo Bonzini 			++spte;
5423c50d8ae3SPaolo Bonzini 		}
5424c50d8ae3SPaolo Bonzini 	}
5425c50d8ae3SPaolo Bonzini 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5426c50d8ae3SPaolo Bonzini 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5427c50d8ae3SPaolo Bonzini 	spin_unlock(&vcpu->kvm->mmu_lock);
5428c50d8ae3SPaolo Bonzini }
5429c50d8ae3SPaolo Bonzini 
5430c50d8ae3SPaolo Bonzini int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
5431c50d8ae3SPaolo Bonzini {
5432c50d8ae3SPaolo Bonzini 	gpa_t gpa;
5433c50d8ae3SPaolo Bonzini 	int r;
5434c50d8ae3SPaolo Bonzini 
5435c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map)
5436c50d8ae3SPaolo Bonzini 		return 0;
5437c50d8ae3SPaolo Bonzini 
5438c50d8ae3SPaolo Bonzini 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
5439c50d8ae3SPaolo Bonzini 
5440c50d8ae3SPaolo Bonzini 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
5441c50d8ae3SPaolo Bonzini 
5442c50d8ae3SPaolo Bonzini 	return r;
5443c50d8ae3SPaolo Bonzini }
5444c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
5445c50d8ae3SPaolo Bonzini 
5446736c291cSSean Christopherson int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5447c50d8ae3SPaolo Bonzini 		       void *insn, int insn_len)
5448c50d8ae3SPaolo Bonzini {
544992daa48bSSean Christopherson 	int r, emulation_type = EMULTYPE_PF;
5450c50d8ae3SPaolo Bonzini 	bool direct = vcpu->arch.mmu->direct_map;
5451c50d8ae3SPaolo Bonzini 
54526948199aSSean Christopherson 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5453ddce6208SSean Christopherson 		return RET_PF_RETRY;
5454ddce6208SSean Christopherson 
5455c50d8ae3SPaolo Bonzini 	r = RET_PF_INVALID;
5456c50d8ae3SPaolo Bonzini 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
5457736c291cSSean Christopherson 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5458c50d8ae3SPaolo Bonzini 		if (r == RET_PF_EMULATE)
5459c50d8ae3SPaolo Bonzini 			goto emulate;
5460c50d8ae3SPaolo Bonzini 	}
5461c50d8ae3SPaolo Bonzini 
5462c50d8ae3SPaolo Bonzini 	if (r == RET_PF_INVALID) {
54637a02674dSSean Christopherson 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
54647a02674dSSean Christopherson 					  lower_32_bits(error_code), false);
5465*7b367bc9SSean Christopherson 		if (WARN_ON_ONCE(r == RET_PF_INVALID))
5466*7b367bc9SSean Christopherson 			return -EIO;
5467c50d8ae3SPaolo Bonzini 	}
5468c50d8ae3SPaolo Bonzini 
5469c50d8ae3SPaolo Bonzini 	if (r == RET_PF_RETRY)
5470c50d8ae3SPaolo Bonzini 		return 1;
5471c50d8ae3SPaolo Bonzini 	if (r < 0)
5472c50d8ae3SPaolo Bonzini 		return r;
5473c50d8ae3SPaolo Bonzini 
5474c50d8ae3SPaolo Bonzini 	/*
5475c50d8ae3SPaolo Bonzini 	 * Before emulating the instruction, check if the error code
5476c50d8ae3SPaolo Bonzini 	 * was due to a RO violation while translating the guest page.
5477c50d8ae3SPaolo Bonzini 	 * This can occur when using nested virtualization with nested
5478c50d8ae3SPaolo Bonzini 	 * paging in both guests. If true, we simply unprotect the page
5479c50d8ae3SPaolo Bonzini 	 * and resume the guest.
5480c50d8ae3SPaolo Bonzini 	 */
5481c50d8ae3SPaolo Bonzini 	if (vcpu->arch.mmu->direct_map &&
5482c50d8ae3SPaolo Bonzini 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5483736c291cSSean Christopherson 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5484c50d8ae3SPaolo Bonzini 		return 1;
5485c50d8ae3SPaolo Bonzini 	}
5486c50d8ae3SPaolo Bonzini 
5487c50d8ae3SPaolo Bonzini 	/*
5488c50d8ae3SPaolo Bonzini 	 * vcpu->arch.mmu->page_fault returned RET_PF_EMULATE, but we can still
5489c50d8ae3SPaolo Bonzini 	 * optimistically try to just unprotect the page and let the processor
5490c50d8ae3SPaolo Bonzini 	 * re-execute the instruction that caused the page fault.  Do not allow
5491c50d8ae3SPaolo Bonzini 	 * retrying MMIO emulation, as it's not only pointless but could also
5492c50d8ae3SPaolo Bonzini 	 * cause us to enter an infinite loop because the processor will keep
5493c50d8ae3SPaolo Bonzini 	 * faulting on the non-existent MMIO address.  Retrying an instruction
5494c50d8ae3SPaolo Bonzini 	 * from a nested guest is also pointless and dangerous as we are only
5495c50d8ae3SPaolo Bonzini 	 * explicitly shadowing L1's page tables, i.e. unprotecting something
5496c50d8ae3SPaolo Bonzini 	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5497c50d8ae3SPaolo Bonzini 	 */
5498736c291cSSean Christopherson 	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
549992daa48bSSean Christopherson 		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5500c50d8ae3SPaolo Bonzini emulate:
5501736c291cSSean Christopherson 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5502c50d8ae3SPaolo Bonzini 				       insn_len);
5503c50d8ae3SPaolo Bonzini }
5504c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5505c50d8ae3SPaolo Bonzini 
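/*
 * For orientation, a sketch of how the vendor modules reach this
 * function (the exact code lives in vmx.c/svm.c; details elided here
 * are assumptions): VMX's EPT-violation exit handler does roughly
 *
 *	gpa_t gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 *	u64 error_code = ...;	// derived from the exit qualification
 *
 *	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
 *
 * A return value of 1 resumes the guest, 0 exits to userspace, and
 * negative values are propagated as errors.
 */
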
55065efac074SPaolo Bonzini void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
55075efac074SPaolo Bonzini 			    gva_t gva, hpa_t root_hpa)
5508c50d8ae3SPaolo Bonzini {
5509c50d8ae3SPaolo Bonzini 	int i;
5510c50d8ae3SPaolo Bonzini 
55115efac074SPaolo Bonzini 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
55125efac074SPaolo Bonzini 	if (mmu != &vcpu->arch.guest_mmu) {
55135efac074SPaolo Bonzini 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5514c50d8ae3SPaolo Bonzini 		if (is_noncanonical_address(gva, vcpu))
5515c50d8ae3SPaolo Bonzini 			return;
5516c50d8ae3SPaolo Bonzini 
55175efac074SPaolo Bonzini 		kvm_x86_ops.tlb_flush_gva(vcpu, gva);
55185efac074SPaolo Bonzini 	}
55195efac074SPaolo Bonzini 
55205efac074SPaolo Bonzini 	if (!mmu->invlpg)
55215efac074SPaolo Bonzini 		return;
55225efac074SPaolo Bonzini 
55235efac074SPaolo Bonzini 	if (root_hpa == INVALID_PAGE) {
5524c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5525c50d8ae3SPaolo Bonzini 
5526c50d8ae3SPaolo Bonzini 		/*
5527c50d8ae3SPaolo Bonzini 		 * INVLPG is required to invalidate any global mappings for the VA,
55285efac074SPaolo Bonzini 		 * irrespective of PCID.  Determining whether any of the prev_root
55295efac074SPaolo Bonzini 		 * mappings of the VA is marked global would take roughly as much
55305efac074SPaolo Bonzini 		 * work as just syncing it blindly, so we might as well always
55315efac074SPaolo Bonzini 		 * sync it.
5532c50d8ae3SPaolo Bonzini 		 *
5533c50d8ae3SPaolo Bonzini 		 * Mappings not reachable via the current cr3 or the prev_roots will be
5534c50d8ae3SPaolo Bonzini 		 * synced when switching to that cr3, so nothing needs to be done here
5535c50d8ae3SPaolo Bonzini 		 * for them.
5536c50d8ae3SPaolo Bonzini 		 */
5537c50d8ae3SPaolo Bonzini 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5538c50d8ae3SPaolo Bonzini 			if (VALID_PAGE(mmu->prev_roots[i].hpa))
5539c50d8ae3SPaolo Bonzini 				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
55405efac074SPaolo Bonzini 	} else {
55415efac074SPaolo Bonzini 		mmu->invlpg(vcpu, gva, root_hpa);
55425efac074SPaolo Bonzini 	}
55435efac074SPaolo Bonzini }
55445efac074SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);
5545c50d8ae3SPaolo Bonzini 
55465efac074SPaolo Bonzini void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
55475efac074SPaolo Bonzini {
55485efac074SPaolo Bonzini 	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
5549c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5550c50d8ae3SPaolo Bonzini }
5551c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5552c50d8ae3SPaolo Bonzini 
5554c50d8ae3SPaolo Bonzini void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5555c50d8ae3SPaolo Bonzini {
5556c50d8ae3SPaolo Bonzini 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5557c50d8ae3SPaolo Bonzini 	bool tlb_flush = false;
5558c50d8ae3SPaolo Bonzini 	uint i;
5559c50d8ae3SPaolo Bonzini 
5560c50d8ae3SPaolo Bonzini 	if (pcid == kvm_get_active_pcid(vcpu)) {
5561c50d8ae3SPaolo Bonzini 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5562c50d8ae3SPaolo Bonzini 		tlb_flush = true;
5563c50d8ae3SPaolo Bonzini 	}
5564c50d8ae3SPaolo Bonzini 
5565c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5566c50d8ae3SPaolo Bonzini 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5567be01e8e2SSean Christopherson 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5568c50d8ae3SPaolo Bonzini 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5569c50d8ae3SPaolo Bonzini 			tlb_flush = true;
5570c50d8ae3SPaolo Bonzini 		}
5571c50d8ae3SPaolo Bonzini 	}
5572c50d8ae3SPaolo Bonzini 
5573c50d8ae3SPaolo Bonzini 	if (tlb_flush)
5574afaf0b2fSSean Christopherson 		kvm_x86_ops.tlb_flush_gva(vcpu, gva);
5575c50d8ae3SPaolo Bonzini 
5576c50d8ae3SPaolo Bonzini 	++vcpu->stat.invlpg;
5577c50d8ae3SPaolo Bonzini 
5578c50d8ae3SPaolo Bonzini 	/*
5579c50d8ae3SPaolo Bonzini 	 * Mappings not reachable via the current cr3 or the prev_roots will be
5580c50d8ae3SPaolo Bonzini 	 * synced when switching to that cr3, so nothing needs to be done here
5581c50d8ae3SPaolo Bonzini 	 * for them.
5582c50d8ae3SPaolo Bonzini 	 */
5583c50d8ae3SPaolo Bonzini }
5584c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);
5585c50d8ae3SPaolo Bonzini 
558683013059SSean Christopherson void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
558783013059SSean Christopherson 		       int tdp_huge_page_level)
5588c50d8ae3SPaolo Bonzini {
5589bde77235SSean Christopherson 	tdp_enabled = enable_tdp;
559083013059SSean Christopherson 	max_tdp_level = tdp_max_root_level;
5591703c335dSSean Christopherson 
5592703c335dSSean Christopherson 	/*
55931d92d2e8SSean Christopherson 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
5594703c335dSSean Christopherson 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
5595703c335dSSean Christopherson 	 * the kernel is not.  But, KVM never creates a page size greater than
5596703c335dSSean Christopherson 	 * what is used by the kernel for any given HVA, i.e. the kernel's
5597703c335dSSean Christopherson 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5598703c335dSSean Christopherson 	 */
5599703c335dSSean Christopherson 	if (tdp_enabled)
56001d92d2e8SSean Christopherson 		max_huge_page_level = tdp_huge_page_level;
5601703c335dSSean Christopherson 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
56021d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_1G;
5603703c335dSSean Christopherson 	else
56041d92d2e8SSean Christopherson 		max_huge_page_level = PG_LEVEL_2M;
5605c50d8ae3SPaolo Bonzini }
5606bde77235SSean Christopherson EXPORT_SYMBOL_GPL(kvm_configure_mmu);
5607c50d8ae3SPaolo Bonzini 
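/*
 * This is called by the vendor modules during hardware setup.  As a
 * sketch (the authoritative arguments live in svm.c/vmx.c), SVM does
 * roughly:
 *
 *	kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
 *
 * i.e. when NPT is enabled KVM may use 1GB pages for guest memory even
 * if the host kernel does not, subject to the hugepage adjustment
 * described in the comment above.
 */
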
5608c50d8ae3SPaolo Bonzini /* The return value indicates if tlb flush on all vcpus is needed. */
5609c50d8ae3SPaolo Bonzini typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
5610c50d8ae3SPaolo Bonzini 
5611c50d8ae3SPaolo Bonzini /* The caller should hold mmu-lock before calling this function. */
5612c50d8ae3SPaolo Bonzini static __always_inline bool
5613c50d8ae3SPaolo Bonzini slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5614c50d8ae3SPaolo Bonzini 			slot_level_handler fn, int start_level, int end_level,
5615c50d8ae3SPaolo Bonzini 			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
5616c50d8ae3SPaolo Bonzini {
5617c50d8ae3SPaolo Bonzini 	struct slot_rmap_walk_iterator iterator;
5618c50d8ae3SPaolo Bonzini 	bool flush = false;
5619c50d8ae3SPaolo Bonzini 
5620c50d8ae3SPaolo Bonzini 	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5621c50d8ae3SPaolo Bonzini 			end_gfn, &iterator) {
5622c50d8ae3SPaolo Bonzini 		if (iterator.rmap)
5623c50d8ae3SPaolo Bonzini 			flush |= fn(kvm, iterator.rmap);
5624c50d8ae3SPaolo Bonzini 
5625c50d8ae3SPaolo Bonzini 		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
5626c50d8ae3SPaolo Bonzini 			if (flush && lock_flush_tlb) {
5627c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm,
5628c50d8ae3SPaolo Bonzini 						start_gfn,
5629c50d8ae3SPaolo Bonzini 						iterator.gfn - start_gfn + 1);
5630c50d8ae3SPaolo Bonzini 				flush = false;
5631c50d8ae3SPaolo Bonzini 			}
5632c50d8ae3SPaolo Bonzini 			cond_resched_lock(&kvm->mmu_lock);
5633c50d8ae3SPaolo Bonzini 		}
5634c50d8ae3SPaolo Bonzini 	}
5635c50d8ae3SPaolo Bonzini 
5636c50d8ae3SPaolo Bonzini 	if (flush && lock_flush_tlb) {
5637c50d8ae3SPaolo Bonzini 		kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
5638c50d8ae3SPaolo Bonzini 						   end_gfn - start_gfn + 1);
5639c50d8ae3SPaolo Bonzini 		flush = false;
5640c50d8ae3SPaolo Bonzini 	}
5641c50d8ae3SPaolo Bonzini 
5642c50d8ae3SPaolo Bonzini 	return flush;
5643c50d8ae3SPaolo Bonzini }
5644c50d8ae3SPaolo Bonzini 
5645c50d8ae3SPaolo Bonzini static __always_inline bool
5646c50d8ae3SPaolo Bonzini slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5647c50d8ae3SPaolo Bonzini 		  slot_level_handler fn, int start_level, int end_level,
5648c50d8ae3SPaolo Bonzini 		  bool lock_flush_tlb)
5649c50d8ae3SPaolo Bonzini {
5650c50d8ae3SPaolo Bonzini 	return slot_handle_level_range(kvm, memslot, fn, start_level,
5651c50d8ae3SPaolo Bonzini 			end_level, memslot->base_gfn,
5652c50d8ae3SPaolo Bonzini 			memslot->base_gfn + memslot->npages - 1,
5653c50d8ae3SPaolo Bonzini 			lock_flush_tlb);
5654c50d8ae3SPaolo Bonzini }
5655c50d8ae3SPaolo Bonzini 
5656c50d8ae3SPaolo Bonzini static __always_inline bool
5657c50d8ae3SPaolo Bonzini slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5658c50d8ae3SPaolo Bonzini 		      slot_level_handler fn, bool lock_flush_tlb)
5659c50d8ae3SPaolo Bonzini {
56603bae0459SSean Christopherson 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5661e662ec3eSSean Christopherson 				 KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
5662c50d8ae3SPaolo Bonzini }
5663c50d8ae3SPaolo Bonzini 
5664c50d8ae3SPaolo Bonzini static __always_inline bool
5665c50d8ae3SPaolo Bonzini slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5666c50d8ae3SPaolo Bonzini 			slot_level_handler fn, bool lock_flush_tlb)
5667c50d8ae3SPaolo Bonzini {
56683bae0459SSean Christopherson 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K + 1,
5669e662ec3eSSean Christopherson 				 KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
5670c50d8ae3SPaolo Bonzini }
5671c50d8ae3SPaolo Bonzini 
5672c50d8ae3SPaolo Bonzini static __always_inline bool
5673c50d8ae3SPaolo Bonzini slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
5674c50d8ae3SPaolo Bonzini 		 slot_level_handler fn, bool lock_flush_tlb)
5675c50d8ae3SPaolo Bonzini {
56763bae0459SSean Christopherson 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
56773bae0459SSean Christopherson 				 PG_LEVEL_4K, lock_flush_tlb);
5678c50d8ae3SPaolo Bonzini }
5679c50d8ae3SPaolo Bonzini 
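/*
 * A minimal usage sketch for the helpers above, with a hypothetical
 * handler (example_handler is not part of this file): the handler is
 * invoked once per non-empty rmap list, and its return value requests
 * a TLB flush, which the walker batches around mmu_lock reschedules.
 *
 *	static bool example_handler(struct kvm *kvm,
 *				    struct kvm_rmap_head *rmap_head)
 *	{
 *		return !!rmap_head->val;	// flush if anything mapped
 *	}
 *
 *	flush = slot_handle_leaf(kvm, memslot, example_handler, true);
 */
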
5680c50d8ae3SPaolo Bonzini static void free_mmu_pages(struct kvm_mmu *mmu)
5681c50d8ae3SPaolo Bonzini {
5682c50d8ae3SPaolo Bonzini 	free_page((unsigned long)mmu->pae_root);
5683c50d8ae3SPaolo Bonzini 	free_page((unsigned long)mmu->lm_root);
5684c50d8ae3SPaolo Bonzini }
5685c50d8ae3SPaolo Bonzini 
5686c50d8ae3SPaolo Bonzini static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5687c50d8ae3SPaolo Bonzini {
5688c50d8ae3SPaolo Bonzini 	struct page *page;
5689c50d8ae3SPaolo Bonzini 	int i;
5690c50d8ae3SPaolo Bonzini 
5691c50d8ae3SPaolo Bonzini 	/*
5692c50d8ae3SPaolo Bonzini 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5693c50d8ae3SPaolo Bonzini 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
5694c50d8ae3SPaolo Bonzini 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5695c50d8ae3SPaolo Bonzini 	 * x86_64.  Therefore we need to allocate the PDP table in the first
5696c50d8ae3SPaolo Bonzini 	 * 4GB of memory, which happens to fit the DMA32 zone.  Except for
5697c50d8ae3SPaolo Bonzini 	 * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
5698c50d8ae3SPaolo Bonzini 	 * skip allocating the PDP table.
5699c50d8ae3SPaolo Bonzini 	 */
5700d468d94bSSean Christopherson 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5701c50d8ae3SPaolo Bonzini 		return 0;
5702c50d8ae3SPaolo Bonzini 
5703c50d8ae3SPaolo Bonzini 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5704c50d8ae3SPaolo Bonzini 	if (!page)
5705c50d8ae3SPaolo Bonzini 		return -ENOMEM;
5706c50d8ae3SPaolo Bonzini 
5707c50d8ae3SPaolo Bonzini 	mmu->pae_root = page_address(page);
5708c50d8ae3SPaolo Bonzini 	for (i = 0; i < 4; ++i)
5709c50d8ae3SPaolo Bonzini 		mmu->pae_root[i] = INVALID_PAGE;
5710c50d8ae3SPaolo Bonzini 
5711c50d8ae3SPaolo Bonzini 	return 0;
5712c50d8ae3SPaolo Bonzini }
5713c50d8ae3SPaolo Bonzini 
5714c50d8ae3SPaolo Bonzini int kvm_mmu_create(struct kvm_vcpu *vcpu)
5715c50d8ae3SPaolo Bonzini {
5716c50d8ae3SPaolo Bonzini 	uint i;
5717c50d8ae3SPaolo Bonzini 	int ret;
5718c50d8ae3SPaolo Bonzini 
57195962bfb7SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
57205f6078f9SSean Christopherson 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
57215f6078f9SSean Christopherson 
57225962bfb7SSean Christopherson 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
57235f6078f9SSean Christopherson 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
57245962bfb7SSean Christopherson 
572596880883SSean Christopherson 	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
572696880883SSean Christopherson 
5727c50d8ae3SPaolo Bonzini 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
5728c50d8ae3SPaolo Bonzini 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5729c50d8ae3SPaolo Bonzini 
5730c50d8ae3SPaolo Bonzini 	vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
5731be01e8e2SSean Christopherson 	vcpu->arch.root_mmu.root_pgd = 0;
5732c50d8ae3SPaolo Bonzini 	vcpu->arch.root_mmu.translate_gpa = translate_gpa;
5733c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5734c50d8ae3SPaolo Bonzini 		vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5735c50d8ae3SPaolo Bonzini 
5736c50d8ae3SPaolo Bonzini 	vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
5737be01e8e2SSean Christopherson 	vcpu->arch.guest_mmu.root_pgd = 0;
5738c50d8ae3SPaolo Bonzini 	vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
5739c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5740c50d8ae3SPaolo Bonzini 		vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5741c50d8ae3SPaolo Bonzini 
5742c50d8ae3SPaolo Bonzini 	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5743c50d8ae3SPaolo Bonzini 
5744c50d8ae3SPaolo Bonzini 	ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu);
5745c50d8ae3SPaolo Bonzini 	if (ret)
5746c50d8ae3SPaolo Bonzini 		return ret;
5747c50d8ae3SPaolo Bonzini 
5748c50d8ae3SPaolo Bonzini 	ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu);
5749c50d8ae3SPaolo Bonzini 	if (ret)
5750c50d8ae3SPaolo Bonzini 		goto fail_allocate_root;
5751c50d8ae3SPaolo Bonzini 
5752c50d8ae3SPaolo Bonzini 	return ret;
5753c50d8ae3SPaolo Bonzini  fail_allocate_root:
5754c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
5755c50d8ae3SPaolo Bonzini 	return ret;
5756c50d8ae3SPaolo Bonzini }
5757c50d8ae3SPaolo Bonzini 
5758c50d8ae3SPaolo Bonzini #define BATCH_ZAP_PAGES	10
5759c50d8ae3SPaolo Bonzini static void kvm_zap_obsolete_pages(struct kvm *kvm)
5760c50d8ae3SPaolo Bonzini {
5761c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
5762c50d8ae3SPaolo Bonzini 	int nr_zapped, batch = 0;
5763c50d8ae3SPaolo Bonzini 
5764c50d8ae3SPaolo Bonzini restart:
5765c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe_reverse(sp, node,
5766c50d8ae3SPaolo Bonzini 	      &kvm->arch.active_mmu_pages, link) {
5767c50d8ae3SPaolo Bonzini 		/*
5768c50d8ae3SPaolo Bonzini 		 * No obsolete valid page exists ahead of a newly created page,
5769c50d8ae3SPaolo Bonzini 		 * since active_mmu_pages is a FIFO list, so the walk stops here.
5770c50d8ae3SPaolo Bonzini 		 */
5771c50d8ae3SPaolo Bonzini 		if (!is_obsolete_sp(kvm, sp))
5772c50d8ae3SPaolo Bonzini 			break;
5773c50d8ae3SPaolo Bonzini 
5774c50d8ae3SPaolo Bonzini 		/*
5775f95eec9bSSean Christopherson 		 * Invalid pages should never land back on the list of active
5776f95eec9bSSean Christopherson 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
5777f95eec9bSSean Christopherson 		 * infinite loop if the page gets put back on the list (again).
5778c50d8ae3SPaolo Bonzini 		 */
5779f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
5780c50d8ae3SPaolo Bonzini 			continue;
5781c50d8ae3SPaolo Bonzini 
5782c50d8ae3SPaolo Bonzini 		/*
5783c50d8ae3SPaolo Bonzini 		 * No need to flush the TLB since we're only zapping shadow
5784c50d8ae3SPaolo Bonzini 		 * pages with an obsolete generation number and all vCPUS have
5785c50d8ae3SPaolo Bonzini 		 * loaded a new root, i.e. the shadow pages being zapped cannot
5786c50d8ae3SPaolo Bonzini 		 * be in active use by the guest.
5787c50d8ae3SPaolo Bonzini 		 */
5788c50d8ae3SPaolo Bonzini 		if (batch >= BATCH_ZAP_PAGES &&
5789c50d8ae3SPaolo Bonzini 		    cond_resched_lock(&kvm->mmu_lock)) {
5790c50d8ae3SPaolo Bonzini 			batch = 0;
5791c50d8ae3SPaolo Bonzini 			goto restart;
5792c50d8ae3SPaolo Bonzini 		}
5793c50d8ae3SPaolo Bonzini 
5794c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp,
5795c50d8ae3SPaolo Bonzini 				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5796c50d8ae3SPaolo Bonzini 			batch += nr_zapped;
5797c50d8ae3SPaolo Bonzini 			goto restart;
5798c50d8ae3SPaolo Bonzini 		}
5799c50d8ae3SPaolo Bonzini 	}
5800c50d8ae3SPaolo Bonzini 
5801c50d8ae3SPaolo Bonzini 	/*
5802c50d8ae3SPaolo Bonzini 	 * Trigger a remote TLB flush before freeing the page tables to ensure
5803c50d8ae3SPaolo Bonzini 	 * KVM is not in the middle of a lockless shadow page table walk, which
5804c50d8ae3SPaolo Bonzini 	 * may reference the pages.
5805c50d8ae3SPaolo Bonzini 	 */
5806c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5807c50d8ae3SPaolo Bonzini }
5808c50d8ae3SPaolo Bonzini 
5809c50d8ae3SPaolo Bonzini /*
5810c50d8ae3SPaolo Bonzini  * Fast invalidate all shadow pages and use lock-break technique
5811c50d8ae3SPaolo Bonzini  * to zap obsolete pages.
5812c50d8ae3SPaolo Bonzini  *
5813c50d8ae3SPaolo Bonzini  * It's required when a memslot is being deleted or the VM is being
5814c50d8ae3SPaolo Bonzini  * destroyed; in these cases, we must ensure that the KVM MMU does
5815c50d8ae3SPaolo Bonzini  * not use any resource of the slot being deleted, or of any slot,
5816c50d8ae3SPaolo Bonzini  * after this function returns.
5817c50d8ae3SPaolo Bonzini  */
5818c50d8ae3SPaolo Bonzini static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5819c50d8ae3SPaolo Bonzini {
5820c50d8ae3SPaolo Bonzini 	lockdep_assert_held(&kvm->slots_lock);
5821c50d8ae3SPaolo Bonzini 
5822c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
5823c50d8ae3SPaolo Bonzini 	trace_kvm_mmu_zap_all_fast(kvm);
5824c50d8ae3SPaolo Bonzini 
5825c50d8ae3SPaolo Bonzini 	/*
5826c50d8ae3SPaolo Bonzini 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5827c50d8ae3SPaolo Bonzini 	 * held for the entire duration of zapping obsolete pages, it's
5828c50d8ae3SPaolo Bonzini 	 * impossible for there to be multiple invalid generations associated
5829c50d8ae3SPaolo Bonzini 	 * with *valid* shadow pages at any given time, i.e. there is exactly
5830c50d8ae3SPaolo Bonzini 	 * one valid generation and (at most) one invalid generation.
5831c50d8ae3SPaolo Bonzini 	 */
5832c50d8ae3SPaolo Bonzini 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5833c50d8ae3SPaolo Bonzini 
5834c50d8ae3SPaolo Bonzini 	/*
5835c50d8ae3SPaolo Bonzini 	 * Notify all vcpus to reload their shadow page tables and flush their
5836c50d8ae3SPaolo Bonzini 	 * TLBs.  All vcpus will then switch to a new shadow page table with
5837c50d8ae3SPaolo Bonzini 	 * the new mmu_valid_gen.
5838c50d8ae3SPaolo Bonzini 	 *
5839c50d8ae3SPaolo Bonzini 	 * Note: we need to do this under the protection of mmu_lock;
5840c50d8ae3SPaolo Bonzini 	 * otherwise, a vcpu could purge a shadow page but miss the tlb flush.
5841c50d8ae3SPaolo Bonzini 	 */
5842c50d8ae3SPaolo Bonzini 	kvm_reload_remote_mmus(kvm);
5843c50d8ae3SPaolo Bonzini 
5844c50d8ae3SPaolo Bonzini 	kvm_zap_obsolete_pages(kvm);
5845c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
5846c50d8ae3SPaolo Bonzini }
5847c50d8ae3SPaolo Bonzini 
5848c50d8ae3SPaolo Bonzini static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5849c50d8ae3SPaolo Bonzini {
5850c50d8ae3SPaolo Bonzini 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5851c50d8ae3SPaolo Bonzini }
5852c50d8ae3SPaolo Bonzini 
5853c50d8ae3SPaolo Bonzini static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5854c50d8ae3SPaolo Bonzini 			struct kvm_memory_slot *slot,
5855c50d8ae3SPaolo Bonzini 			struct kvm_page_track_notifier_node *node)
5856c50d8ae3SPaolo Bonzini {
5857c50d8ae3SPaolo Bonzini 	kvm_mmu_zap_all_fast(kvm);
5858c50d8ae3SPaolo Bonzini }
5859c50d8ae3SPaolo Bonzini 
5860c50d8ae3SPaolo Bonzini void kvm_mmu_init_vm(struct kvm *kvm)
5861c50d8ae3SPaolo Bonzini {
5862c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5863c50d8ae3SPaolo Bonzini 
5864c50d8ae3SPaolo Bonzini 	node->track_write = kvm_mmu_pte_write;
5865c50d8ae3SPaolo Bonzini 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5866c50d8ae3SPaolo Bonzini 	kvm_page_track_register_notifier(kvm, node);
5867c50d8ae3SPaolo Bonzini }
5868c50d8ae3SPaolo Bonzini 
5869c50d8ae3SPaolo Bonzini void kvm_mmu_uninit_vm(struct kvm *kvm)
5870c50d8ae3SPaolo Bonzini {
5871c50d8ae3SPaolo Bonzini 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5872c50d8ae3SPaolo Bonzini 
5873c50d8ae3SPaolo Bonzini 	kvm_page_track_unregister_notifier(kvm, node);
5874c50d8ae3SPaolo Bonzini }
5875c50d8ae3SPaolo Bonzini 
5876c50d8ae3SPaolo Bonzini void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5877c50d8ae3SPaolo Bonzini {
5878c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
5879c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
5880c50d8ae3SPaolo Bonzini 	int i;
5881c50d8ae3SPaolo Bonzini 
5882c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
5883c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5884c50d8ae3SPaolo Bonzini 		slots = __kvm_memslots(kvm, i);
5885c50d8ae3SPaolo Bonzini 		kvm_for_each_memslot(memslot, slots) {
5886c50d8ae3SPaolo Bonzini 			gfn_t start, end;
5887c50d8ae3SPaolo Bonzini 
5888c50d8ae3SPaolo Bonzini 			start = max(gfn_start, memslot->base_gfn);
5889c50d8ae3SPaolo Bonzini 			end = min(gfn_end, memslot->base_gfn + memslot->npages);
5890c50d8ae3SPaolo Bonzini 			if (start >= end)
5891c50d8ae3SPaolo Bonzini 				continue;
5892c50d8ae3SPaolo Bonzini 
5893c50d8ae3SPaolo Bonzini 			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
58943bae0459SSean Christopherson 						PG_LEVEL_4K,
5895e662ec3eSSean Christopherson 						KVM_MAX_HUGEPAGE_LEVEL,
5896c50d8ae3SPaolo Bonzini 						start, end - 1, true);
5897c50d8ae3SPaolo Bonzini 		}
5898c50d8ae3SPaolo Bonzini 	}
5899c50d8ae3SPaolo Bonzini 
5900c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
5901c50d8ae3SPaolo Bonzini }
5902c50d8ae3SPaolo Bonzini 
5903c50d8ae3SPaolo Bonzini static bool slot_rmap_write_protect(struct kvm *kvm,
5904c50d8ae3SPaolo Bonzini 				    struct kvm_rmap_head *rmap_head)
5905c50d8ae3SPaolo Bonzini {
5906c50d8ae3SPaolo Bonzini 	return __rmap_write_protect(kvm, rmap_head, false);
5907c50d8ae3SPaolo Bonzini }
5908c50d8ae3SPaolo Bonzini 
5909c50d8ae3SPaolo Bonzini void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
59103c9bd400SJay Zhou 				      struct kvm_memory_slot *memslot,
59113c9bd400SJay Zhou 				      int start_level)
5912c50d8ae3SPaolo Bonzini {
5913c50d8ae3SPaolo Bonzini 	bool flush;
5914c50d8ae3SPaolo Bonzini 
5915c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
59163c9bd400SJay Zhou 	flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5917e662ec3eSSean Christopherson 				start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
5918c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
5919c50d8ae3SPaolo Bonzini 
5920c50d8ae3SPaolo Bonzini 	/*
5921c50d8ae3SPaolo Bonzini 	 * We can flush all the TLBs outside of mmu_lock without TLB
5922c50d8ae3SPaolo Bonzini 	 * corruption, because we only change sptes from writable to
5923c50d8ae3SPaolo Bonzini 	 * read-only, so the only case to handle is an spte changing from
5924c50d8ae3SPaolo Bonzini 	 * present to present (changing an spte from present to nonpresent
5925c50d8ae3SPaolo Bonzini 	 * flushes all the TLBs immediately).  In other words, the only
5926c50d8ae3SPaolo Bonzini 	 * case we care about is mmu_spte_update(), which checks
5927c50d8ae3SPaolo Bonzini 	 * SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE instead of
5928c50d8ae3SPaolo Bonzini 	 * PT_WRITABLE_MASK and therefore no longer depends on
5929c50d8ae3SPaolo Bonzini 	 * PT_WRITABLE_MASK.
5930c50d8ae3SPaolo Bonzini 	 */
5931c50d8ae3SPaolo Bonzini 	if (flush)
59327f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5933c50d8ae3SPaolo Bonzini }
5934c50d8ae3SPaolo Bonzini 
5935c50d8ae3SPaolo Bonzini static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
5936c50d8ae3SPaolo Bonzini 					 struct kvm_rmap_head *rmap_head)
5937c50d8ae3SPaolo Bonzini {
5938c50d8ae3SPaolo Bonzini 	u64 *sptep;
5939c50d8ae3SPaolo Bonzini 	struct rmap_iterator iter;
5940c50d8ae3SPaolo Bonzini 	int need_tlb_flush = 0;
5941c50d8ae3SPaolo Bonzini 	kvm_pfn_t pfn;
5942c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
5943c50d8ae3SPaolo Bonzini 
5944c50d8ae3SPaolo Bonzini restart:
5945c50d8ae3SPaolo Bonzini 	for_each_rmap_spte(rmap_head, &iter, sptep) {
594657354682SSean Christopherson 		sp = sptep_to_sp(sptep);
5947c50d8ae3SPaolo Bonzini 		pfn = spte_to_pfn(*sptep);
5948c50d8ae3SPaolo Bonzini 
5949c50d8ae3SPaolo Bonzini 		/*
5950c50d8ae3SPaolo Bonzini 		 * We cannot create huge page mappings for indirect shadow pages,
5951c50d8ae3SPaolo Bonzini 		 * which are found on the last rmap (level = 1) when not using
5952c50d8ae3SPaolo Bonzini 		 * tdp; such shadow pages are kept in sync with the guest page
5953c50d8ae3SPaolo Bonzini 		 * table, and the guest page table uses 4K mappings when the
5954c50d8ae3SPaolo Bonzini 		 * indirect sp has level = 1.
5955c50d8ae3SPaolo Bonzini 		 */
5956c50d8ae3SPaolo Bonzini 		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
5957e851265aSSean Christopherson 		    (kvm_is_zone_device_pfn(pfn) ||
5958e851265aSSean Christopherson 		     PageCompound(pfn_to_page(pfn)))) {
5959c50d8ae3SPaolo Bonzini 			pte_list_remove(rmap_head, sptep);
5960c50d8ae3SPaolo Bonzini 
5961c50d8ae3SPaolo Bonzini 			if (kvm_available_flush_tlb_with_range())
5962c50d8ae3SPaolo Bonzini 				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
5963c50d8ae3SPaolo Bonzini 					KVM_PAGES_PER_HPAGE(sp->role.level));
5964c50d8ae3SPaolo Bonzini 			else
5965c50d8ae3SPaolo Bonzini 				need_tlb_flush = 1;
5966c50d8ae3SPaolo Bonzini 
5967c50d8ae3SPaolo Bonzini 			goto restart;
5968c50d8ae3SPaolo Bonzini 		}
5969c50d8ae3SPaolo Bonzini 	}
5970c50d8ae3SPaolo Bonzini 
5971c50d8ae3SPaolo Bonzini 	return need_tlb_flush;
5972c50d8ae3SPaolo Bonzini }
5973c50d8ae3SPaolo Bonzini 
5974c50d8ae3SPaolo Bonzini void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5975c50d8ae3SPaolo Bonzini 				   const struct kvm_memory_slot *memslot)
5976c50d8ae3SPaolo Bonzini {
5977c50d8ae3SPaolo Bonzini 	/* FIXME: const-ify all uses of struct kvm_memory_slot.  */
5978c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
5979c50d8ae3SPaolo Bonzini 	slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
5980c50d8ae3SPaolo Bonzini 			 kvm_mmu_zap_collapsible_spte, true);
5981c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
5982c50d8ae3SPaolo Bonzini }
5983c50d8ae3SPaolo Bonzini 
5984b3594ffbSSean Christopherson void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
5985b3594ffbSSean Christopherson 					struct kvm_memory_slot *memslot)
5986b3594ffbSSean Christopherson {
5987b3594ffbSSean Christopherson 	/*
59887f42aa76SSean Christopherson 	 * All current use cases for flushing the TLBs for a specific memslot
59897f42aa76SSean Christopherson 	 * are related to dirty logging, and do the TLB flush out of mmu_lock.
59907f42aa76SSean Christopherson 	 * The interaction between the various operations on memslot must be
59917f42aa76SSean Christopherson 	 * serialized by slots_locks to ensure the TLB flush from one operation
59927f42aa76SSean Christopherson 	 * serialized by slots_lock to ensure the TLB flush from one operation
5993b3594ffbSSean Christopherson 	 */
5994b3594ffbSSean Christopherson 	lockdep_assert_held(&kvm->slots_lock);
5995cec37648SSean Christopherson 	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5996cec37648SSean Christopherson 					   memslot->npages);
5997b3594ffbSSean Christopherson }
5998b3594ffbSSean Christopherson 
5999c50d8ae3SPaolo Bonzini void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
6000c50d8ae3SPaolo Bonzini 				   struct kvm_memory_slot *memslot)
6001c50d8ae3SPaolo Bonzini {
6002c50d8ae3SPaolo Bonzini 	bool flush;
6003c50d8ae3SPaolo Bonzini 
6004c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
6005c50d8ae3SPaolo Bonzini 	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
6006c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
6007c50d8ae3SPaolo Bonzini 
6008c50d8ae3SPaolo Bonzini 	/*
6009c50d8ae3SPaolo Bonzini 	 * It's also safe to flush TLBs out of mmu lock here as currently this
6010c50d8ae3SPaolo Bonzini 	 * function is only used for dirty logging, in which case flushing TLB
6011c50d8ae3SPaolo Bonzini 	 * out of mmu lock also guarantees no dirty pages will be lost in
6012c50d8ae3SPaolo Bonzini 	 * dirty_bitmap.
6013c50d8ae3SPaolo Bonzini 	 */
6014c50d8ae3SPaolo Bonzini 	if (flush)
60157f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
6016c50d8ae3SPaolo Bonzini }
6017c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
6018c50d8ae3SPaolo Bonzini 
6019c50d8ae3SPaolo Bonzini void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
6020c50d8ae3SPaolo Bonzini 					struct kvm_memory_slot *memslot)
6021c50d8ae3SPaolo Bonzini {
6022c50d8ae3SPaolo Bonzini 	bool flush;
6023c50d8ae3SPaolo Bonzini 
6024c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
6025c50d8ae3SPaolo Bonzini 	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
6026c50d8ae3SPaolo Bonzini 					false);
6027c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
6028c50d8ae3SPaolo Bonzini 
6029c50d8ae3SPaolo Bonzini 	if (flush)
60307f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
6031c50d8ae3SPaolo Bonzini }
6032c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
6033c50d8ae3SPaolo Bonzini 
6034c50d8ae3SPaolo Bonzini void kvm_mmu_slot_set_dirty(struct kvm *kvm,
6035c50d8ae3SPaolo Bonzini 			    struct kvm_memory_slot *memslot)
6036c50d8ae3SPaolo Bonzini {
6037c50d8ae3SPaolo Bonzini 	bool flush;
6038c50d8ae3SPaolo Bonzini 
6039c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
6040c50d8ae3SPaolo Bonzini 	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
6041c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
6042c50d8ae3SPaolo Bonzini 
6043c50d8ae3SPaolo Bonzini 	if (flush)
60447f42aa76SSean Christopherson 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
6045c50d8ae3SPaolo Bonzini }
6046c50d8ae3SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
6047c50d8ae3SPaolo Bonzini 
6048c50d8ae3SPaolo Bonzini void kvm_mmu_zap_all(struct kvm *kvm)
6049c50d8ae3SPaolo Bonzini {
6050c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp, *node;
6051c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
6052c50d8ae3SPaolo Bonzini 	int ign;
6053c50d8ae3SPaolo Bonzini 
6054c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
6055c50d8ae3SPaolo Bonzini restart:
6056c50d8ae3SPaolo Bonzini 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
6057f95eec9bSSean Christopherson 		if (WARN_ON(sp->role.invalid))
6058c50d8ae3SPaolo Bonzini 			continue;
6059c50d8ae3SPaolo Bonzini 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
6060c50d8ae3SPaolo Bonzini 			goto restart;
6061c50d8ae3SPaolo Bonzini 		if (cond_resched_lock(&kvm->mmu_lock))
6062c50d8ae3SPaolo Bonzini 			goto restart;
6063c50d8ae3SPaolo Bonzini 	}
6064c50d8ae3SPaolo Bonzini 
6065c50d8ae3SPaolo Bonzini 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
6066c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
6067c50d8ae3SPaolo Bonzini }
6068c50d8ae3SPaolo Bonzini 
6069c50d8ae3SPaolo Bonzini void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
6070c50d8ae3SPaolo Bonzini {
6071c50d8ae3SPaolo Bonzini 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
6072c50d8ae3SPaolo Bonzini 
6073c50d8ae3SPaolo Bonzini 	gen &= MMIO_SPTE_GEN_MASK;
6074c50d8ae3SPaolo Bonzini 
6075c50d8ae3SPaolo Bonzini 	/*
6076c50d8ae3SPaolo Bonzini 	 * Generation numbers are incremented in multiples of the number of
6077c50d8ae3SPaolo Bonzini 	 * address spaces in order to provide unique generations across all
6078c50d8ae3SPaolo Bonzini 	 * address spaces.  Strip what is effectively the address space
6079c50d8ae3SPaolo Bonzini 	 * modifier prior to checking for a wrap of the MMIO generation so
6080c50d8ae3SPaolo Bonzini 	 * that a wrap in any address space is detected.
6081c50d8ae3SPaolo Bonzini 	 */
6082c50d8ae3SPaolo Bonzini 	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
6083c50d8ae3SPaolo Bonzini 
6084c50d8ae3SPaolo Bonzini 	/*
6085c50d8ae3SPaolo Bonzini 	 * The very rare case: if the MMIO generation number has wrapped,
6086c50d8ae3SPaolo Bonzini 	 * zap all shadow pages.
6087c50d8ae3SPaolo Bonzini 	 */
6088c50d8ae3SPaolo Bonzini 	if (unlikely(gen == 0)) {
6089c50d8ae3SPaolo Bonzini 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
6090c50d8ae3SPaolo Bonzini 		kvm_mmu_zap_all_fast(kvm);
6091c50d8ae3SPaolo Bonzini 	}
6092c50d8ae3SPaolo Bonzini }
6093c50d8ae3SPaolo Bonzini 
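/*
 * A worked example of the stripping above (a sketch, assuming
 * KVM_ADDRESS_SPACE_NUM == 2, as on x86 with SMM): the low bit of the
 * generation acts as the address-space modifier, so it is masked off
 * before the wrap check:
 *
 *	u64 gen = 0x3 & MMIO_SPTE_GEN_MASK;	// hypothetical value
 *
 *	gen &= ~((u64)2 - 1);	// drops bit 0, gen == 0x2
 *	// gen == 0 would mean the MMIO generation wrapped in some
 *	// address space, triggering kvm_mmu_zap_all_fast().
 */
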
6094c50d8ae3SPaolo Bonzini static unsigned long
6095c50d8ae3SPaolo Bonzini mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
6096c50d8ae3SPaolo Bonzini {
6097c50d8ae3SPaolo Bonzini 	struct kvm *kvm;
6098c50d8ae3SPaolo Bonzini 	int nr_to_scan = sc->nr_to_scan;
6099c50d8ae3SPaolo Bonzini 	unsigned long freed = 0;
6100c50d8ae3SPaolo Bonzini 
6101c50d8ae3SPaolo Bonzini 	mutex_lock(&kvm_lock);
6102c50d8ae3SPaolo Bonzini 
6103c50d8ae3SPaolo Bonzini 	list_for_each_entry(kvm, &vm_list, vm_list) {
6104c50d8ae3SPaolo Bonzini 		int idx;
6105c50d8ae3SPaolo Bonzini 		LIST_HEAD(invalid_list);
6106c50d8ae3SPaolo Bonzini 
6107c50d8ae3SPaolo Bonzini 		/*
6108c50d8ae3SPaolo Bonzini 		 * Never scan more than sc->nr_to_scan VM instances.
6109c50d8ae3SPaolo Bonzini 		 * In practice this condition is never hit, since we do not try
6110c50d8ae3SPaolo Bonzini 		 * to shrink more than one VM and it is very unlikely to see
6111c50d8ae3SPaolo Bonzini 		 * !n_used_mmu_pages so many times.
6112c50d8ae3SPaolo Bonzini 		 */
6113c50d8ae3SPaolo Bonzini 		if (!nr_to_scan--)
6114c50d8ae3SPaolo Bonzini 			break;
6115c50d8ae3SPaolo Bonzini 		/*
6116c50d8ae3SPaolo Bonzini 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
6117c50d8ae3SPaolo Bonzini 		 * here.  We may skip a VM instance erroneously, but we do not
6118c50d8ae3SPaolo Bonzini 		 * want to shrink a VM that only started to populate its MMU
6119c50d8ae3SPaolo Bonzini 		 * anyway.
6120c50d8ae3SPaolo Bonzini 		 */
6121c50d8ae3SPaolo Bonzini 		if (!kvm->arch.n_used_mmu_pages &&
6122c50d8ae3SPaolo Bonzini 		    !kvm_has_zapped_obsolete_pages(kvm))
6123c50d8ae3SPaolo Bonzini 			continue;
6124c50d8ae3SPaolo Bonzini 
6125c50d8ae3SPaolo Bonzini 		idx = srcu_read_lock(&kvm->srcu);
6126c50d8ae3SPaolo Bonzini 		spin_lock(&kvm->mmu_lock);
6127c50d8ae3SPaolo Bonzini 
6128c50d8ae3SPaolo Bonzini 		if (kvm_has_zapped_obsolete_pages(kvm)) {
6129c50d8ae3SPaolo Bonzini 			kvm_mmu_commit_zap_page(kvm,
6130c50d8ae3SPaolo Bonzini 			      &kvm->arch.zapped_obsolete_pages);
6131c50d8ae3SPaolo Bonzini 			goto unlock;
6132c50d8ae3SPaolo Bonzini 		}
6133c50d8ae3SPaolo Bonzini 
6134ebdb292dSSean Christopherson 		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
6135c50d8ae3SPaolo Bonzini 
6136c50d8ae3SPaolo Bonzini unlock:
6137c50d8ae3SPaolo Bonzini 		spin_unlock(&kvm->mmu_lock);
6138c50d8ae3SPaolo Bonzini 		srcu_read_unlock(&kvm->srcu, idx);
6139c50d8ae3SPaolo Bonzini 
6140c50d8ae3SPaolo Bonzini 		/*
6141c50d8ae3SPaolo Bonzini 		 * unfair on small ones
6142c50d8ae3SPaolo Bonzini 		 * per-vm shrinkers cry out
6143c50d8ae3SPaolo Bonzini 		 * sadness comes quickly
6144c50d8ae3SPaolo Bonzini 		 */
6145c50d8ae3SPaolo Bonzini 		list_move_tail(&kvm->vm_list, &vm_list);
6146c50d8ae3SPaolo Bonzini 		break;
6147c50d8ae3SPaolo Bonzini 	}
6148c50d8ae3SPaolo Bonzini 
6149c50d8ae3SPaolo Bonzini 	mutex_unlock(&kvm_lock);
6150c50d8ae3SPaolo Bonzini 	return freed;
6151c50d8ae3SPaolo Bonzini }
6152c50d8ae3SPaolo Bonzini 
6153c50d8ae3SPaolo Bonzini static unsigned long
6154c50d8ae3SPaolo Bonzini mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
6155c50d8ae3SPaolo Bonzini {
6156c50d8ae3SPaolo Bonzini 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
6157c50d8ae3SPaolo Bonzini }
6158c50d8ae3SPaolo Bonzini 
6159c50d8ae3SPaolo Bonzini static struct shrinker mmu_shrinker = {
6160c50d8ae3SPaolo Bonzini 	.count_objects = mmu_shrink_count,
6161c50d8ae3SPaolo Bonzini 	.scan_objects = mmu_shrink_scan,
6162c50d8ae3SPaolo Bonzini 	.seeks = DEFAULT_SEEKS * 10,
6163c50d8ae3SPaolo Bonzini };
6164c50d8ae3SPaolo Bonzini 
6165c50d8ae3SPaolo Bonzini static void mmu_destroy_caches(void)
6166c50d8ae3SPaolo Bonzini {
6167c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(pte_list_desc_cache);
6168c50d8ae3SPaolo Bonzini 	kmem_cache_destroy(mmu_page_header_cache);
6169c50d8ae3SPaolo Bonzini }
6170c50d8ae3SPaolo Bonzini 
6171c50d8ae3SPaolo Bonzini static void kvm_set_mmio_spte_mask(void)
6172c50d8ae3SPaolo Bonzini {
6173c50d8ae3SPaolo Bonzini 	u64 mask;
6174c50d8ae3SPaolo Bonzini 
6175c50d8ae3SPaolo Bonzini 	/*
61766129ed87SSean Christopherson 	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
61776129ed87SSean Christopherson 	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
61786129ed87SSean Christopherson 	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
61796129ed87SSean Christopherson 	 * 52-bit physical addresses then there are no reserved PA bits in the
61806129ed87SSean Christopherson 	 * PTEs and so the reserved PA approach must be disabled.
6181c50d8ae3SPaolo Bonzini 	 */
61826129ed87SSean Christopherson 	if (shadow_phys_bits < 52)
61836129ed87SSean Christopherson 		mask = BIT_ULL(51) | PT_PRESENT_MASK;
61846129ed87SSean Christopherson 	else
61856129ed87SSean Christopherson 		mask = 0;
6186c50d8ae3SPaolo Bonzini 
6187e7581cacSPaolo Bonzini 	kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
6188c50d8ae3SPaolo Bonzini }
6189c50d8ae3SPaolo Bonzini 
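/*
 * Concretely (a sketch): on a CPU with shadow_phys_bits == 46, bit 51
 * is reserved in all 64-bit PTEs, so MMIO sptes become
 *
 *	mask = BIT_ULL(51) | PT_PRESENT_MASK;
 *
 * i.e. present with a reserved PA bit set, guaranteeing that the access
 * faults with PFEC.RSVD=1 and is recognized as MMIO.  With a full
 * 52-bit MAXPHYADDR there is no reserved PA bit to borrow, so mask is 0
 * and the reserved-bit approach is disabled.
 */
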
6190c50d8ae3SPaolo Bonzini static bool get_nx_auto_mode(void)
6191c50d8ae3SPaolo Bonzini {
6192c50d8ae3SPaolo Bonzini 	/* Return true when CPU has the bug, and mitigations are ON */
6193c50d8ae3SPaolo Bonzini 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
6194c50d8ae3SPaolo Bonzini }
6195c50d8ae3SPaolo Bonzini 
6196c50d8ae3SPaolo Bonzini static void __set_nx_huge_pages(bool val)
6197c50d8ae3SPaolo Bonzini {
6198c50d8ae3SPaolo Bonzini 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
6199c50d8ae3SPaolo Bonzini }
6200c50d8ae3SPaolo Bonzini 
6201c50d8ae3SPaolo Bonzini static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
6202c50d8ae3SPaolo Bonzini {
6203c50d8ae3SPaolo Bonzini 	bool old_val = nx_huge_pages;
6204c50d8ae3SPaolo Bonzini 	bool new_val;
6205c50d8ae3SPaolo Bonzini 
6206c50d8ae3SPaolo Bonzini 	/* In "auto" mode deploy workaround only if CPU has the bug. */
6207c50d8ae3SPaolo Bonzini 	if (sysfs_streq(val, "off"))
6208c50d8ae3SPaolo Bonzini 		new_val = 0;
6209c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "force"))
6210c50d8ae3SPaolo Bonzini 		new_val = 1;
6211c50d8ae3SPaolo Bonzini 	else if (sysfs_streq(val, "auto"))
6212c50d8ae3SPaolo Bonzini 		new_val = get_nx_auto_mode();
6213c50d8ae3SPaolo Bonzini 	else if (strtobool(val, &new_val) < 0)
6214c50d8ae3SPaolo Bonzini 		return -EINVAL;
6215c50d8ae3SPaolo Bonzini 
6216c50d8ae3SPaolo Bonzini 	__set_nx_huge_pages(new_val);
6217c50d8ae3SPaolo Bonzini 
6218c50d8ae3SPaolo Bonzini 	if (new_val != old_val) {
6219c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
6220c50d8ae3SPaolo Bonzini 
6221c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
6222c50d8ae3SPaolo Bonzini 
6223c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list) {
6224c50d8ae3SPaolo Bonzini 			mutex_lock(&kvm->slots_lock);
6225c50d8ae3SPaolo Bonzini 			kvm_mmu_zap_all_fast(kvm);
6226c50d8ae3SPaolo Bonzini 			mutex_unlock(&kvm->slots_lock);
6227c50d8ae3SPaolo Bonzini 
6228c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6229c50d8ae3SPaolo Bonzini 		}
6230c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
6231c50d8ae3SPaolo Bonzini 	}
6232c50d8ae3SPaolo Bonzini 
6233c50d8ae3SPaolo Bonzini 	return 0;
6234c50d8ae3SPaolo Bonzini }
6235c50d8ae3SPaolo Bonzini 
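/*
 * This setter backs the nx_huge_pages module parameter, so a runtime
 * toggle such as
 *
 *	echo force > /sys/module/kvm/parameters/nx_huge_pages
 *
 * lands here: every VM's shadow pages are zapped so they are rebuilt
 * under the new policy, and each recovery thread is woken to pick up
 * the change.
 */
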
6236c50d8ae3SPaolo Bonzini int kvm_mmu_module_init(void)
6237c50d8ae3SPaolo Bonzini {
6238c50d8ae3SPaolo Bonzini 	int ret = -ENOMEM;
6239c50d8ae3SPaolo Bonzini 
6240c50d8ae3SPaolo Bonzini 	if (nx_huge_pages == -1)
6241c50d8ae3SPaolo Bonzini 		__set_nx_huge_pages(get_nx_auto_mode());
6242c50d8ae3SPaolo Bonzini 
6243c50d8ae3SPaolo Bonzini 	/*
6244c50d8ae3SPaolo Bonzini 	 * MMU roles use union aliasing, which is, generally speaking,
6245c50d8ae3SPaolo Bonzini 	 * undefined behavior.  However, we supposedly know how compilers behave
6246c50d8ae3SPaolo Bonzini 	 * and the current status quo is unlikely to change.  The guards below
6247c50d8ae3SPaolo Bonzini 	 * are supposed to let us know if that assumption becomes false.
6248c50d8ae3SPaolo Bonzini 	 */
6249c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
6250c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
6251c50d8ae3SPaolo Bonzini 	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
6252c50d8ae3SPaolo Bonzini 
6253c50d8ae3SPaolo Bonzini 	kvm_mmu_reset_all_pte_masks();
6254c50d8ae3SPaolo Bonzini 
6255c50d8ae3SPaolo Bonzini 	kvm_set_mmio_spte_mask();
6256c50d8ae3SPaolo Bonzini 
6257c50d8ae3SPaolo Bonzini 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
6258c50d8ae3SPaolo Bonzini 					    sizeof(struct pte_list_desc),
6259c50d8ae3SPaolo Bonzini 					    0, SLAB_ACCOUNT, NULL);
6260c50d8ae3SPaolo Bonzini 	if (!pte_list_desc_cache)
6261c50d8ae3SPaolo Bonzini 		goto out;
6262c50d8ae3SPaolo Bonzini 
6263c50d8ae3SPaolo Bonzini 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
6264c50d8ae3SPaolo Bonzini 						  sizeof(struct kvm_mmu_page),
6265c50d8ae3SPaolo Bonzini 						  0, SLAB_ACCOUNT, NULL);
6266c50d8ae3SPaolo Bonzini 	if (!mmu_page_header_cache)
6267c50d8ae3SPaolo Bonzini 		goto out;
6268c50d8ae3SPaolo Bonzini 
6269c50d8ae3SPaolo Bonzini 	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
6270c50d8ae3SPaolo Bonzini 		goto out;
6271c50d8ae3SPaolo Bonzini 
6272c50d8ae3SPaolo Bonzini 	ret = register_shrinker(&mmu_shrinker);
6273c50d8ae3SPaolo Bonzini 	if (ret)
6274c50d8ae3SPaolo Bonzini 		goto out;
6275c50d8ae3SPaolo Bonzini 
6276c50d8ae3SPaolo Bonzini 	return 0;
6277c50d8ae3SPaolo Bonzini 
6278c50d8ae3SPaolo Bonzini out:
6279c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
6280c50d8ae3SPaolo Bonzini 	return ret;
6281c50d8ae3SPaolo Bonzini }
6282c50d8ae3SPaolo Bonzini 
6283c50d8ae3SPaolo Bonzini /*
6284c50d8ae3SPaolo Bonzini  * Calculate the number of MMU pages needed for the VM.
6285c50d8ae3SPaolo Bonzini  */
6286c50d8ae3SPaolo Bonzini unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
6287c50d8ae3SPaolo Bonzini {
6288c50d8ae3SPaolo Bonzini 	unsigned long nr_mmu_pages;
6289c50d8ae3SPaolo Bonzini 	unsigned long nr_pages = 0;
6290c50d8ae3SPaolo Bonzini 	struct kvm_memslots *slots;
6291c50d8ae3SPaolo Bonzini 	struct kvm_memory_slot *memslot;
6292c50d8ae3SPaolo Bonzini 	int i;
6293c50d8ae3SPaolo Bonzini 
6294c50d8ae3SPaolo Bonzini 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
6295c50d8ae3SPaolo Bonzini 		slots = __kvm_memslots(kvm, i);
6296c50d8ae3SPaolo Bonzini 
6297c50d8ae3SPaolo Bonzini 		kvm_for_each_memslot(memslot, slots)
6298c50d8ae3SPaolo Bonzini 			nr_pages += memslot->npages;
6299c50d8ae3SPaolo Bonzini 	}
6300c50d8ae3SPaolo Bonzini 
6301c50d8ae3SPaolo Bonzini 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
6302c50d8ae3SPaolo Bonzini 	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
6303c50d8ae3SPaolo Bonzini 
6304c50d8ae3SPaolo Bonzini 	return nr_mmu_pages;
6305c50d8ae3SPaolo Bonzini }
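
/*
 * Worked example, assuming KVM_PERMILLE_MMU_PAGES is 20 and
 * KVM_MIN_ALLOC_MMU_PAGES is 64 (their current values in kvm_host.h): a VM
 * with 4 GiB of memory spans 1048576 4 KiB pages, so nr_mmu_pages =
 * 1048576 * 20 / 1000 = 20971 shadow pages; a VM small enough that its
 * 2% share falls below 64 is rounded up to the 64-page floor.
 */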
6306c50d8ae3SPaolo Bonzini 
6307c50d8ae3SPaolo Bonzini void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
6308c50d8ae3SPaolo Bonzini {
6309c50d8ae3SPaolo Bonzini 	kvm_mmu_unload(vcpu);
6310c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.root_mmu);
6311c50d8ae3SPaolo Bonzini 	free_mmu_pages(&vcpu->arch.guest_mmu);
6312c50d8ae3SPaolo Bonzini 	mmu_free_memory_caches(vcpu);
6313c50d8ae3SPaolo Bonzini }
6314c50d8ae3SPaolo Bonzini 
6315c50d8ae3SPaolo Bonzini void kvm_mmu_module_exit(void)
6316c50d8ae3SPaolo Bonzini {
6317c50d8ae3SPaolo Bonzini 	mmu_destroy_caches();
6318c50d8ae3SPaolo Bonzini 	percpu_counter_destroy(&kvm_total_used_mmu_pages);
6319c50d8ae3SPaolo Bonzini 	unregister_shrinker(&mmu_shrinker);
6320c50d8ae3SPaolo Bonzini 	mmu_audit_disable();
6321c50d8ae3SPaolo Bonzini }
6322c50d8ae3SPaolo Bonzini 
6323c50d8ae3SPaolo Bonzini static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
6324c50d8ae3SPaolo Bonzini {
6325c50d8ae3SPaolo Bonzini 	unsigned int old_val;
6326c50d8ae3SPaolo Bonzini 	int err;
6327c50d8ae3SPaolo Bonzini 
6328c50d8ae3SPaolo Bonzini 	old_val = nx_huge_pages_recovery_ratio;
6329c50d8ae3SPaolo Bonzini 	err = param_set_uint(val, kp);
6330c50d8ae3SPaolo Bonzini 	if (err)
6331c50d8ae3SPaolo Bonzini 		return err;
6332c50d8ae3SPaolo Bonzini 
6333c50d8ae3SPaolo Bonzini 	if (READ_ONCE(nx_huge_pages) &&
6334c50d8ae3SPaolo Bonzini 	    !old_val && nx_huge_pages_recovery_ratio) {
6335c50d8ae3SPaolo Bonzini 		struct kvm *kvm;
6336c50d8ae3SPaolo Bonzini 
6337c50d8ae3SPaolo Bonzini 		mutex_lock(&kvm_lock);
6338c50d8ae3SPaolo Bonzini 
6339c50d8ae3SPaolo Bonzini 		list_for_each_entry(kvm, &vm_list, vm_list)
6340c50d8ae3SPaolo Bonzini 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6341c50d8ae3SPaolo Bonzini 
6342c50d8ae3SPaolo Bonzini 		mutex_unlock(&kvm_lock);
6343c50d8ae3SPaolo Bonzini 	}
6344c50d8ae3SPaolo Bonzini 
6345c50d8ae3SPaolo Bonzini 	return err;
6346c50d8ae3SPaolo Bonzini }
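
/*
 * The explicit wakeup matters only on a zero -> nonzero transition while
 * the mitigation is enabled: with a ratio of zero the worker sleeps with
 * MAX_SCHEDULE_TIMEOUT, so without wake_up_process() a newly written ratio
 * would not take effect until the thread was woken for some other reason.
 */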
6347c50d8ae3SPaolo Bonzini 
6348c50d8ae3SPaolo Bonzini static void kvm_recover_nx_lpages(struct kvm *kvm)
6349c50d8ae3SPaolo Bonzini {
6350c50d8ae3SPaolo Bonzini 	int rcu_idx;
6351c50d8ae3SPaolo Bonzini 	struct kvm_mmu_page *sp;
6352c50d8ae3SPaolo Bonzini 	unsigned int ratio;
6353c50d8ae3SPaolo Bonzini 	LIST_HEAD(invalid_list);
6354c50d8ae3SPaolo Bonzini 	ulong to_zap;
6355c50d8ae3SPaolo Bonzini 
6356c50d8ae3SPaolo Bonzini 	rcu_idx = srcu_read_lock(&kvm->srcu);
6357c50d8ae3SPaolo Bonzini 	spin_lock(&kvm->mmu_lock);
6358c50d8ae3SPaolo Bonzini 
6359c50d8ae3SPaolo Bonzini 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
6360c50d8ae3SPaolo Bonzini 	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
6361c50d8ae3SPaolo Bonzini 	while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
6362c50d8ae3SPaolo Bonzini 		/*
6363c50d8ae3SPaolo Bonzini 		 * We use a separate list instead of just using active_mmu_pages
6364c50d8ae3SPaolo Bonzini 		 * because the number of lpage_disallowed pages is expected to
6365c50d8ae3SPaolo Bonzini 		 * be relatively small compared to the total.
6366c50d8ae3SPaolo Bonzini 		 */
6367c50d8ae3SPaolo Bonzini 		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
6368c50d8ae3SPaolo Bonzini 				      struct kvm_mmu_page,
6369c50d8ae3SPaolo Bonzini 				      lpage_disallowed_link);
6370c50d8ae3SPaolo Bonzini 		WARN_ON_ONCE(!sp->lpage_disallowed);
6371c50d8ae3SPaolo Bonzini 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
6372c50d8ae3SPaolo Bonzini 		WARN_ON_ONCE(sp->lpage_disallowed);
6373c50d8ae3SPaolo Bonzini 
6374c50d8ae3SPaolo Bonzini 		if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
6375c50d8ae3SPaolo Bonzini 			kvm_mmu_commit_zap_page(kvm, &invalid_list);
6376c50d8ae3SPaolo Bonzini 			if (to_zap)
6377c50d8ae3SPaolo Bonzini 				cond_resched_lock(&kvm->mmu_lock);
6378c50d8ae3SPaolo Bonzini 		}
6379c50d8ae3SPaolo Bonzini 	}
6380c50d8ae3SPaolo Bonzini 
6381c50d8ae3SPaolo Bonzini 	spin_unlock(&kvm->mmu_lock);
6382c50d8ae3SPaolo Bonzini 	srcu_read_unlock(&kvm->srcu, rcu_idx);
6383c50d8ae3SPaolo Bonzini }
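
/*
 * Worked example: with the default recovery ratio of 60 and 6000 recorded
 * nx_lpage_splits, one invocation zaps DIV_ROUND_UP(6000, 60) = 100
 * disallowed pages, i.e. roughly 1/60th of the backlog per run.
 */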
6384c50d8ae3SPaolo Bonzini 
6385c50d8ae3SPaolo Bonzini static long get_nx_lpage_recovery_timeout(u64 start_time)
6386c50d8ae3SPaolo Bonzini {
6387c50d8ae3SPaolo Bonzini 	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
6388c50d8ae3SPaolo Bonzini 		? start_time + 60 * HZ - get_jiffies_64()
6389c50d8ae3SPaolo Bonzini 		: MAX_SCHEDULE_TIMEOUT;
6390c50d8ae3SPaolo Bonzini }
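
/*
 * While the mitigation and a nonzero ratio are in effect, the worker thus
 * wakes once per minute (60 * HZ jiffies past start_time); combined with
 * the default ratio of 60, the whole backlog of split huge pages is
 * recovered over roughly an hour.
 */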
6391c50d8ae3SPaolo Bonzini 
6392c50d8ae3SPaolo Bonzini static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
6393c50d8ae3SPaolo Bonzini {
6394c50d8ae3SPaolo Bonzini 	u64 start_time;
6395c50d8ae3SPaolo Bonzini 	long remaining_time;
6396c50d8ae3SPaolo Bonzini 
6397c50d8ae3SPaolo Bonzini 	while (true) {
6398c50d8ae3SPaolo Bonzini 		start_time = get_jiffies_64();
6399c50d8ae3SPaolo Bonzini 		remaining_time = get_nx_lpage_recovery_timeout(start_time);
6400c50d8ae3SPaolo Bonzini 
6401c50d8ae3SPaolo Bonzini 		set_current_state(TASK_INTERRUPTIBLE);
6402c50d8ae3SPaolo Bonzini 		while (!kthread_should_stop() && remaining_time > 0) {
6403c50d8ae3SPaolo Bonzini 			schedule_timeout(remaining_time);
6404c50d8ae3SPaolo Bonzini 			remaining_time = get_nx_lpage_recovery_timeout(start_time);
6405c50d8ae3SPaolo Bonzini 			set_current_state(TASK_INTERRUPTIBLE);
6406c50d8ae3SPaolo Bonzini 		}
6407c50d8ae3SPaolo Bonzini 
6408c50d8ae3SPaolo Bonzini 		set_current_state(TASK_RUNNING);
6409c50d8ae3SPaolo Bonzini 
6410c50d8ae3SPaolo Bonzini 		if (kthread_should_stop())
6411c50d8ae3SPaolo Bonzini 			return 0;
6412c50d8ae3SPaolo Bonzini 
6413c50d8ae3SPaolo Bonzini 		kvm_recover_nx_lpages(kvm);
6414c50d8ae3SPaolo Bonzini 	}
6415c50d8ae3SPaolo Bonzini }
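
/*
 * Note the standard kthread sleep pattern above: the task state is set to
 * TASK_INTERRUPTIBLE *before* re-checking kthread_should_stop() and the
 * remaining time, so a wake_up_process() racing with those checks merely
 * turns the subsequent schedule_timeout() into an immediate return rather
 * than a lost wakeup.
 */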
6416c50d8ae3SPaolo Bonzini 
6417c50d8ae3SPaolo Bonzini int kvm_mmu_post_init_vm(struct kvm *kvm)
6418c50d8ae3SPaolo Bonzini {
6419c50d8ae3SPaolo Bonzini 	int err;
6420c50d8ae3SPaolo Bonzini 
6421c50d8ae3SPaolo Bonzini 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6422c50d8ae3SPaolo Bonzini 					  "kvm-nx-lpage-recovery",
6423c50d8ae3SPaolo Bonzini 					  &kvm->arch.nx_lpage_recovery_thread);
6424c50d8ae3SPaolo Bonzini 	if (!err)
6425c50d8ae3SPaolo Bonzini 		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6426c50d8ae3SPaolo Bonzini 
6427c50d8ae3SPaolo Bonzini 	return err;
6428c50d8ae3SPaolo Bonzini }
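
/*
 * kvm_vm_create_worker_thread() is assumed to hand back the thread in a
 * parked state (the kthread_unpark() above suggests as much); unparking is
 * what actually starts the recovery loop for this VM.
 */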
6429c50d8ae3SPaolo Bonzini 
6430c50d8ae3SPaolo Bonzini void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6431c50d8ae3SPaolo Bonzini {
6432c50d8ae3SPaolo Bonzini 	if (kvm->arch.nx_lpage_recovery_thread)
6433c50d8ae3SPaolo Bonzini 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6434c50d8ae3SPaolo Bonzini }
6435