/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

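/*
 * KVM_MMU_WARN_ON() is a WARN_ON_ONCE() that is compiled out when
 * CONFIG_KVM_PROVE_MMU=n; BUILD_BUG_ON_INVALID() still type-checks the
 * expression but generates no code.
 */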
#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif

/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12)
#define __PT_LEVEL_SHIFT(level, bits_per_level)	\
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))

#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_ENT_PER_PAGE(bits_per_level)  (1 << (bits_per_level))
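
/*
 * For example, instantiating these with 9 bits per level (as for 64-bit
 * PTEs) gives __PT_ENT_PER_PAGE(9) == 512, __PT_LEVEL_SHIFT(2, 9) == 21,
 * and __PT_INDEX(addr, 2, 9) == bits 29:21 of the address, i.e. the index
 * into a 2MiB-granule level.
 */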
2942c88ff8SSean Christopherson 
30c834e5e4SSean Christopherson /*
31c834e5e4SSean Christopherson  * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
32c834e5e4SSean Christopherson  * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
33c834e5e4SSean Christopherson  * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
34c834e5e4SSean Christopherson  * as the CPU would treat that as PRESENT PDPTR with reserved bits set.  Use
35c834e5e4SSean Christopherson  * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
36c834e5e4SSean Christopherson  */
37c834e5e4SSean Christopherson #define INVALID_PAE_ROOT	0
38c834e5e4SSean Christopherson #define IS_VALID_PAE_ROOT(x)	(!!(x))
39c834e5e4SSean Christopherson 
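/*
 * The shared kernel zero page doubles as a dummy root: it is always
 * allocated, can be installed as a root HPA without a backing
 * struct kvm_mmu_page, and is trivially identifiable by its pfn.
 */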
static inline hpa_t kvm_mmu_get_dummy_root(void)
{
	return my_zero_pfn(0) << PAGE_SHIFT;
}

static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{
	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
}

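/*
 * Pointer to an SPTE in a TDP MMU page table.  Annotated __rcu because TDP
 * MMU SPTEs may be walked by lockless, RCU-protected readers.
 */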
typedef u64 __rcu *tdp_ptep_t;

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
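	/*
	 * Set when the guest page table shadowed by this page is left
	 * writable by the guest (see mmu_try_to_unsync_pages()); its SPTEs
	 * may be stale until the page is re-synced.
	 */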
	bool unsync;
	union {
		u8 mmu_valid_gen;

		/* Only accessed under slots_lock.  */
		bool tdp_mmu_scheduled_root_to_zap;
	};

	/*
	 * The shadow page can't be replaced by an equivalent huge page
	 * because it is being used to map an executable page in the guest
	 * and the NX huge page mitigation is enabled.
	 */
	bool nx_huge_page_disallowed;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

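	/* The page table page itself, i.e. the array of SPTEs for this shadow page. */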
	u64 *spt;

	/*
	 * Stores the result of the guest translation being shadowed by each
	 * SPTE.  KVM shadows two types of guest translations: nGPA -> GPA
	 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging). In both
	 * cases the result of the translation is a GPA and a set of access
	 * constraints.
	 *
	 * The GFN is stored in the upper bits (PAGE_SHIFT) and the shadowed
	 * access permissions are stored in the lower bits. Note, for
	 * convenience and uniformity across guests, the access permissions are
	 * stored in KVM format (e.g.  ACC_EXEC_MASK) not the raw guest format.
	 */
	u64 *shadowed_translation;

	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);

	/*
	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
	 * huge page.  A shadow page will have nx_huge_page_disallowed set but
	 * not be on the list if a huge page is disallowed for other reasons,
	 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
	 * isn't properly aligned, etc...
	 */
	struct list_head possible_nx_huge_page_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page.  */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}

static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which also bypasses PML,
	 * since writes now result in a vmexit.  Note, the check on CPU dirty
	 * logging being enabled is mandatory as the bits used to denote
	 * WP-only SPTEs are reserved for PAE paging (32-bit KVM).
	 */
	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
}

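/*
 * Round the gfn down to the base gfn of the huge page containing it, e.g.
 * gfn_round_for_level(gfn, PG_LEVEL_2M) clears the low 9 bits of the gfn.
 */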
static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -KVM_PAGES_PER_HPAGE(level);
}

int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);

/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
				    KVM_PAGES_PER_HPAGE(level));
}

unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}

struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault.  */
	const gpa_t addr;
	const u64 error_code;
	const bool prefetch;

	/* Derived from error_code.  */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state.  */
	const bool is_tdp;
	const bool is_private;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva.  */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_faultin_pfn.  */
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	hva_t hva;
	bool map_writable;

	/*
	 * Indicates the guest is trying to write a gfn that contains one or
	 * more of the PTEs used to translate the write itself, i.e. the access
	 * is changing its own translation in the guest page tables.
	 */
	bool write_fault_to_shadow_pgtable;
};

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
 * and of course kvm_mmu_do_page_fault().
 *
 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h
 *
 * Note, all values must be greater than or equal to zero so as not to encroach
 * on -errno return values.  Somewhat arbitrarily use '0' for CONTINUE, which
 * will allow for efficient machine code when checking for CONTINUE, e.g.
 * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero.
 */
enum {
	RET_PF_CONTINUE = 0,
	RET_PF_RETRY,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
						     struct kvm_page_fault *fault)
{
	kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
				      PAGE_SIZE, fault->write, fault->exec,
				      fault->is_private);
}

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u64 err, bool prefetch,
					int *emulation_type, u8 *level)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
		.is_private = err & PFERR_PRIVATE_ACCESS,

		.pfn = KVM_PFN_ERR_FAULT,
		.hva = KVM_HVA_ERR_BAD,
	};
	int r;

	if (vcpu->arch.mmu->root_role.direct) {
		fault.gfn = fault.addr >> PAGE_SHIFT;
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	/*
	 * Not sure what's happening, but punt to userspace and hope that
	 * they can fix it by changing memory to shared, or they can
	 * provide a better error.
	 */
	if (r == RET_PF_EMULATE && fault.is_private) {
		pr_warn_ratelimited("kvm: unexpected emulation request on private memory\n");
		kvm_mmu_prepare_memory_fault_exit(vcpu, &fault);
		return -EFAULT;
	}

	if (fault.write_fault_to_shadow_pgtable && emulation_type)
		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;
	if (level)
		*level = fault.goal_level;

	return r;
}

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      int max_level);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */