/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
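
/*
 * Illustrative note (not part of the original header): with SIZE = 4 and
 * SHIFT = 13, PT32_DIR_PSE36_MASK evaluates to 0xf << 13 = 0x1e000, i.e. it
 * selects PDE bits 13..16, which the PSE-36 extension repurposes to hold
 * bits 32..35 of a 4MB page's physical address.
 */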

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)

static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}
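
/*
 * Example (illustrative): rsvd_bits(s, e) builds a contiguous mask of bits
 * s..e inclusive, e.g. rsvd_bits(52, 62) == 0x7ff0000000000000ULL; KVM's
 * reserved-bit checks typically invoke it as rsvd_bits(maxphyaddr, 51).
 * The (2ULL << (e - s)) - 1 form sidesteps the undefined 1ULL << 64 that a
 * naive ((1ULL << (e - s + 1)) - 1) would hit for s == 0, e == 63.  A
 * runtime e < s simply yields 0, while a constant e < s trips the
 * BUILD_BUG_ON.
 */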

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}
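
/*
 * Example (illustrative): X86_CR3_PCID_MASK covers the low 12 bits of CR3,
 * so with CR4.PCIDE = 1 and CR3 = 0x12345003, kvm_get_pcid() returns PCID 3;
 * with CR4.PCIDE = 0 the PCID field is architecturally meaningless and 0 is
 * returned.  The BUILD_BUG_ON asserts that the PCID field never overlaps
 * the page-frame bits selected by PAGE_MASK.
 */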

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root_hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
					  vcpu->arch.mmu->shadow_root_level);
}

struct kvm_page_fault {
	/* Arguments to kvm_mmu_do_page_fault.  */
	const gpa_t addr;
	const u32 error_code;
	const bool prefetch;

	/* Derived from error_code.  */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state.  */
	const bool is_tdp;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva.  */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_faultin_pfn.  */
	kvm_pfn_t pfn;
	hva_t hva;
	bool map_writable;
};

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefetch)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
	};
#ifdef CONFIG_RETPOLINE
	if (fault.is_tdp)
		return kvm_tdp_page_fault(vcpu, &fault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, &fault);
}
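
/*
 * Example (illustrative): a guest write to an unmapped GPA under TDP might
 * arrive as
 *
 *	kvm_mmu_do_page_fault(vcpu, gpa, PFERR_WRITE_MASK, false);
 *
 * giving fault.write = true and fault.present = false, with max_level
 * starting at KVM_MAX_HUGEPAGE_LEVEL and clamped later by the memslot and
 * the host mapping size.  The CONFIG_RETPOLINE special case calls
 * kvm_tdp_page_fault() directly because a retpoline makes the ->page_fault
 * indirect call expensive on this hot path.
 */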

/*
 * Currently, we have two sorts of write-protection: a) the first one
 * write-protects guest pages to sync guest modifications, b) the second one
 * is used to sync the dirty bitmap for KVM_GET_DIRTY_LOG.  The differences
 * between the two are:
 * 1) the first case clears the MMU-writable bit.
 * 2) the first case requires an immediate TLB flush to avoid corrupting the
 *    shadow page tables between vcpus, so it must be done under the
 *    protection of mmu-lock.  The second case does not need to flush the TLB
 *    until the dirty bitmap is returned to userspace, since it only
 *    write-protects pages that are logged in the bitmap; those pages are
 *    never missing from the dirty bitmap, so the flush can happen outside of
 *    mmu-lock.
 *
 * So, there is a problem: the first case can observe stale TLB entries left
 * by the second case, which write-protects pages without flushing the TLB
 * immediately.  To make the first case aware of this, we make it flush the
 * TLB whenever it write-protects an spte whose MMU-writable bit is set.
 * This works because the second case never touches the MMU-writable bit.
 *
 * Anyway, whenever an spte is updated (only permission and status bits are
 * changed) we need to check whether an spte with MMU-writable set has become
 * read-only; if so, the TLB must be flushed.  Fortunately,
 * mmu_spte_update() already handles this perfectly.
 *
 * The rules for using MMU-writable and PT_WRITABLE_MASK:
 * - to check whether an spte may have a writable TLB entry, or whether the
 *   spte can be made writable in the MMU mapping (the common case), check
 *   MMU-writable; otherwise
 * - to fix a page fault on the spte, or to write-protect it for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline bool is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = static_call(kvm_x86_get_cpl)(vcpu);
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits; there are 16 domains and 2
		 * attribute bits per domain in pkru.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
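
/*
 * Worked example (illustrative): for a supervisor write, pfec =
 * PFERR_WRITE_MASK (bit 1) contributes 1 to index via pfec >> 1.  If CPL < 3
 * and EFLAGS.AC = 1, smap = X86_EFLAGS_AC (bit 18), and shifting right by
 * X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1 = 16 lands it on bit 2, the slot
 * that the always-zero PFERR_RSVD_MASK bit occupies after the >> 1.  The
 * SMAP override thus selects a different precomputed mmu->permissions[]
 * entry without a single branch.
 */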

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
	/*
	 * Read shadow_root_allocated before related pointers. Hence, threads
	 * reading shadow_root_allocated in any lock context are guaranteed to
	 * see the pointers. Pairs with smp_store_release in
	 * mmu_first_shadow_root_alloc.
	 */
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}

#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
#else
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
#endif

static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return !is_tdp_mmu_enabled(kvm) || kvm_shadow_root_allocated(kvm);
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
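
/*
 * Example (illustrative): KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) is 9, so for
 * gfn = 0x1234 in a slot with base_gfn = 0x1000, gfn_to_index() returns
 * (0x1234 >> 9) - (0x1000 >> 9) = 9 - 8 = 1, i.e. the second 2MB-granular
 * tracking entry of the slot.
 */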

static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
		      int level)
{
	return gfn_to_index(slot->base_gfn + npages - 1,
			    slot->base_gfn, level) + 1;
}

static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
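
/*
 * Example (illustrative): a slot with base_gfn = 256 and npages = 512 covers
 * gfns 256..767, straddling the 2MB boundary at gfn 512.  At that level,
 * gfn_to_index(767, 256, PG_LEVEL_2M) = (767 >> 9) - (256 >> 9) = 1, so
 * kvm_mmu_slot_lpages() returns 2: a misaligned slot can need one more
 * large-page tracking entry than its size alone suggests.
 */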

static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}
#endif