/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | \
			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE | \
			       X86_CR4_LA57)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)

/*
 * Return a mask with bits [s, e] (inclusive) set, or 0 if the range is
 * empty (e < s).  A constant e < s or e > 63 is rejected at build time;
 * a non-constant e is clamped to 63.
 */
static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}
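
/*
 * Worked example (for illustration only, not used by any code here):
 * rsvd_bits(52, 62) computes ((2ULL << 10) - 1) << 52, i.e.
 * 0x7ff0000000000000ULL, the mask with bits 52..62 set.
 */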

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}
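
/*
 * For illustration: with CR4.PCIDE=1 and CR3 == 0x12345abc,
 * kvm_get_pcid() returns 0xabc (X86_CR3_PCID_MASK covers the low 12
 * bits of CR3); with CR4.PCIDE=0 the PCID is architecturally 0.
 */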

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root_hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
					  vcpu->arch.mmu->shadow_root_level);
}

struct kvm_page_fault {
	/* Arguments to kvm_mmu_do_page_fault.  */
	const gpa_t addr;
	const u32 error_code;
	const bool prefault;

	/* Derived from error_code.  */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu.  */
	const bool is_tdp;
};

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		       bool prefault);

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefault)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefault = prefault,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
	};
#ifdef CONFIG_RETPOLINE
	/*
	 * Call kvm_tdp_page_fault() directly in the common TDP case to
	 * avoid the retpoline overhead of the indirect call through
	 * mmu->page_fault.
	 */
	if (fault.is_tdp)
		return kvm_tdp_page_fault(vcpu, fault.addr, fault.error_code, fault.prefault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, fault.addr, fault.error_code, fault.prefault);
}

/*
 * Currently, we have two sorts of write-protection: a) the first
 * write-protects guest pages to sync guest modifications, b) the second
 * is used to sync the dirty bitmap when we do KVM_GET_DIRTY_LOG.  The
 * differences between the two sorts are:
 * 1) the first case clears the MMU-writable bit.
 * 2) the first case requires flushing the TLB immediately to avoid
 *    corrupting the shadow page table between vcpus, so it must be done
 *    under the protection of mmu_lock.  The second case does not need to
 *    flush the TLB until the dirty bitmap is returned to userspace, since
 *    it only write-protects pages logged in the bitmap; a page in the
 *    dirty bitmap cannot be missed, so the flush can happen outside of
 *    mmu_lock.
 *
 * This creates a problem: the first case can encounter a stale TLB entry
 * left behind by the second case, which write-protects pages without
 * flushing the TLB immediately.  To make the first case aware of this
 * problem, we have it flush the TLB whenever it write-protects an spte
 * whose MMU-writable bit is set; this works because the second case
 * never touches the MMU-writable bit.
 *
 * In any case, whenever an spte is updated (only permission and status
 * bits are changed) we need to check whether an spte with MMU-writable
 * set becomes read-only; if so, the TLB must be flushed.  Fortunately,
 * mmu_spte_update() already handles this perfectly.
 *
 * The rules for using MMU-writable and PT_WRITABLE_MASK:
 * - to check whether there may be a writable TLB entry, or whether the
 *   spte can be made writable in the MMU mapping, check MMU-writable;
 *   this covers most cases.
 * - when fixing a page fault on the spte, or write-protecting for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline bool is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
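
/*
 * Illustrative sketch only (a hypothetical helper, not part of this
 * header): the "may there be a writable TLB entry" check from the rules
 * above would test the MMU-writable bit, assuming the
 * shadow_mmu_writable_mask from spte.h:
 *
 *	static inline bool spte_has_mmu_writable(u64 spte)
 *	{
 *		return spte & shadow_mmu_writable_mask;
 *	}
 *
 * whereas page-fault fixing and dirty logging test is_writable_pte().
 */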

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = static_call(kvm_x86_get_cpl)(vcpu);
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits; there are 16 domains and 2
		 * attribute bits per domain in PKRU.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
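
/*
 * Worked example of the index computation (for illustration): a
 * supervisor write with CPL == 0, EFLAGS.AC == 1 and
 * pfec == PFERR_WRITE_MASK gives smap == X86_EFLAGS_AC, since (cpl - 3)
 * is a negative, all-ones mask.  Then
 * index == (2 >> 1) + (X86_EFLAGS_AC >> 16) == 1 + 4 == 5, i.e. the
 * write permission slot with the PFERR_RSVD position set, which encodes
 * "SMAP checks overridden".
 */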

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	/*
	 * Read memslots_have_rmaps before the rmap pointers.  Hence, threads
	 * reading memslots_have_rmaps in any lock context are guaranteed to
	 * see the pointers.  Pairs with smp_store_release() in
	 * alloc_all_memslots_rmaps().
	 */
	return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
}
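
/*
 * Sketch of the release side of the pairing above (for illustration;
 * see alloc_all_memslots_rmaps() for the real code):
 *
 *	slot->arch.rmap[i] = ...;	// populate the rmaps first
 *	smp_store_release(&kvm->arch.memslots_have_rmaps, true);
 */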

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
		      int level)
{
	return gfn_to_index(slot->base_gfn + npages - 1,
			    slot->base_gfn, level) + 1;
}

static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
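
/*
 * For illustration: a slot with base_gfn == 0 and npages == 1024 (4 MiB
 * of guest memory) spans exactly two 2 MiB regions, and indeed
 * kvm_mmu_slot_lpages(slot, PG_LEVEL_2M) ==
 * (1023 >> 9) - (0 >> 9) + 1 == 2, with KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M)
 * being 9.
 */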

static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}
#endif