xref: /linux/arch/x86/kvm/mmu.h (revision 97ec8c067d322d32effdc1701760d3babbc5595f)
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
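/*
 * That is, a 4KiB page holds 4096/8 = 512 (2^9) 64-bit entries, or
 * 4096/4 = 1024 (2^10) 32-bit entries, hence 9 and 10 index bits per level.
 */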

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
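/*
 * For example, an ordinary present, writable, user-accessible leaf entry
 * carries PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK = 0x7 in its
 * low bits.
 */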

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
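
/*
 * Under PSE-36, bits 13..16 of a 4MB page-directory entry supply bits
 * 32..35 of the physical base address.  A minimal illustrative decoder
 * (hypothetical name; mmu.c keeps an equivalent pse36_gfn_delta() helper)
 * would look like this:
 */
static inline u64 pse36_gfn_delta_sketch(u32 gpte)
{
	/*
	 * Move the PSE-36 field from PDE bits 13..16 up to physical
	 * address bits 32..35, expressed as a gfn delta.
	 */
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}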

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
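
/*
 * The root levels match the paging modes: 4-level paging in long mode,
 * 3-level PAE paging, and 2-level legacy 32-bit paging.  The PT_*_LEVEL
 * names count from the leaf: level 1 maps 4KiB pages, level 2 (the page
 * directory) can map 2MiB/4MiB pages, and level 3 (the PDPT) 1GiB pages.
 */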

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
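/*
 * These mirror the architectural page-fault error code: for example, a
 * user-mode write to a present page faults with
 * PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK = 0x7.
 */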

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);

/*
 * Return values of handle_mmio_page_fault_common:
 * RET_MMIO_PF_EMULATE: the fault is a real MMIO page fault; emulate the
 *			instruction directly.
 * RET_MMIO_PF_INVALID: an invalid spte was detected; let the real page
 *			fault path update the mmio spte.
 * RET_MMIO_PF_RETRY: let the CPU fault again on the address.
 * RET_MMIO_PF_BUG: a bug was detected.
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_INVALID = 2,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};

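/*
 * A caller typically dispatches along these lines (sketch only; the real
 * dispatch lives in the vendor exit handlers, e.g. the EPT misconfig path):
 *
 *	ret = handle_mmio_page_fault_common(vcpu, addr, direct);
 *	if (ret == RET_MMIO_PF_EMULATE)
 *		emulate the access;
 *	else if (ret == RET_MMIO_PF_INVALID)
 *		fall back to the slow page-fault path;
 *	else if (ret == RET_MMIO_PF_RETRY)
 *		resume the guest so it faults again;
 *	else
 *		report the RET_MMIO_PF_BUG;
 */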
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
		bool execonly);
void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
		bool ept);

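/*
 * Headroom before the shadow-page limit: how many more shadow pages this
 * VM may still allocate.
 */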
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

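/*
 * Reload the MMU root if it has been invalidated; a still-valid root_hpa
 * means there is nothing to do, which keeps the common path cheap.
 */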
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline int is_present_gpte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Will a fault with a given page-fault error code (pfec) cause a permission
 * fault with the given access (in ACC_* format)?
 */
static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    unsigned pte_access, unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
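	/*
	 * Worked example: with CPL = 0 and EFLAGS.AC = 1, cpl - 3 is
	 * negative, so all of its bits are set, and rflags & X86_EFLAGS_AC
	 * leaves bit 18 set, giving smap = 0x40000.  The shift below is
	 * 18 - 3 + 1 = 16 (the extra 1 matches the pfec >> 1), so
	 * smap >> 16 = 4, which sets the shifted PFERR_RSVD slot in index.
	 */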
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));

	return (mmu->permissions[index] >> pte_access) & 1;
}

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
#endif