/* arch/x86/kvm/mmu.h (Linux, revision c4c11dd160a8cc98f402c4e12f94b1572e822ffd) */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
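
/*
 * Illustrative sketch, not part of the original header: with PSE-36, a
 * 4MB page-directory entry stores physical-address bits 32..35 in entry
 * bits 13..16. The gfn contribution of those bits can be recovered as
 * below; mmu.c carries a similar helper, but the name here is an example.
 */
static inline unsigned long pse36_gfn_delta_example(u32 gpte)
{
	/* lift entry bits 13..16 up to gfn bits 20..23 (PA bits 32..35) */
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}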

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

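/*
 * Illustrative note, not in the original header: PT64/PT32E/PT32_ROOT_LEVEL
 * above name the paging-structure roots of 4-level long mode, 3-level PAE
 * and 2-level legacy 32-bit paging; the PT_*_LEVEL values below number the
 * walk levels, so 1GB pages map at PT_PDPE_LEVEL, 2MB/4MB pages at
 * PT_DIRECTORY_LEVEL and 4KB pages at PT_PAGE_TABLE_LEVEL.
 */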
#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_RSVD_MASK (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)
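
/*
 * Illustrative examples, not in the original header: a user-mode write
 * that hit a present page arrives with
 * PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK, while an
 * instruction fetch from a not-present page carries PFERR_FETCH_MASK
 * alone; PFERR_RSVD_MASK flags reserved bits set in a paging entry.
 */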

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);

/*
 * Return values of handle_mmio_page_fault_common():
 * RET_MMIO_PF_EMULATE: it is a real MMIO page fault, emulate the instruction
 *			directly.
 * RET_MMIO_PF_INVALID: an invalid spte was detected; let the real page-fault
 *			path update the MMIO spte.
 * RET_MMIO_PF_RETRY: let the CPU fault again on the address.
 * RET_MMIO_PF_BUG: a bug was detected.
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_INVALID = 2,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};

int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

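/*
 * Hedged sketch of a caller dispatching on the return values above; the
 * EPT-misconfig handler in vmx.c has this general shape, but the exact
 * handling here is illustrative only.
 */
static inline int mmio_pf_dispatch_example(struct kvm_vcpu *vcpu, u64 addr)
{
	int ret = handle_mmio_page_fault_common(vcpu, addr, true);

	if (ret == RET_MMIO_PF_EMULATE)
		return 1;	/* hand the access to the x86 emulator */
	if (ret == RET_MMIO_PF_INVALID)
		return 1;	/* take the slow page-fault path instead */
	if (ret == RET_MMIO_PF_RETRY)
		return 1;	/* re-enter the guest; it will fault again */
	return -1;		/* RET_MMIO_PF_BUG */
}
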
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}
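
/*
 * Hedged usage note: mmu.c keeps at least KVM_MIN_FREE_MMU_PAGES shadow
 * pages available before allocating new ones; a check in that spirit
 * (the real reclaim logic lives in mmu.c) might look like:
 */
static inline bool mmu_needs_reclaim_example(struct kvm *kvm)
{
	return kvm_mmu_available_pages(kvm) < KVM_MIN_FREE_MMU_PAGES;
}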

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
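
/*
 * Hedged sketch: the vcpu run loop calls kvm_mmu_reload() before each
 * guest entry, so the valid-root fast path above is taken on almost
 * every entry; the shape of such a caller (illustrative only):
 */
static inline int mmu_reload_before_entry_example(struct kvm_vcpu *vcpu)
{
	int r = kvm_mmu_reload(vcpu);	/* rebuilds roots only if invalid */

	if (unlikely(r))
		return r;	/* e.g. out of memory; bail out */
	/* ... proceed with the actual guest entry ... */
	return 0;
}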

static inline int is_present_gpte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}
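
/*
 * Hedged note: when CR0.WP is clear, supervisor-mode writes ignore pte
 * write protection. mmu.c folds this into mmu->permissions when the
 * permission bitmap is rebuilt, which permission_fault() below consults.
 */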

/*
 * Will a fault with a given page-fault error code (pfec) cause a permission
 * fault with the given access (in ACC_* format)?
 */
static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
				    unsigned pfec)
{
	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
}
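
/*
 * Illustrative example, assuming pte_access carries the ACC_* masks
 * defined in mmu.c: a supervisor write (pfec = present|write) to a pte
 * that lacks ACC_WRITE_MASK would be checked roughly like this; with
 * CR0.WP set, the precomputed bitmap reports a fault.
 */
static inline bool supervisor_write_faults_example(struct kvm_mmu *mmu,
						   unsigned pte_access)
{
	unsigned pfec = PFERR_PRESENT_MASK | PFERR_WRITE_MASK;

	return permission_fault(mmu, pte_access, pfec);
}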

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
#endif