xref: /linux/arch/x86/kvm/mmu.h (revision b6ebbac51bedf9e98e837688bc838f400196da5e)
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)

static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
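
/*
 * Example (illustrative, not part of the original header): rsvd_bits(s, e)
 * builds a mask of bits s..e inclusive.  mmu.c uses it when constructing
 * reserved-bit masks, e.g. rsvd_bits(maxphyaddr, 51); with maxphyaddr == 36
 * that is rsvd_bits(36, 51) == 0x000ffff000000000ULL, i.e. the sixteen
 * physical-address bits above the CPU's supported width.
 */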

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

/*
 * Return values of handle_mmio_page_fault:
 * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
 *			directly.
 * RET_MMIO_PF_INVALID: an invalid spte is detected, so let the real page
 *			fault path update the mmio spte.
 * RET_MMIO_PF_RETRY: let the CPU fault again on the address.
 * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_INVALID = 2,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};
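
/*
 * A minimal dispatch sketch (illustrative only; the real handling lives in
 * the vendor exit handlers and differs in detail):
 *
 *	int ret = handle_mmio_page_fault(vcpu, addr, direct);
 *
 *	if (ret == RET_MMIO_PF_EMULATE)
 *		emulate the faulting instruction;
 *	else if (ret == RET_MMIO_PF_INVALID)
 *		take the normal page-fault path so the mmio spte is rebuilt;
 *	else if (ret == RET_MMIO_PF_RETRY)
 *		re-enter the guest and let it fault again;
 *	else
 *		it is RET_MMIO_PF_BUG: a WARN has already fired, report an error;
 */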

int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

/*
 * Currently, we have two sorts of write-protection: a) the first one
 * write-protects guest pages in order to sync guest modifications, and b) the
 * other one is used to sync the dirty bitmap when we do KVM_GET_DIRTY_LOG.
 * The differences between these two sorts are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the tlb immediately to avoid corrupting
 *    the shadow page tables between vcpus, so it must run under the
 *    protection of mmu-lock.  The other case does not need to flush the tlb
 *    until the dirty bitmap is returned to userspace, since it only
 *    write-protects the pages logged in the bitmap; that means no page in the
 *    dirty bitmap is missed, so it can flush the tlb outside of mmu-lock.
 *
 * So there is a problem: the first case can observe a stale tlb entry left by
 * the second case, which write-protects pages without flushing the tlb
 * immediately.  To make the first case aware of this problem, we make it
 * flush the tlb whenever it write-protects a spte whose SPTE_MMU_WRITEABLE
 * bit is set; this works because the second case never touches the
 * SPTE_MMU_WRITEABLE bit.
 *
 * In any case, whenever a spte is updated (only permission and status bits
 * are changed) we need to check whether a spte with SPTE_MMU_WRITEABLE has
 * become read-only; if that happens, we need to flush the tlb.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - if we want to know whether a writable tlb entry may exist or whether the
 *   spte can be made writable on the mmu mapping, check SPTE_MMU_WRITEABLE;
 *   this is the common case.  Otherwise,
 * - if we fix a page fault on the spte, or do write-protection for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
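
/*
 * Illustrative sketch only (SPTE_MMU_WRITEABLE and the real helpers live in
 * mmu.c): following the rules above, code that wants to know whether a
 * writable tlb entry may exist, or whether the spte may still be made
 * writable, tests the software bit, while page-fault fixing and
 * dirty-logging write-protection test the hardware bit:
 *
 *	can_be_writable = spte & SPTE_MMU_WRITEABLE;	// "may be writable"
 *	is_writable     = is_writable_pte(spte);	// "is writable now"
 */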

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP prevention is disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
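	/*
	 * Worked example (not in the original source): with cpl == 0 and
	 * EFLAGS.AC set, (cpl - 3) is negative, so all of its high bits are
	 * set and smap == X86_EFLAGS_AC == (1UL << 18).  Shifting right by
	 * X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1 == 18 - 3 + 1 == 16 moves
	 * that bit to bit 2 of index, exactly where PFERR_RSVD_MASK would
	 * land after the "pfec >> 1" above, flagging the SMAP override.
	 */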
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 domains and 2
		 * attribute bits per domain in pkru.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
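
/*
 * A minimal usage sketch (illustrative only, not copied from the actual call
 * site): the guest page-table walker checks the accumulated pte_access of a
 * walk against the faulting access and reports the returned error code:
 *
 *	errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
 *	if (unlikely(errcode))
 *		goto error;	// inject a page fault with this error code
 */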

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
#endif