/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#include "mmu.h"

#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif

/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12)
#define __PT_LEVEL_SHIFT(level, bits_per_level)	\
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))

#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_ENT_PER_PAGE(bits_per_level) (1 << (bits_per_level))

/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))

static inline hpa_t kvm_mmu_get_dummy_root(void)
{
	return my_zero_pfn(0) << PAGE_SHIFT;
}

static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{
	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
}

typedef u64 __rcu *tdp_ptep_t;

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	union {
		u8 mmu_valid_gen;

		/* Only accessed under slots_lock. */
		bool tdp_mmu_scheduled_root_to_zap;
	};

	/*
	 * The shadow page can't be replaced by an equivalent huge page
	 * because it is being used to map an executable page in the guest
	 * and the NX huge page mitigation is enabled.
	 */
	bool nx_huge_page_disallowed;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;

	/*
	 * Stores the result of the guest translation being shadowed by each
	 * SPTE.  KVM shadows two types of guest translations: nGPA -> GPA
	 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging).  In both
	 * cases the result of the translation is a GPA and a set of access
	 * constraints.
	 *
	 * The GFN is stored in the upper bits (PAGE_SHIFT) and the shadowed
	 * access permissions are stored in the lower bits.  Note, for
	 * convenience and uniformity across guests, the access permissions are
	 * stored in KVM format (e.g. ACC_EXEC_MASK) not the raw guest format.
	 */
	u64 *shadowed_translation;
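	/*
	 * Illustrative example for shadowed_translation (made-up GFN): an
	 * entry shadowing a translation to GFN 0x1234 with full permissions
	 * would hold (0x1234ULL << PAGE_SHIFT) | ACC_ALL, i.e. the GFN in
	 * the upper bits and the KVM-format ACC_* flags in the low bits.
	 */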

	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};

	bool has_mapped_host_mmio;

	union {
		/* These two members aren't used for TDP MMU */
		struct {
			unsigned int unsync_children;
			/*
			 * Number of writes since the last time traversal
			 * visited this page.
			 */
			atomic_t write_flooding_count;
		};
		/*
		 * Page table page of external PT.
		 * Passed to TDX module, not accessed by KVM.
		 */
		void *external_spt;
	};
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);

	/*
	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
	 * huge page.  A shadow page will have nx_huge_page_disallowed set but
	 * not be on the list if a huge page is disallowed for other reasons,
	 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
	 * isn't properly aligned, etc...
	 */
	struct list_head possible_nx_huge_page_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}

static inline bool is_mirror_sp(const struct kvm_mmu_page *sp)
{
	return sp->role.is_mirror;
}

static inline void kvm_mmu_alloc_external_spt(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	/*
	 * external_spt is allocated for the TDX module to hold private EPT
	 * mappings; the TDX module initializes the page itself.  Therefore,
	 * KVM does not need to initialize or access external_spt.  KVM only
	 * interacts with sp->spt for private EPT operations.
	 */
	sp->external_spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_external_spt_cache);
}

static inline gfn_t kvm_gfn_root_bits(const struct kvm *kvm, const struct kvm_mmu_page *root)
{
	/*
	 * Since mirror SPs are used only for TDX, which maps private memory
	 * at its "natural" GFN, no mask needs to be applied to them - and,
	 * dually, these bits are expected to be used only for the shared PT.
	 */
	if (is_mirror_sp(root))
		return 0;
	return kvm_gfn_direct_bits(kvm);
}

static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm *kvm,
						      struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for PAE paging (32-bit KVM).
	 */
	return kvm->arch.cpu_dirty_log_size && sp->role.guest_mode;
}

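/*
 * For example, with level == PG_LEVEL_2M, KVM_PAGES_PER_HPAGE(level) is 512,
 * so gfn_round_for_level() rounds the GFN down to a 512-page (2MiB-aligned)
 * boundary.
 */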
static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -KVM_PAGES_PER_HPAGE(level);
}

int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool synchronizing, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);

/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
				    KVM_PAGES_PER_HPAGE(level));
}

unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}

struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault. */
	const gpa_t addr;
	const u64 error_code;
	const bool prefetch;

	/* Derived from error_code. */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state. */
	const bool is_tdp;
	const bool is_private;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/*
	 * Shifted addr, or result of guest page table walk if addr is a gva.
	 * In the case of a VM where memslots can be mapped at multiple GPA
	 * aliases (i.e. TDX), the gfn field does not contain the bit that
	 * selects between the aliases (i.e. the shared bit for TDX).
	 */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_mmu_faultin_pfn(). */
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	struct page *refcounted_page;
	bool map_writable;

	/*
	 * Indicates the guest is trying to write a gfn that contains one or
	 * more of the PTEs used to translate the write itself, i.e. the access
	 * is changing its own translation in the guest page tables.
	 */
	bool write_fault_to_shadow_pgtable;
};

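/*
 * Loosely, the page size fields above are expected to end up ordered as
 * goal_level <= req_level <= max_level once kvm_mmu_hugepage_adjust() has
 * run; see that function and disallowed_hugepage_adjust() for the details.
 */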

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
 * and of course kvm_mmu_do_page_fault().
 *
 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_WRITE_PROTECTED: the gfn is write-protected, either unprotect the
 *			   gfn and retry, or emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h.
 *
 * Note, all values must be greater than or equal to zero so as not to encroach
 * on -errno return values.
 */
enum {
	RET_PF_CONTINUE = 0,
	RET_PF_RETRY,
	RET_PF_EMULATE,
	RET_PF_WRITE_PROTECTED,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

/*
 * Define RET_PF_CONTINUE as 0 to allow for
 * - efficient machine code when checking for CONTINUE, e.g.
 *   "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero,
 * - kvm_mmu_do_page_fault() to return other RET_PF_* as a positive value.
 */
static_assert(RET_PF_CONTINUE == 0);

static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
						     struct kvm_page_fault *fault)
{
	kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
				      PAGE_SIZE, fault->write, fault->exec,
				      fault->is_private);
}

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u64 err, bool prefetch,
					int *emulation_type, u8 *level)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
		.is_private = err & PFERR_PRIVATE_ACCESS,

		.pfn = KVM_PFN_ERR_FAULT,
	};
	int r;

	if (vcpu->arch.mmu->root_role.direct) {
		/*
		 * Things like memslots don't understand the concept of a
		 * shared bit.  Strip it so that the GFN can be used like
		 * normal, and fault.addr can be used when the shared bit is
		 * needed.
		 */
		fault.gfn = gpa_to_gfn(fault.addr) & ~kvm_gfn_direct_bits(vcpu->kvm);
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

	/*
	 * With retpoline being active an indirect call is rather expensive,
	 * so do a direct call in the most common case.
	 */
	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	/*
	 * Not sure what's happening, but punt to userspace and hope that
	 * they can fix it by changing memory to shared, or they can
	 * provide a better error.
	 */
	if (r == RET_PF_EMULATE && fault.is_private) {
		pr_warn_ratelimited("kvm: unexpected emulation request on private memory\n");
		kvm_mmu_prepare_memory_fault_exit(vcpu, &fault);
		return -EFAULT;
	}

	if (fault.write_fault_to_shadow_pgtable && emulation_type)
		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;
	if (level)
		*level = fault.goal_level;

	return r;
}

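/*
 * Illustrative sketch, not an actual caller: users of kvm_mmu_do_page_fault()
 * treat a negative return as -errno and a non-negative return as one of the
 * RET_PF_* values, e.g.:
 *
 *	r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false,
 *				  &emulation_type, NULL);
 *	if (r < 0)
 *		return r;
 *	if (r == RET_PF_RETRY)
 *		return 1;
 *
 * where returning 1 resumes the guest so that it can re-fault on the address.
 */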

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */