/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
#define __PT_LEVEL_SHIFT(level, bits_per_level)	\
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))

#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_ENT_PER_PAGE(bits_per_level)  (1 << (bits_per_level))
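
/*
 * Illustrative expansion (not referenced by the code below): with 64-bit
 * paging, bits_per_level is 9, so each level indexes 512 entries and
 * __PT_LEVEL_SHIFT() yields the familiar 12/21/30/39 shifts:
 *
 *	__PT_LEVEL_SHIFT(1, 9) == 12	(4KiB pages)
 *	__PT_LEVEL_SHIFT(2, 9) == 21	(2MiB pages)
 *	__PT_LEVEL_SHIFT(3, 9) == 30	(1GiB pages)
 *	__PT_INDEX(addr, 2, 9) == (addr >> 21) & 511
 */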
/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))

typedef u64 __rcu *tdp_ptep_t;

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	u8 mmu_valid_gen;

	/*
	 * The shadow page can't be replaced by an equivalent huge page
	 * because it is being used to map an executable page in the guest
	 * and the NX huge page mitigation is enabled.
	 */
	bool nx_huge_page_disallowed;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;

	/*
	 * Stores the result of the guest translation being shadowed by each
	 * SPTE.  KVM shadows two types of guest translations: nGPA -> GPA
	 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging). In both
	 * cases the result of the translation is a GPA and a set of access
	 * constraints.
	 *
	 * The GFN is stored in the upper bits (PAGE_SHIFT) and the shadowed
	 * access permissions are stored in the lower bits. Note, for
	 * convenience and uniformity across guests, the access permissions are
	 * stored in KVM format (e.g. ACC_EXEC_MASK) not the raw guest format.
	 */
	u64 *shadowed_translation;

	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	union {
		DECLARE_BITMAP(unsync_child_bitmap, 512);
		struct {
			struct work_struct tdp_mmu_async_work;
			void *tdp_mmu_async_data;
		};
	};

	/*
	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
	 * huge page.  A shadow page will have nx_huge_page_disallowed set but
	 * not be on the list if a huge page is disallowed for other reasons,
	 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
	 * isn't properly aligned, etc...
	 */
	struct list_head possible_nx_huge_page_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};
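
/*
 * Illustrative sketch of how the two keying fields are used (simplified from
 * the shadow-page lookup in mmu.c, not a verbatim copy): pages are hashed by
 * gfn and disambiguated by comparing the full role, roughly
 *
 *	for_each_valid_sp(kvm, sp, sp_list)
 *		if (sp->gfn == gfn && sp->role.word == role.word)
 *			return sp;
 */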

extern struct kmem_cache *mmu_page_header_cache;

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}
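
/*
 * Example (assumes the x86 convention of two address spaces): as_id 0 is
 * regular memory and as_id 1 is SMM, so the memslots covering a shadow page
 * can be found with something like
 *
 *	slots = __kvm_memslots(kvm, kvm_mmu_page_as_id(sp));
 */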

static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for PAE paging (32-bit KVM).
	 */
	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
}

int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}

struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault.  */
	const gpa_t addr;
	const u32 error_code;
	const bool prefetch;

	/* Derived from error_code.  */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state.  */
	const bool is_tdp;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva.  */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_faultin_pfn.  */
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	hva_t hva;
	bool map_writable;
};
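
/*
 * As a rule, the three level fields above only shrink as the fault is
 * processed, i.e. max_level >= req_level >= goal_level, with
 * kvm_mmu_hugepage_adjust() deriving req_level and goal_level from max_level,
 * the host mapping, and huge_page_disallowed.
 */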

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
 * and of course kvm_mmu_do_page_fault().
 *
 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h
 *
 * Note, all values must be greater than or equal to zero so as not to encroach
 * on -errno return values.  Somewhat arbitrarily use '0' for CONTINUE, which
 * will allow for efficient machine code when checking for CONTINUE, e.g.
 * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero.
 */
enum {
	RET_PF_CONTINUE = 0,
	RET_PF_RETRY,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};
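
/*
 * Usage sketch of the CONTINUE convention (mirrors the pattern used by the
 * fault handlers in mmu.c; exact helper signatures vary across kernel
 * versions): intermediate steps return RET_PF_CONTINUE to keep going, and
 * any other value is propagated as-is to stop the walk, e.g.
 *
 *	r = kvm_faultin_pfn(vcpu, fault);
 *	if (r != RET_PF_CONTINUE)
 *		return r;
 */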

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefetch)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
	};
	int r;

	/*
	 * Async #PF "faults", a.k.a. prefetch faults, are not faults from the
	 * guest perspective and have already been counted at the time of the
	 * original fault.
	 */
	if (!prefetch)
		vcpu->stat.pf_taken++;

	if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	/*
	 * Similar to above, prefetch faults aren't truly spurious, and the
	 * async #PF path doesn't do emulation.  Do count faults that are fixed
	 * by the async #PF handler though, otherwise they'll never be counted.
	 */
	if (r == RET_PF_FIXED)
		vcpu->stat.pf_fixed++;
	else if (prefetch)
		;
	else if (r == RET_PF_EMULATE)
		vcpu->stat.pf_emulate++;
	else if (r == RET_PF_SPURIOUS)
		vcpu->stat.pf_spurious++;
	return r;
}
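
/*
 * Illustrative call site (see kvm_mmu_page_fault() in mmu.c; details may
 * differ by kernel version): a negative return is an -errno, zero or greater
 * is one of the RET_PF_* values above, e.g.
 *
 *	r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, lower_32_bits(error_code),
 *				  false);
 *	if (r < 0)
 *		return r;
 */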

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      int max_level);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */