/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count;		/* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	bool tdp_mmu_page;

	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};
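
/*
 * Note: @spt points at a full page of sptes.  The allocator stashes a
 * back-pointer to the owning kvm_mmu_page in page_private() of the page
 * backing @spt (see kvm_mmu_alloc_page() in mmu.c); to_shadow_page() below
 * relies on that linkage to recover the shadow page from an spte address.
 */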

extern struct kmem_cache *mmu_page_header_cache;

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the log
	 * would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages.  This also bypasses
	 * PML, since writes now result in a vmexit.
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
}

bool is_nx_huge_page_enabled(void);
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool can_unsync);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);

static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	BUG_ON(!sp->root_count);
	lockdep_assert_held(&kvm->mmu_lock);

	++sp->root_count;
}

static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	lockdep_assert_held(&kvm->mmu_lock);
	--sp->root_count;

	return !sp->root_count;
}
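
/*
 * Note: kvm_mmu_get_root() and kvm_mmu_put_root() must be called with
 * mmu_lock held, as enforced by the lockdep assertions above.  A true
 * return from kvm_mmu_put_root() means the last reference was dropped
 * and the caller is responsible for freeing/zapping the root.
 */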

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), and
 * fast_page_fault().
 *
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
#define SET_SPTE_SPURIOUS		BIT(2)

int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
			      gfn_t gfn, kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level);
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */