/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count; /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;
};
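
/*
 * Note: the lookup helpers below rely on the back-pointer that KVM
 * stashes in the struct page of every shadow page table page; the MMU
 * allocation path points the page's private field back at the owning
 * kvm_mmu_page via set_page_private().
 */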
static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the log
	 * would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages.  This also bypasses
	 * PML, since writes now result in a vmexit.
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
}

bool is_nx_huge_page_enabled(void);
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool can_unsync);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);

#endif /* __KVM_X86_MMU_INTERNAL_H */
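
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * mapping an spte back to the gfn it translates.  Indirect shadow pages
 * record the gfn of every spte in sp->gfns; direct pages can derive it
 * from the page's base gfn and the spte's index, in the same way that
 * kvm_mmu_page_get_gfn() does in mmu.c:
 *
 *	static gfn_t example_spte_to_gfn(u64 *sptep)
 *	{
 *		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 *		int index = sptep - sp->spt;
 *
 *		if (!sp->role.direct)
 *			return sp->gfns[index];
 *		return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
 *	}
 */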