/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

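/*
 * Tracks the metadata for one page-table page of the shadow MMU or, on
 * x86-64, the TDP MMU.  The page table itself lives in the separate page
 * pointed at by @spt; that page's 'private' field points back at this
 * struct (see to_shadow_page()).
 */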
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* Holds the gfn of each spte inside spt. */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page.  */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Distinguishes TDP MMU pages from shadow MMU pages. */
	bool tdp_mmu_page;

	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;

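/*
 * Recover the kvm_mmu_page metadata for a page-table page given the
 * physical address of the table, via the back-pointer stashed in the
 * struct page's 'private' field when the page was allocated.
 */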
static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

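/*
 * An spte lives inside the page-table page at sp->spt, so __pa(sptep)
 * falls within that page and to_shadow_page() recovers the owning
 * kvm_mmu_page.
 */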
static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for NPT w/ PAE (32-bit KVM).
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
	       kvm_x86_ops.cpu_dirty_log_size;
}

bool is_nx_huge_page_enabled(void);
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool can_unsync);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);

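/*
 * Roots are reference counted via root_count while they are in use as a
 * page-table root.  Both helpers require mmu_lock to be held;
 * kvm_mmu_put_root() returns true when the last reference is dropped and
 * the root can be torn down.
 */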
static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	BUG_ON(!sp->root_count);
	lockdep_assert_held(&kvm->mmu_lock);

	++sp->root_count;
}

static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	lockdep_assert_held(&kvm->mmu_lock);
	--sp->root_count;

	return !sp->root_count;
}

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault() and
 * fast_page_fault().
 *
 * RET_PF_RETRY: Let the CPU fault again on the address.
 * RET_PF_EMULATE: MMIO page fault, emulate the instruction directly.
 * RET_PF_INVALID: The spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

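/*
 * A minimal sketch of how a caller might dispatch on these values; the
 * real handling lives in kvm_mmu_page_fault() and is more involved:
 *
 *	r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false);
 *	if (r == RET_PF_RETRY)
 *		return 1;
 *	if (r == RET_PF_EMULATE)
 *		return x86_emulate_instruction(vcpu, cr2_or_gpa,
 *					       emulation_type, NULL, 0);
 */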
/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)	/* gfn had to stay write-protected */
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)	/* other vCPUs' TLBs must be flushed */
#define SET_SPTE_SPURIOUS		BIT(2)	/* the new spte matched the old one */

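/*
 * A sketch of how mmu_set_spte() consumes these bits, simplified from the
 * real code in mmu.c:
 *
 *	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
 *				speculative, can_unsync, host_writable);
 *	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
 *		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
 *						   KVM_PAGES_PER_HPAGE(level));
 */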
int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
			      gfn_t gfn, kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level);
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */