xref: /linux/arch/x86/kvm/mmu/mmu_internal.h (revision ec8a42e7343234802b9054874fe01810880289ce)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif
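
/*
 * Usage sketch (editorial illustration, not part of the original header):
 * with MMU_DEBUG defined and "dbg" set, call sites trace paging work like
 *
 *	pgprintk("%s: gfn %llx, spte %llx\n", __func__, gfn, *sptep);
 *
 * and the calls compile away entirely when MMU_DEBUG is left undefined.
 */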

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page.  */
	atomic_t write_flooding_count;

	bool tdp_mmu_page;
};
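
/*
 * Editorial sketch (not part of the original header): "role" and "gfn"
 * together key the shadow page hash table, so a single gfn may have
 * several shadow pages, one per distinct role (level, access bits, guest
 * paging mode, ...).  The lookup in mmu.c works roughly like:
 *
 *	idx = hash_64(gfn, KVM_MMU_HASH_SHIFT);
 *	hlist_for_each_entry(sp, &kvm->arch.mmu_page_hash[idx], hash_link)
 *		if (sp->gfn == gfn && sp->role.word == role.word)
 *			return sp;
 */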

extern struct kmem_cache *mmu_page_header_cache;

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}
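
/*
 * Editorial note: these helpers assume the struct page backing a shadow
 * page table has its "private" field pointing at the owning kvm_mmu_page.
 * A sketch of the expected setup at allocation time (based on
 * kvm_mmu_alloc_page() in mmu.c):
 *
 *	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
 *	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 */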

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the log
	 * would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages.  This also bypasses
	 * PML, since writes now result in a vmexit.
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
}
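
/*
 * Hypothetical caller sketch (names illustrative, not from this file):
 * a dirty-logging path can consult this helper to choose write protection
 * over hardware dirty-bit tracking, so the resulting write fault, rather
 * than PML, records the dirty page:
 *
 *	if (kvm_vcpu_ad_need_write_protect(vcpu))
 *		write_protect_spte(kvm, slot, sptep);
 *	else
 *		clear_hw_dirty_bit(sptep);
 */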

bool is_nx_huge_page_enabled(void);
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool can_unsync);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);

static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	BUG_ON(!sp->root_count);
	lockdep_assert_held(&kvm->mmu_lock);

	++sp->root_count;
}

static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	lockdep_assert_held(&kvm->mmu_lock);
	--sp->root_count;

	return !sp->root_count;
}
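
/*
 * Usage sketch (editorial, not part of the original header): roots are
 * pinned while in use and freed by the caller that drops the last
 * reference, all under mmu_lock.  E.g. the TDP MMU does roughly:
 *
 *	spin_lock(&kvm->mmu_lock);
 *	kvm_mmu_get_root(kvm, root);
 *	...
 *	if (kvm_mmu_put_root(kvm, root))
 *		kvm_tdp_mmu_free_root(kvm, root);
 *	spin_unlock(&kvm->mmu_lock);
 */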

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), and
 * fast_page_fault().
 *
 * RET_PF_RETRY: let the CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: the faulting entry has been fixed.
 * RET_PF_SPURIOUS: the faulting entry was already fixed, e.g. by another vCPU.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};
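
/*
 * Dispatch sketch (editorial, simplified from the callers in mmu.c and
 * x86.c):
 *
 *	r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false);
 *	if (r == RET_PF_RETRY)
 *		return 1;	- re-enter the guest and let it refault
 *	if (r == RET_PF_EMULATE)
 *		return x86_emulate_instruction(vcpu, cr2_or_gpa, ...);
 *
 * RET_PF_INVALID tells the caller to fall through to the real page fault
 * path; RET_PF_FIXED and RET_PF_SPURIOUS mean the access can simply be
 * re-executed.
 */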

/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
#define SET_SPTE_SPURIOUS		BIT(2)
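
/*
 * Caller sketch (editorial, roughly as in mmu_set_spte() in mmu.c): the
 * bits are OR'd into set_spte()'s return value and tested afterwards:
 *
 *	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn, ...);
 *	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT)
 *		ret = RET_PF_EMULATE;
 *	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
 *		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
 *						   KVM_PAGES_PER_HPAGE(level));
 */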

int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level);
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */