// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

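/*
 * Get, allocating if necessary, the TDP MMU root for @vcpu, and return its
 * host physical address (see tdp_mmu.c for the implementation).
 */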
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

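/*
 * Take a reference on @root.  Fails (returns false) if the root has been
 * marked invalid or its reference count has already dropped to zero, in
 * which case the caller must not use the root.  Pairs with
 * kvm_tdp_mmu_put_root() below.
 */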
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	if (root->role.invalid)
		return false;

	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}
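
/*
 * Illustrative pairing (a sketch, not code from this file): a successful
 * kvm_tdp_mmu_get_root() must be balanced by a kvm_tdp_mmu_put_root(),
 * where "shared" indicates whether mmu_lock is held for read or write:
 *
 *	if (kvm_tdp_mmu_get_root(kvm, root)) {
 *		... walk the paging structure rooted at root->spt ...
 *		kvm_tdp_mmu_put_root(kvm, root, shared);
 *	}
 */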

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush,
				 bool shared);
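/*
 * Zap SPTEs covering the gfn range [start, end), allowing the walk to yield
 * (can_yield == true); the return value indicates whether the caller still
 * needs to flush TLBs.  Callers that cannot tolerate dropping mmu_lock
 * mid-walk, e.g. kvm_tdp_mmu_zap_sp() below, use
 * __kvm_tdp_mmu_zap_gfn_range() directly with can_yield == false.
 */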
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush,
					     bool shared)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush,
					   shared);
}

static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
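	/*
	 * A shadow page at level N covers the same gfn range as a single huge
	 * page one level up, hence "role.level + 1" to compute the exclusive
	 * end of the page's range.
	 */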
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * zapping will never yield when mmu_lock is held for write, but
	 * explicitly disallow it for safety.  The TDP MMU does not yield until
	 * it has made forward progress (steps sideways), and when zapping a
	 * single shadow page that it's guaranteed to see (thus the mmu_lock
	 * requirement), its "step sideways" will always step beyond the bounds
	 * of the shadow page's gfn range and stop iterating before yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false, false);
}

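/*
 * Tearing down all roots is split in two so that the slow part can yield:
 * kvm_tdp_mmu_invalidate_all_roots() marks every root invalid so that no
 * new references can be taken, then kvm_tdp_mmu_zap_invalidated_roots()
 * performs the actual zapping (see tdp_mmu.c).
 */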
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

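/*
 * Handle a TDP page fault by installing the SPTEs needed to map @pfn at
 * @gpa; returns a RET_PF_* code (see tdp_mmu.c).
 */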
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

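/*
 * kvm_gfn_range-based hooks backing KVM's MMU notifier callbacks (unmap,
 * age, test_age and change_pte); see tdp_mmu.c for the exact return
 * semantics of each.
 */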
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

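/*
 * Memslot maintenance, primarily for dirty logging: write-protect or clear
 * the dirty status of a slot's SPTEs, and zap SPTEs that could be collapsed
 * back into huge pages when dirty logging is disabled.
 */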
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

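/*
 * Record the SPTEs encountered while translating @addr into @sptes and the
 * root level into @root_level, for use by the MMIO SPTE fast path; the
 * return value is the level of the last SPTE in the walk (see tdp_mmu.c).
 */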
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

#ifdef CONFIG_X86_64
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root_hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif
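
/*
 * Illustrative call pattern (a sketch, not code from this file): callers
 * in mmu.c typically gate TDP MMU paths on is_tdp_mmu_enabled(), e.g.
 *
 *	if (is_tdp_mmu_enabled(kvm))
 *		flush = kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end,
 *						  flush, false);
 */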

#endif /* __KVM_X86_MMU_TDP_MMU_H */