// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

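/*
 * Get the vCPU's TDP MMU root page table, allocating one if necessary, and
 * return its physical address.  Roots are freed via kvm_tdp_mmu_free_root()
 * once they are no longer in use.
 */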
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);

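/*
 * Zap SPTEs covering the GFN range [start, end) in the given address space.
 * Returns true if the caller must flush TLBs before releasing mmu_lock; the
 * incoming @flush state is accumulated into the return value.
 */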
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * if mmu_lock is held for write, zapping will never yield in this case,
	 * but explicitly disallow it for safety.  The TDP MMU does not yield
	 * until it has made forward progress (steps sideways), and when zapping
	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
	 * requirement), its "step sideways" will always step beyond the bounds
	 * of the shadow page's gfn range and stop iterating before yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false);
}
void kvm_tdp_mmu_zap_all(struct kvm *kvm);

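/*
 * Handle a TDP page fault by installing new SPTEs (and paging structure
 * pages) as needed to map @pfn at @gpa.
 */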
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

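/* TDP MMU implementations of the MMU notifier gfn-range operations. */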
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

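/*
 * Dirty logging: write-protect or clear the dirty status of SPTEs in the
 * given memslot, and zap SPTEs that could be collapsed back into huge pages
 * once dirty logging is disabled.
 */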
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       struct kvm_memory_slot *slot, bool flush);

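/* Remove write access from the last-level SPTE mapping @gfn, if present. */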
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn);

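/*
 * Walk the TDP paging structure for @addr, filling @sptes[] with the SPTE
 * encountered at each level; returns the level of the leaf SPTE.
 */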
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

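/* The TDP MMU is only supported on 64-bit hosts; stub it out elsewhere. */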
#ifdef CONFIG_X86_64
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif

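/* Returns true if @hpa is the physical address of an active TDP MMU root. */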
static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!is_tdp_mmu_enabled(kvm))
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;

	return is_tdp_mmu_page(sp) && sp->root_count;
}

#endif /* __KVM_X86_MMU_TDP_MMU_H */