// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

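/*
 * Return the HPA of a TDP MMU root page table matching @vcpu's current MMU
 * role, allocating a new root if a suitable one doesn't already exist.
 */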
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	if (root->role.invalid)
		return false;

	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);
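
/*
 * A sketch of typical root usage: take a reference before using a root
 * outside of a context that already holds one, and put it when done, e.g.
 *
 *	if (kvm_tdp_mmu_get_root(kvm, root)) {
 *		<walk or modify the paging structure>
 *		kvm_tdp_mmu_put_root(kvm, root, shared);
 *	}
 *
 * kvm_tdp_mmu_get_root() fails if the root has been invalidated or its
 * last reference has already been put.  @shared tells put_root whether
 * mmu_lock is held for read (shared) rather than write.
 */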
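/*
 * Zap the SPTEs mapping the gfn range [start, end) in address space @as_id.
 * @can_yield indicates whether mmu_lock may be dropped to reschedule,
 * @flush accumulates whether a TLB flush is already pending from a prior
 * zap, and @shared indicates whether mmu_lock is held for read (shared)
 * rather than write.  Returns true if the caller needs to flush TLBs
 * before releasing mmu_lock.
 */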
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush,
				 bool shared);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush,
					     bool shared)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush,
					   shared);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * if mmu_lock is held for write, zapping will never yield in this case,
	 * but explicitly disallow it for safety.  The TDP MMU does not yield
	 * until it has made forward progress (steps sideways), and when zapping
	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
	 * requirement), its "step sideways" will always step beyond the bounds
	 * of the shadow page's gfn range and stop iterating before yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false, false);
}

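/*
 * kvm_tdp_mmu_zap_all() zaps all TDP MMU roots synchronously.  The
 * invalidate/zap pair implements the "fast zap": mark every valid root
 * invalid under write-locked mmu_lock so new users can't take a reference,
 * then zap the invalidated roots, yielding as needed.
 */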
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

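/*
 * Handle a TDP page fault (NPT/EPT violation) for @gpa by installing the
 * mapping for @pfn, building the paging structure as necessary.  Returns
 * a RET_PF_* value.
 */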
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

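/*
 * TDP MMU handlers backing the kvm_gfn_range-based MMU notifier callbacks:
 * unmapping, aging (accessed-bit harvesting), access testing, and
 * change_pte.
 */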
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

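/*
 * Dirty logging support: write-protect all SPTEs in a memslot down to
 * @min_level, clear the dirty status of SPTEs, and zap the small SPTEs
 * that could be collapsed back into huge pages once dirty logging is
 * disabled.  The bool returns indicate whether a TLB flush is needed.
 */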
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush);

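/*
 * Write-protect the SPTE mapping @gfn in @slot, considering only mappings
 * at @min_level or above.  Returns true if an SPTE was changed and TLBs
 * need to be flushed.
 */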
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

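/*
 * TDP MMU shadow pages are freed via an RCU callback, so a walker that
 * holds the RCU read lock is guaranteed the paging structure won't be
 * freed out from under it.  These helpers bracket such lockless walks.
 */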
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

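/*
 * Record the SPTEs walked while translating @addr into @sptes, indexed by
 * level, and return the level of the lowest SPTE reached (or -1 if the
 * walk was empty).  Callers must bracket the walk, roughly (a sketch of
 * the pattern used by get_mmio_spte() in mmu.c):
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 *
 * kvm_tdp_mmu_fast_pf_get_last_sptep() likewise returns a pointer to the
 * last SPTE in the walk for @addr and stores its value in @spte, for use
 * by the fast page fault handler.
 */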
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

#ifdef CONFIG_X86_64
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

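/*
 * Returns true if @mmu's root page was allocated by (and thus faults on
 * it are handled by) the TDP MMU.
 */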
static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root_hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */