// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

#include "spte.h"

int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
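
/*
 * Per-VM setup/teardown. A minimal sketch of the expected call sites
 * (illustrative, not copied from this tree): kvm_mmu_init_tdp_mmu() can
 * fail, e.g. on allocation failure, so the per-VM MMU init path is
 * expected to propagate its return value:
 *
 *	r = kvm_mmu_init_tdp_mmu(kvm);
 *	if (r < 0)
 *		return r;
 *
 * with kvm_mmu_uninit_tdp_mmu(kvm) called on VM destruction.
 */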

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);
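
/*
 * A hedged usage sketch (illustrative, not verbatim from the walkers in
 * tdp_mmu.c): taking a reference keeps a root's paging structure alive
 * while it is walked; kvm_tdp_mmu_get_root() returns false if the
 * refcount already hit zero, i.e. the root is being torn down:
 *
 *	if (!kvm_tdp_mmu_get_root(root))
 *		continue;	// root is dying, skip it
 *	// ... walk the paging structure under @root ...
 *	kvm_tdp_mmu_put_root(kvm, root, shared);
 *
 * @shared indicates whether mmu_lock is held for read (shared) or write,
 * as dropping the last reference behaves differently in the two modes.
 */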

bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
			   gfn_t end, bool can_yield, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
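
/*
 * Sketch of the flush convention (an illustration; see the callers in
 * mmu.c for the authoritative pattern): helpers that return bool report
 * whether any SPTE was zapped, so callers thread the result through
 * @flush and issue one remote TLB flush at the end:
 *
 *	flush = kvm_tdp_mmu_zap_leafs(kvm, as_id, start, end, true, flush);
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);
 */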

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
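
/*
 * kvm_tdp_mmu_map() is the TDP MMU half of the page fault path. A rough,
 * simplified sketch of its place in the fault handler (the real caller
 * holds mmu_lock for read and handles retries):
 *
 *	if (is_tdp_mmu(vcpu->arch.mmu))
 *		r = kvm_tdp_mmu_map(vcpu, fault);	// returns RET_PF_*
 */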

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
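
/*
 * The four handlers above back the arch mmu_notifier callbacks. A hedged
 * sketch of the dispatch in mmu.c (simplified; the shadow MMU handler
 * runs alongside it):
 *
 *	if (is_tdp_mmu_enabled(kvm))
 *		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
 *	return flush;	// true => the caller flushes TLBs
 */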

bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
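
/*
 * Illustration of the @mask semantics (a sketch; the authoritative
 * caller is kvm_arch_mmu_enable_log_dirty_pt_masked() in mmu.c): bit N
 * set in @mask selects gfn + N, so dirty state is cleared for up to 64
 * pages starting at @gfn. With @wrprot true the selected pages are
 * write-protected instead of having their dirty bits cleared.
 */
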
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);
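
/*
 * Eager page splitting, sketched (illustrative): the dirty-logging code
 * can split all huge pages in a slot down to 4K up front so that write
 * faults don't have to:
 *
 *	kvm_tdp_mmu_try_split_huge_pages(kvm, slot, slot->base_gfn,
 *					 slot->base_gfn + slot->npages,
 *					 PG_LEVEL_4K, shared);
 *
 * @shared again says whether mmu_lock is held for read or write.
 */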

static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);
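
/*
 * Hedged example of a lockless walk, patterned on the MMIO SPTE lookup
 * in mmu.c (simplified): the begin/end helpers above are plain RCU
 * read-side markers, so any SPTE observed in between may be zapped at
 * any instant and must be treated as volatile:
 *
 *	int leaf, root_level;
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 *
 * On return, sptes[leaf..root_level] holds the SPTEs encountered.
 */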

#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root.hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */