// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

#include "spte.h"

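/* Set up and tear down the VM-wide state used by the TDP MMU. */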
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);

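/*
 * Get a TDP MMU root for the vCPU's current MMU mode, allocating a new
 * root if no compatible one already exists.
 */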
int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu);

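/*
 * Try to take a reference on @root; fails (returns false) if the root's
 * refcount has already dropped to zero, i.e. the root is being torn down.
 */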
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}
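/* Drop a reference on @root; dropping the last reference tears the root down. */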
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);

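/*
 * Zap SPTEs in a gfn range, a single shadow page, or all of guest memory,
 * and invalidate/reap stale roots.  The boolean "flush" in/out values track
 * whether a TLB flush is still required when a helper returns.
 */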
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

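/*
 * Handle a TDP page fault: walk down to the target level, installing
 * non-leaf SPTEs as needed, and map the faulting gfn with a leaf SPTE.
 */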
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

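/* mmu_notifier hooks: unmap, age, and test-age SPTEs in a gfn range. */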
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

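/*
 * Dirty logging and memslot maintenance: write-protect a whole memslot,
 * clear dirty bits (slot-wide or for a mask of gfns), and zap SPTEs that
 * could be collapsed into huge pages once dirty logging is disabled.
 */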
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

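/*
 * Remove write access from the SPTE(s) mapping @gfn at @min_level and
 * above.  Returns true if any SPTE was changed.
 */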
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

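/*
 * Eagerly split huge pages in the gfn range down to @target_level, e.g. so
 * that dirty logging doesn't have to split them on write faults.  @shared
 * indicates whether mmu_lock is held for read (true) or write (false).
 */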
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);

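/*
 * Lockless walks of the TDP page tables rely on RCU to keep the paging
 * structures from being freed out from under the walker.
 */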
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

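/*
 * Lockless walk helpers, e.g. for retrieving MMIO SPTEs and for the fast
 * page fault path.  Callers must bracket the walk with the lockless
 * begin/end helpers above.
 */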
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
					u64 *spte);

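/* The TDP MMU is 64-bit only; on 32-bit KVM no page is a TDP MMU page. */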
#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */