// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

/*
 * Take a reference on a TDP MMU root.  Returns false if the root's refcount
 * has already dropped to zero, i.e. the root is being freed and must not be
 * used.
 */
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);

bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * if mmu_lock is held for write, zapping will never yield in this
	 * case, but explicitly disallow it for safety.  The TDP MMU does not
	 * yield until it has made forward progress (steps sideways), and when
	 * zapping a single shadow page that it's guaranteed to see (thus the
	 * mmu_lock requirement), its "step sideways" will always step beyond
	 * the bounds of the shadow page's gfn range and stop iterating before
	 * yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false);
}
void kvm_tdp_mmu_zap_all(struct kvm *kvm);

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

/* TDP MMU backends for KVM's mmu_notifier-driven gfn range operations. */
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

/* Dirty logging and write protection helpers, operating on a memslot. */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn);

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

/* The TDP MMU is only supported on 64-bit hosts. */
#ifdef CONFIG_X86_64
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif

static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!is_tdp_mmu_enabled(kvm))
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;
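	/*
	 * The page is a TDP MMU root iff it was allocated by the TDP MMU and
	 * is still in use as a root, i.e. has a non-zero root count.
	 */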

	return is_tdp_mmu_page(sp) && sp->root_count;
}

#endif /* __KVM_X86_MMU_TDP_MMU_H */
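
/*
 * Usage sketch (illustrative only, not part of this header): because
 * kvm_tdp_mmu_zap_sp() never yields and never flushes, a caller holding
 * mmu_lock for write can batch zaps of individual shadow pages and issue a
 * single TLB flush at the end.  The invalid_list below is a hypothetical
 * list of TDP MMU shadow pages queued for destruction:
 *
 *	struct kvm_mmu_page *sp;
 *	bool flush = false;
 *
 *	write_lock(&kvm->mmu_lock);
 *	list_for_each_entry(sp, &invalid_list, link)
 *		flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);
 *	write_unlock(&kvm->mmu_lock);
 */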