// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

#include "spte.h"

void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);

void kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu, bool private);

/* Grab a reference on @root unless it is already being freed. */
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);

/* Bitmask of root classes an operation should be applied to. */
enum kvm_tdp_mmu_root_types {
	KVM_INVALID_ROOTS = BIT(0),
	KVM_DIRECT_ROOTS = BIT(1),
	KVM_MIRROR_ROOTS = BIT(2),
	KVM_VALID_ROOTS = KVM_DIRECT_ROOTS | KVM_MIRROR_ROOTS,
	KVM_ALL_ROOTS = KVM_VALID_ROOTS | KVM_INVALID_ROOTS,
};

/*
 * Map a gfn range filter to the roots it applies to.  Without mirrored TDP,
 * e.g. without TDX, only direct roots exist.
 */
static inline enum kvm_tdp_mmu_root_types kvm_gfn_range_filter_to_root_types(struct kvm *kvm,
						enum kvm_gfn_range_filter process)
{
	enum kvm_tdp_mmu_root_types ret = 0;

	if (!kvm_has_mirrored_tdp(kvm))
		return KVM_DIRECT_ROOTS;

	if (process & KVM_FILTER_PRIVATE)
		ret |= KVM_MIRROR_ROOTS;
	if (process & KVM_FILTER_SHARED)
		ret |= KVM_DIRECT_ROOTS;

	WARN_ON_ONCE(!ret);

	return ret;
}

/* Faults on non-direct, i.e. private, GPAs are handled by the mirror root. */
static inline struct kvm_mmu_page *tdp_mmu_get_root_for_fault(struct kvm_vcpu *vcpu,
							      struct kvm_page_fault *fault)
{
	if (unlikely(!kvm_is_addr_direct(vcpu->kvm, fault->addr)))
		return root_to_sp(vcpu->arch.mmu->mirror_root_hpa);

	return root_to_sp(vcpu->arch.mmu->root.hpa);
}

static inline struct kvm_mmu_page *tdp_mmu_get_root(struct kvm_vcpu *vcpu,
						    enum kvm_tdp_mmu_root_types type)
{
	if (unlikely(type == KVM_MIRROR_ROOTS))
		return root_to_sp(vcpu->arch.mmu->mirror_root_hpa);

	return root_to_sp(vcpu->arch.mmu->root.hpa);
}

bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
				  enum kvm_tdp_mmu_root_types root_types);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared);

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
void kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_recover_huge_pages(struct kvm *kvm,
				    const struct kvm_memory_slot *slot);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);

/* Lockless SPTE walks are protected by RCU, not by mmu_lock. */
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
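/*
 * Illustrative sketch only, not part of the upstream header: a lockless
 * walk is expected to bracket kvm_tdp_mmu_get_walk() with the begin/end
 * helpers above (the local variables here are hypothetical):
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 *
 * The walk is only RCU-protected, so the captured SPTEs may already be
 * stale once the read-side critical section ends.
 */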
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
					u64 *spte);

/* The TDP MMU is only built, and thus only usable, on 64-bit hosts. */
#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif
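/*
 * Illustrative sketch only, not part of the upstream header: code that
 * takes a reference with kvm_tdp_mmu_get_root() must pair it with
 * kvm_tdp_mmu_put_root() once the walk is done; a false return means the
 * refcount already hit zero and the root is being torn down, so skip it:
 *
 *	if (kvm_tdp_mmu_get_root(root)) {
 *		... walk the paging structure rooted at @root ...
 *		kvm_tdp_mmu_put_root(kvm, root);
 *	}
 */

#endif /* __KVM_X86_MMU_TDP_MMU_H */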