/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * Internal KVM helpers shared between the architecture-neutral parts of
 * KVM (kvm_main.c, pfncache, guest_memfd) but not exported to the rest
 * of the kernel.
 */
#ifndef __KVM_MM_H__
#define __KVM_MM_H__ 1

/*
 * Architectures can choose whether to use an rwlock or spinlock
 * for the mmu_lock.  These macros, for use in common code
 * only, avoid using #ifdefs in places that must deal with
 * multiple architectures.
 */
#ifdef KVM_HAVE_MMU_RWLOCK
/* rwlock flavor: common code always takes the lock for write. */
#define KVM_MMU_LOCK_INIT(kvm)		rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		write_unlock(&(kvm)->mmu_lock)
#else
/* spinlock flavor. */
#define KVM_MMU_LOCK_INIT(kvm)		spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */

/*
 * Translate a host virtual address to a host pfn.
 *
 * NOTE(review): parameter semantics inferred from names — confirm against
 * the definition in kvm_main.c: @atomic presumably forbids sleeping,
 * @interruptible presumably allows fatal signals to abort the fault,
 * @async/@writable are in/out flags, @write_fault requests write access.
 */
kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
		     bool *async, bool write_fault, bool *writable);

#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
				       unsigned long start,
				       unsigned long end,
				       bool may_block);
#else
/* No-op stub so callers need not #ifdef on CONFIG_HAVE_KVM_PFNCACHE. */
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end,
						     bool may_block)
{
}
#endif /* CONFIG_HAVE_KVM_PFNCACHE */

#ifdef CONFIG_KVM_PRIVATE_MEM
void kvm_gmem_init(struct module *module);
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset);
void kvm_gmem_unbind(struct kvm_memory_slot *slot);
#else
/*
 * Stubs for builds without guest_memfd support.  bind/unbind WARN because
 * they must be unreachable when CONFIG_KVM_PRIVATE_MEM=n; init is a benign
 * no-op.
 *
 * NOTE(review): there is no !CONFIG stub for kvm_gmem_create() —
 * presumably its only caller is itself compiled out under this config;
 * confirm against the ioctl dispatch in kvm_main.c.
 */
static inline void kvm_gmem_init(struct module *module)
{

}

static inline int kvm_gmem_bind(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				unsigned int fd, loff_t offset)
{
	WARN_ON_ONCE(1);
	return -EIO;
}

static inline void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	WARN_ON_ONCE(1);
}
#endif /* CONFIG_KVM_PRIVATE_MEM */

#endif /* __KVM_MM_H__ */