/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __KVM_MM_H__
#define __KVM_MM_H__ 1

/*
 * Architectures can choose whether to use an rwlock or spinlock
 * for the mmu_lock.  These macros, for use in common code only,
 * avoid using #ifdefs in places that must deal with multiple
 * architectures.
 */

#ifdef KVM_HAVE_MMU_RWLOCK
#define KVM_MMU_LOCK_INIT(kvm)		rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		write_unlock(&(kvm)->mmu_lock)
#else
#define KVM_MMU_LOCK_INIT(kvm)		spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */
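
/*
 * Illustrative sketch (not part of this header): common code takes and
 * releases mmu_lock exclusively through these wrappers, so the same
 * function compiles regardless of which lock type the architecture
 * selected:
 *
 *	KVM_MMU_LOCK(kvm);
 *	... walk or modify MMU state protected by mmu_lock ...
 *	KVM_MMU_UNLOCK(kvm);
 */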
struct kvm_follow_pfn {
	const struct kvm_memory_slot *slot;
	const gfn_t gfn;

	unsigned long hva;

	/* FOLL_* flags modifying lookup behavior, e.g. FOLL_WRITE. */
	unsigned int flags;

	/*
	 * Pin the page (effectively FOLL_PIN, which is an mm/ internal flag).
	 * The page *must* be pinned if KVM will write to the page via a
	 * kernel mapping, e.g. via kmap(), memremap(), etc.
	 */
	bool pin;

	/*
	 * If non-NULL, try to get a writable mapping even for a read fault.
	 * Set to true if a writable mapping was obtained.
	 */
	bool *map_writable;

	/*
	 * Optional output.  Set to a valid "struct page" if the returned pfn
	 * is for a refcounted or pinned struct page, NULL if the returned pfn
	 * has no struct page or if the struct page is not being refcounted
	 * (e.g. tail pages of non-compound higher order allocations from
	 * IO/PFNMAP mappings).
	 */
	struct page **refcounted_page;
};

kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp);

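/*
 * Illustrative sketch of a caller (hypothetical; in-tree users funnel
 * through common code in kvm_main.c, which also resolves @hva and
 * validates the slot).  Field names match the struct above; the
 * surrounding variables are assumptions:
 *
 *	struct page *refcounted_page = NULL;
 *	bool writable = false;
 *	struct kvm_follow_pfn kfp = {
 *		.slot = slot,
 *		.gfn = gfn,
 *		.hva = hva,
 *		.flags = write_fault ? FOLL_WRITE : 0,
 *		.map_writable = &writable,
 *		.refcounted_page = &refcounted_page,
 *	};
 *	kvm_pfn_t pfn = hva_to_pfn(&kfp);
 */
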
#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
				       unsigned long start,
				       unsigned long end);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end)
{
}
#endif /* CONFIG_HAVE_KVM_PFNCACHE */
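
/*
 * Context note (simplified sketch of the in-tree call site): invalidation
 * is driven by KVM's mmu_notifier, so configurations without the pfncache
 * compile the call down to the empty stub above:
 *
 *	kvm_mmu_notifier_invalidate_range_start(...)
 *		gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
 */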

#ifdef CONFIG_KVM_PRIVATE_MEM
void kvm_gmem_init(struct module *module);
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset);
void kvm_gmem_unbind(struct kvm_memory_slot *slot);
#else
static inline void kvm_gmem_init(struct module *module)
{
}

static inline int kvm_gmem_bind(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				unsigned int fd, loff_t offset)
{
	WARN_ON_ONCE(1);
	return -EIO;
}

static inline void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	WARN_ON_ONCE(1);
}
#endif /* CONFIG_KVM_PRIVATE_MEM */
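
/*
 * Illustrative sketch (simplified from the common memslot code; exact
 * variable names are assumptions): when userspace creates a memslot with
 * KVM_MEM_GUEST_MEMFD, the slot is bound to its guest_memfd before going
 * live, and unbound via kvm_gmem_unbind() when the slot is destroyed:
 *
 *	if (mem->flags & KVM_MEM_GUEST_MEMFD) {
 *		r = kvm_gmem_bind(kvm, new, mem->guest_memfd,
 *				  mem->guest_memfd_offset);
 *		if (r)
 *			goto out;
 *	}
 */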

#endif /* __KVM_MM_H__ */