xref: /linux/arch/s390/kvm/gmap.h (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  KVM guest address space mapping code
4  *
5  *    Copyright IBM Corp. 2007, 2016, 2025
6  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7  *               Claudio Imbrenda <imbrenda@linux.ibm.com>
8  */
9 
10 #ifndef ARCH_KVM_S390_GMAP_H
11 #define ARCH_KVM_S390_GMAP_H
12 
13 #include "dat.h"
14 
/**
 * enum gmap_flags - Flags of a gmap.
 *
 * @GMAP_FLAG_SHADOW: The gmap is a vsie shadow gmap.
 * @GMAP_FLAG_OWNS_PAGETABLES: The gmap owns all dat levels; normally 1, is 0
 *                             only for ucontrol per-cpu gmaps, since they
 *                             share the page tables with the main gmap.
 * @GMAP_FLAG_IS_UCONTROL: The gmap is ucontrol (main gmap or per-cpu gmap).
 * @GMAP_FLAG_ALLOW_HPAGE_1M: 1M hugepages are allowed for this gmap,
 *                            independently of the page size used by userspace.
 * @GMAP_FLAG_ALLOW_HPAGE_2G: 2G hugepages are allowed for this gmap,
 *                            independently of the page size used by userspace.
 * @GMAP_FLAG_PFAULT_ENABLED: Pfault is enabled for the gmap.
 * @GMAP_FLAG_USES_SKEYS: If the guest uses storage keys.
 * @GMAP_FLAG_USES_CMM: Whether the guest uses CMMA.
 * @GMAP_FLAG_EXPORT_ON_UNMAP: Whether to export guest pages when unmapping.
 *
 * These values are bit numbers into gmap->flags, used with test_bit() and
 * friends; see the inline accessors at the end of this file.
 */
enum gmap_flags {
	GMAP_FLAG_SHADOW = 0,
	GMAP_FLAG_OWNS_PAGETABLES,
	GMAP_FLAG_IS_UCONTROL,
	GMAP_FLAG_ALLOW_HPAGE_1M,
	GMAP_FLAG_ALLOW_HPAGE_2G,
	GMAP_FLAG_PFAULT_ENABLED,
	GMAP_FLAG_USES_SKEYS,
	GMAP_FLAG_USES_CMM,
	GMAP_FLAG_EXPORT_ON_UNMAP,
};
43 
/**
 * struct gmap - Guest address space.
 *
 * @flags: GMAP_FLAG_* flags.
 * @edat_level: The edat level of this shadow gmap.
 * @kvm: The vm.
 * @asce: The ASCE used by this gmap.
 * @list: List head used in children gmaps for the children gmap list.
 * @children_lock: Protects children and scb_users.
 * @children: List of child gmaps of this gmap.
 * @scb_users: List of vsie_scb that use this shadow gmap.
 * @parent: Parent gmap of a child gmap.
 * @guest_asce: Original ASCE of this shadow gmap.
 * @host_to_rmap_lock: Protects host_to_rmap.
 * @host_to_rmap: Radix tree mapping host addresses to guest addresses.
 * @refcount: Reference counter; the gmap is disposed once it drops to
 *            zero (see gmap_put()).
 */
struct gmap {
	unsigned long flags;
	unsigned char edat_level;
	struct kvm *kvm;
	union asce asce;
	struct list_head list;
	spinlock_t children_lock;	/* Protects: children, scb_users */
	struct list_head children;
	struct list_head scb_users;
	struct gmap *parent;
	union asce guest_asce;
	spinlock_t host_to_rmap_lock;	/* Protects host_to_rmap */
	struct radix_tree_root host_to_rmap;
	refcount_t refcount;
};
75 
/**
 * struct gmap_cache - List node tying a gmap to a list.
 *
 * @list: List head for linking this node into a list.
 * @gmap: The gmap this node refers to.
 *
 * NOTE(review): no user of this struct is visible in this header; confirm
 * the exact caching semantics against its users.
 */
struct gmap_cache {
	struct list_head list;
	struct gmap *gmap;
};
80 
/*
 * gmap_for_each_rmap_safe - iterate over a NULL-terminated rmap chain.
 * @pos: cursor pointing to the current element.
 * @n: temporary holding the next element, so @pos may be freed by the body.
 * @head: first element of the chain; may be NULL (empty chain).
 *
 * The comma expression in the loop condition fetches pos->next into @n
 * before the body runs, making deletion of @pos safe.
 */
#define gmap_for_each_rmap_safe(pos, n, head) \
	for (pos = (head); n = pos ? pos->next : NULL, pos; pos = n)
83 
/*
 * Out-of-line gmap operations. The _-prefixed functions have inline
 * wrappers further down in this header.
 */
int s390_replace_asce(struct gmap *gmap);
bool _gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end, bool hint);
bool gmap_age_gfn(struct gmap *gmap, gfn_t start, gfn_t end);
bool gmap_unmap_gfn_range(struct gmap *gmap, struct kvm_memory_slot *slot, gfn_t start, gfn_t end);
int gmap_try_fixup_minor(struct gmap *gmap, struct guest_fault *fault);
/* Lifetime: gmap_new()/gmap_new_child() allocate, gmap_dispose() disposes. */
struct gmap *gmap_new(struct kvm *kvm, gfn_t limit);
struct gmap *gmap_new_child(struct gmap *parent, gfn_t limit);
void gmap_remove_child(struct gmap *child);
void gmap_dispose(struct gmap *gmap);
int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *fault);
void gmap_sync_dirty_log(struct gmap *gmap, gfn_t start, gfn_t end);
int gmap_set_limit(struct gmap *gmap, gfn_t limit);
/* Helpers for ucontrol guest address spaces. */
int gmap_ucas_translate(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, gpa_t *gaddr);
int gmap_ucas_map(struct gmap *gmap, gfn_t p_gfn, gfn_t c_gfn, unsigned long count);
void gmap_ucas_unmap(struct gmap *gmap, gfn_t c_gfn, unsigned long count);
int gmap_enable_skeys(struct gmap *gmap);
int gmap_pv_destroy_range(struct gmap *gmap, gfn_t start, gfn_t end, bool interruptible);
int gmap_insert_rmap(struct gmap *sg, gfn_t p_gfn, gfn_t r_gfn, int level);
int gmap_protect_rmap(struct kvm_s390_mmu_cache *mc, struct gmap *sg, gfn_t p_gfn, gfn_t r_gfn,
		      kvm_pfn_t pfn, int level, bool wr);
void gmap_set_cmma_all_dirty(struct gmap *gmap);
void _gmap_handle_vsie_unshadow_event(struct gmap *parent, gfn_t gfn);
struct gmap *gmap_create_shadow(struct kvm_s390_mmu_cache *mc, struct gmap *gmap,
				union asce asce, int edat_level);
void gmap_split_huge_pages(struct gmap *gmap);
109 
110 static inline bool uses_skeys(struct gmap *gmap)
111 {
112 	return test_bit(GMAP_FLAG_USES_SKEYS, &gmap->flags);
113 }
114 
115 static inline bool uses_cmm(struct gmap *gmap)
116 {
117 	return test_bit(GMAP_FLAG_USES_CMM, &gmap->flags);
118 }
119 
120 static inline bool pfault_enabled(struct gmap *gmap)
121 {
122 	return test_bit(GMAP_FLAG_PFAULT_ENABLED, &gmap->flags);
123 }
124 
125 static inline bool is_ucontrol(struct gmap *gmap)
126 {
127 	return test_bit(GMAP_FLAG_IS_UCONTROL, &gmap->flags);
128 }
129 
130 static inline bool is_shadow(struct gmap *gmap)
131 {
132 	return test_bit(GMAP_FLAG_SHADOW, &gmap->flags);
133 }
134 
135 static inline bool owns_page_tables(struct gmap *gmap)
136 {
137 	return test_bit(GMAP_FLAG_OWNS_PAGETABLES, &gmap->flags);
138 }
139 
140 static inline struct gmap *gmap_put(struct gmap *gmap)
141 {
142 	if (refcount_dec_and_test(&gmap->refcount))
143 		gmap_dispose(gmap);
144 	return NULL;
145 }
146 
147 static inline void gmap_get(struct gmap *gmap)
148 {
149 	WARN_ON_ONCE(unlikely(!refcount_inc_not_zero(&gmap->refcount)));
150 }
151 
152 static inline void gmap_handle_vsie_unshadow_event(struct gmap *parent, gfn_t gfn)
153 {
154 	scoped_guard(spinlock, &parent->children_lock)
155 		_gmap_handle_vsie_unshadow_event(parent, gfn);
156 }
157 
/*
 * Age the mapping of any prefix page intersecting [@gfn, @end).
 * Passes hint == true to _gmap_unmap_prefix(); judging by the name, this
 * only marks the mapping old rather than unmapping it -- NOTE(review):
 * confirm against the _gmap_unmap_prefix() implementation.
 */
static inline bool gmap_mkold_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end)
{
	return _gmap_unmap_prefix(gmap, gfn, end, true);
}
162 
/*
 * Unmap any prefix page intersecting [@gfn, @end) (hint == false).
 * The return value is forwarded from _gmap_unmap_prefix().
 */
static inline bool gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end)
{
	return _gmap_unmap_prefix(gmap, gfn, end, false);
}
167 
/*
 * _gmap_ptep_xchg() - exchange a PTE, handling notification side effects.
 * @gmap: The guest address space.
 * @ptep: Pointer to the PTE to be replaced.
 * @newpte: The new PTE value.
 * @pgste: The PGSTE associated with the PTE.
 * @gfn: The guest frame number mapped by the PTE.
 * @needs_lock: True if @gmap->children_lock still needs to be taken for
 *              vsie unshadowing, false if the caller already holds it.
 *
 * Must be called with kvm->mmu_lock held.
 *
 * Return: the updated PGSTE, as returned by __dat_ptep_xchg().
 */
static inline union pgste _gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, union pte newpte,
					  union pgste pgste, gfn_t gfn, bool needs_lock)
{
	lockdep_assert_held(&gmap->kvm->mmu_lock);
	if (!needs_lock)
		lockdep_assert_held(&gmap->children_lock);
	else
		lockdep_assert_not_held(&gmap->children_lock);

	/* The new PTE protects or invalidates a prefix page: notify. */
	if (pgste.prefix_notif && (newpte.h.p || newpte.h.i)) {
		pgste.prefix_notif = 0;
		gmap_unmap_prefix(gmap, gfn, gfn + 1);
	}
	/* Protection changes, or invalidation, of a shadowed page: unshadow. */
	if (pgste.vsie_notif && (ptep->h.p != newpte.h.p || newpte.h.i)) {
		pgste.vsie_notif = 0;
		if (needs_lock)
			gmap_handle_vsie_unshadow_event(gmap, gfn);
		else
			_gmap_handle_vsie_unshadow_event(gmap, gfn);
	}
	return __dat_ptep_xchg(ptep, pgste, newpte, gfn, gmap->asce, uses_skeys(gmap));
}
190 
191 static inline union pgste gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, union pte newpte,
192 					 union pgste pgste, gfn_t gfn)
193 {
194 	return _gmap_ptep_xchg(gmap, ptep, newpte, pgste, gfn, true);
195 }
196 
197 static inline void _gmap_crstep_xchg(struct gmap *gmap, union crste *crstep, union crste ne,
198 				     gfn_t gfn, bool needs_lock)
199 {
200 	unsigned long align = 8 + (is_pmd(*crstep) ? 0 : 11);
201 
202 	lockdep_assert_held(&gmap->kvm->mmu_lock);
203 	if (!needs_lock)
204 		lockdep_assert_held(&gmap->children_lock);
205 
206 	gfn = ALIGN_DOWN(gfn, align);
207 	if (crste_prefix(*crstep) && (ne.h.p || ne.h.i || !crste_prefix(ne))) {
208 		ne.s.fc1.prefix_notif = 0;
209 		gmap_unmap_prefix(gmap, gfn, gfn + align);
210 	}
211 	if (crste_leaf(*crstep) && crstep->s.fc1.vsie_notif &&
212 	    (ne.h.p || ne.h.i || !ne.s.fc1.vsie_notif)) {
213 		ne.s.fc1.vsie_notif = 0;
214 		if (needs_lock)
215 			gmap_handle_vsie_unshadow_event(gmap, gfn);
216 		else
217 			_gmap_handle_vsie_unshadow_event(gmap, gfn);
218 	}
219 	dat_crstep_xchg(crstep, ne, gfn, gmap->asce);
220 }
221 
222 static inline void gmap_crstep_xchg(struct gmap *gmap, union crste *crstep, union crste ne,
223 				    gfn_t gfn)
224 {
225 	return _gmap_crstep_xchg(gmap, crstep, ne, gfn, true);
226 }
227 
228 /**
229  * gmap_is_shadow_valid() - check if a shadow guest address space matches the
230  *                          given properties and is still valid.
231  * @sg: Pointer to the shadow guest address space structure.
232  * @asce: ASCE for which the shadow table is requested.
233  * @edat_level: Edat level to be used for the shadow translation.
234  *
235  * Return: true if the gmap shadow is still valid and matches the given
236  * properties and the caller can continue using it; false otherwise, the
237  * caller has to request a new shadow gmap in this case.
238  */
239 static inline bool gmap_is_shadow_valid(struct gmap *sg, union asce asce, int edat_level)
240 {
241 	return sg->guest_asce.val == asce.val && sg->edat_level == edat_level;
242 }
243 
244 #endif /* ARCH_KVM_S390_GMAP_H */
245