/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  KVM guest address space mapping code
 *
 *    Copyright IBM Corp. 2007, 2016, 2025
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               Claudio Imbrenda <imbrenda@linux.ibm.com>
 */

#ifndef ARCH_KVM_S390_GMAP_H
#define ARCH_KVM_S390_GMAP_H

#include "dat.h"

/**
 * enum gmap_flags - Flags of a gmap.
 *
 * @GMAP_FLAG_SHADOW: The gmap is a vsie shadow gmap.
 * @GMAP_FLAG_OWNS_PAGETABLES: The gmap owns all DAT levels; normally set,
 *                             clear only for ucontrol per-cpu gmaps, since
 *                             they share the page tables with the main gmap.
 * @GMAP_FLAG_IS_UCONTROL: The gmap is ucontrol (main gmap or per-cpu gmap).
 * @GMAP_FLAG_ALLOW_HPAGE_1M: 1M hugepages are allowed for this gmap,
 *                            independently of the page size used by userspace.
 * @GMAP_FLAG_ALLOW_HPAGE_2G: 2G hugepages are allowed for this gmap,
 *                            independently of the page size used by userspace.
 * @GMAP_FLAG_PFAULT_ENABLED: Pfault is enabled for the gmap.
 * @GMAP_FLAG_USES_SKEYS: Whether the guest uses storage keys.
 * @GMAP_FLAG_USES_CMM: Whether the guest uses CMM.
 * @GMAP_FLAG_EXPORT_ON_UNMAP: Whether to export guest pages when unmapping.
 */
enum gmap_flags {
	GMAP_FLAG_SHADOW = 0,
	GMAP_FLAG_OWNS_PAGETABLES,
	GMAP_FLAG_IS_UCONTROL,
	GMAP_FLAG_ALLOW_HPAGE_1M,
	GMAP_FLAG_ALLOW_HPAGE_2G,
	GMAP_FLAG_PFAULT_ENABLED,
	GMAP_FLAG_USES_SKEYS,
	GMAP_FLAG_USES_CMM,
	GMAP_FLAG_EXPORT_ON_UNMAP,
};

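/*
 * The flag values are bit numbers into gmap->flags, meant to be used with
 * the bit helpers (as the accessors further down do) rather than as masks.
 * A minimal sketch; the callers shown here are hypothetical:
 *
 *	set_bit(GMAP_FLAG_PFAULT_ENABLED, &gmap->flags);
 *	if (test_bit(GMAP_FLAG_PFAULT_ENABLED, &gmap->flags))
 *		...
 *	clear_bit(GMAP_FLAG_PFAULT_ENABLED, &gmap->flags);
 */
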
/**
 * struct gmap - Guest address space.
 *
 * @flags: GMAP_FLAG_* flags.
 * @edat_level: The edat level of this shadow gmap.
 * @invalidated: Whether the gmap has been invalidated and must no longer be used.
 * @kvm: The vm this gmap belongs to.
 * @asce: The ASCE used by this gmap.
 * @list: List head, used to link a child gmap into its parent's @children list.
 * @children_lock: Protects children and scb_users.
 * @children: List of child gmaps of this gmap.
 * @scb_users: List of vsie_scb that use this shadow gmap.
 * @parent: Parent gmap of a child gmap.
 * @guest_asce: Original ASCE of this shadow gmap.
 * @host_to_rmap_lock: Protects host_to_rmap.
 * @host_to_rmap: Radix tree mapping host addresses to guest addresses.
 * @refcount: Reference count of this gmap.
 */
struct gmap {
	unsigned long flags;
	unsigned char edat_level;
	bool invalidated;
	struct kvm *kvm;
	union asce asce;
	struct list_head list;
	spinlock_t children_lock;	/* Protects: children, scb_users */
	struct list_head children;
	struct list_head scb_users;
	struct gmap *parent;
	union asce guest_asce;
	spinlock_t host_to_rmap_lock;	/* Protects host_to_rmap */
	struct radix_tree_root host_to_rmap;
	refcount_t refcount;
};

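/*
 * A minimal sketch of walking the child gmaps under @children_lock, which
 * protects the @children list; the helper name walk_children() is
 * hypothetical and only for illustration:
 *
 *	static void walk_children(struct gmap *gmap)
 *	{
 *		struct gmap *child;
 *
 *		scoped_guard(spinlock, &gmap->children_lock)
 *			list_for_each_entry(child, &gmap->children, list)
 *				...;
 *	}
 */
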
/* List element linking a gmap into a cache of gmaps. */
struct gmap_cache {
	struct list_head list;
	struct gmap *gmap;
};

#define gmap_for_each_rmap_safe(pos, n, head) \
	for (pos = (head); n = pos ? pos->next : NULL, pos; pos = n)

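/*
 * A minimal usage sketch for gmap_for_each_rmap_safe(); it is safe against
 * removal of @pos because @n caches the next element before the body runs.
 * The node type and the kfree() shown here are assumptions for illustration:
 *
 *	struct gmap_rmap *rmap, *rnext;
 *
 *	gmap_for_each_rmap_safe(rmap, rnext, head)
 *		kfree(rmap);
 */
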
int s390_replace_asce(struct gmap *gmap);
bool _gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end, bool hint);
bool gmap_age_gfn(struct gmap *gmap, gfn_t start, gfn_t end);
bool gmap_unmap_gfn_range(struct gmap *gmap, struct kvm_memory_slot *slot, gfn_t start, gfn_t end);
int gmap_try_fixup_minor(struct gmap *gmap, struct guest_fault *fault);
struct gmap *gmap_new(struct kvm *kvm, gfn_t limit);
struct gmap *gmap_new_child(struct gmap *parent, gfn_t limit);
void gmap_remove_child(struct gmap *child);
void gmap_dispose(struct gmap *gmap);
int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *fault,
	      struct kvm_memory_slot *slot);
void gmap_sync_dirty_log(struct gmap *gmap, gfn_t start, gfn_t end);
int gmap_set_limit(struct gmap *gmap, gfn_t limit);
int gmap_ucas_translate(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, gpa_t *gaddr);
int gmap_ucas_map(struct gmap *gmap, gfn_t p_gfn, gfn_t c_gfn, unsigned long count);
void gmap_ucas_unmap(struct gmap *gmap, gfn_t c_gfn, unsigned long count);
int gmap_enable_skeys(struct gmap *gmap);
int gmap_pv_destroy_range(struct gmap *gmap, gfn_t start, gfn_t end, bool interruptible);
int gmap_insert_rmap(struct gmap *sg, gfn_t p_gfn, gfn_t r_gfn, int level);
int gmap_protect_rmap(struct kvm_s390_mmu_cache *mc, struct gmap *sg, gfn_t p_gfn, gfn_t r_gfn,
		      kvm_pfn_t pfn, int level, bool wr);
void gmap_set_cmma_all_dirty(struct gmap *gmap);
void _gmap_handle_vsie_unshadow_event(struct gmap *parent, gfn_t gfn);
struct gmap *gmap_create_shadow(struct kvm_s390_mmu_cache *mc, struct gmap *gmap,
				union asce asce, int edat_level);
void gmap_split_huge_pages(struct gmap *gmap);

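/*
 * A hypothetical lifecycle sketch for the constructors above. That
 * gmap_new() returns NULL on failure and hands back a reference that is
 * dropped with gmap_put() are assumptions not visible from this header:
 *
 *	struct gmap *gmap = gmap_new(kvm, limit);
 *
 *	if (!gmap)
 *		return -ENOMEM;
 *	... use the gmap ...
 *	gmap = gmap_put(gmap);
 */
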
static inline bool uses_skeys(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_USES_SKEYS, &gmap->flags);
}

static inline bool uses_cmm(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_USES_CMM, &gmap->flags);
}

static inline bool pfault_enabled(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_PFAULT_ENABLED, &gmap->flags);
}

static inline bool is_ucontrol(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_IS_UCONTROL, &gmap->flags);
}

static inline bool is_shadow(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_SHADOW, &gmap->flags);
}

static inline bool owns_page_tables(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_OWNS_PAGETABLES, &gmap->flags);
}

/* Drop a reference; dispose of the gmap when the last reference is gone. */
static inline struct gmap *gmap_put(struct gmap *gmap)
{
	if (refcount_dec_and_test(&gmap->refcount))
		gmap_dispose(gmap);
	return NULL;
}

/* Take an additional reference; the gmap must still be alive. */
static inline void gmap_get(struct gmap *gmap)
{
	WARN_ON_ONCE(unlikely(!refcount_inc_not_zero(&gmap->refcount)));
}

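/*
 * gmap_put() always returns NULL so that callers can drop their reference
 * and clear their pointer in one statement. A minimal sketch:
 *
 *	gmap_get(gmap);
 *	... use the gmap ...
 *	gmap = gmap_put(gmap);
 */
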
static inline void gmap_handle_vsie_unshadow_event(struct gmap *parent, gfn_t gfn)
{
	scoped_guard(spinlock, &parent->children_lock)
		_gmap_handle_vsie_unshadow_event(parent, gfn);
}

static inline bool gmap_mkold_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end)
{
	return _gmap_unmap_prefix(gmap, gfn, end, true);
}

static inline bool gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end)
{
	return _gmap_unmap_prefix(gmap, gfn, end, false);
}

static inline union pgste _gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, union pte newpte,
					  union pgste pgste, gfn_t gfn, bool needs_lock)
{
	lockdep_assert_held(&gmap->kvm->mmu_lock);
	if (!needs_lock)
		lockdep_assert_held(&gmap->children_lock);
	else
		lockdep_assert_not_held(&gmap->children_lock);

	/* Unmap the prefix if the page becomes protected or invalid */
	if (pgste.prefix_notif && (newpte.h.p || newpte.h.i)) {
		pgste.prefix_notif = 0;
		gmap_unmap_prefix(gmap, gfn, gfn + 1);
	}
	/* Notify vsie shadows if protection changes or the page becomes invalid */
	if (pgste.vsie_notif && (ptep->h.p != newpte.h.p || newpte.h.i)) {
		pgste.vsie_notif = 0;
		if (needs_lock)
			gmap_handle_vsie_unshadow_event(gmap, gfn);
		else
			_gmap_handle_vsie_unshadow_event(gmap, gfn);
	}
	/* Propagate newly set dirtiness to the backing page */
	if (!ptep->s.d && newpte.s.d && !newpte.s.s)
		SetPageDirty(pfn_to_page(newpte.h.pfra));
	return __dat_ptep_xchg(ptep, pgste, newpte, gfn, gmap->asce, uses_skeys(gmap));
}

static inline union pgste gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, union pte newpte,
					 union pgste pgste, gfn_t gfn)
{
	return _gmap_ptep_xchg(gmap, ptep, newpte, pgste, gfn, true);
}

static inline bool __must_check _gmap_crstep_xchg_atomic(struct gmap *gmap, union crste *crstep,
							 union crste oldcrste, union crste newcrste,
							 gfn_t gfn, bool needs_lock)
{
	/* Notifications cover the whole 1M (pmd) or 2G mapping */
	unsigned long align = is_pmd(newcrste) ? _PAGE_ENTRIES : _PAGE_ENTRIES * _CRST_ENTRIES;

	/* The table type of the entry must not change across the exchange */
	if (KVM_BUG_ON(crstep->h.tt != oldcrste.h.tt || newcrste.h.tt != oldcrste.h.tt, gmap->kvm))
		return true;

	lockdep_assert_held(&gmap->kvm->mmu_lock);
	if (!needs_lock)
		lockdep_assert_held(&gmap->children_lock);

	gfn = ALIGN_DOWN(gfn, align);
	/* Unmap the prefix if the mapping becomes protected, invalid, or loses the notifier */
	if (crste_prefix(oldcrste) && (newcrste.h.p || newcrste.h.i || !crste_prefix(newcrste))) {
		newcrste.s.fc1.prefix_notif = 0;
		gmap_unmap_prefix(gmap, gfn, gfn + align);
	}
	/* Notify vsie shadows if protection changes or the mapping becomes invalid */
	if (crste_leaf(oldcrste) && oldcrste.s.fc1.vsie_notif &&
	    (newcrste.h.p || newcrste.h.i || !newcrste.s.fc1.vsie_notif)) {
		newcrste.s.fc1.vsie_notif = 0;
		if (needs_lock)
			gmap_handle_vsie_unshadow_event(gmap, gfn);
		else
			_gmap_handle_vsie_unshadow_event(gmap, gfn);
	}
	/* Propagate newly set dirtiness to the backing page */
	if (!oldcrste.s.fc1.d && newcrste.s.fc1.d && !newcrste.s.fc1.s)
		SetPageDirty(phys_to_page(crste_origin_large(newcrste)));
	return dat_crstep_xchg_atomic(crstep, oldcrste, newcrste, gfn, gmap->asce);
}

static inline bool __must_check gmap_crstep_xchg_atomic(struct gmap *gmap, union crste *crstep,
							union crste oldcrste, union crste newcrste,
							gfn_t gfn)
{
	return _gmap_crstep_xchg_atomic(gmap, crstep, oldcrste, newcrste, gfn, true);
}

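/*
 * The underscore-prefixed exchange helpers are for callers that already
 * hold gmap->children_lock (see the lockdep assertions above); the plain
 * wrappers pass needs_lock == true and leave taking the lock to the
 * notification helpers. A minimal sketch of calling the locked variant:
 *
 *	scoped_guard(spinlock, &gmap->children_lock)
 *		pgste = _gmap_ptep_xchg(gmap, ptep, newpte, pgste, gfn, false);
 */
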
/**
 * gmap_is_shadow_valid() - check if a shadow guest address space matches the
 *                          given properties and is still valid.
 * @sg: Pointer to the shadow guest address space structure.
 * @asce: ASCE for which the shadow table is requested.
 * @edat_level: Edat level to be used for the shadow translation.
 *
 * Return: true if the gmap shadow is still valid and matches the given
 * properties, and the caller can continue using it; false otherwise, in
 * which case the caller has to request a new shadow gmap.
 */
static inline bool gmap_is_shadow_valid(struct gmap *sg, union asce asce, int edat_level)
{
	return sg->guest_asce.val == asce.val && sg->edat_level == edat_level;
}

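/*
 * A hypothetical re-validation sketch for vsie: reuse an existing shadow
 * gmap only while it still matches the guest ASCE and edat level, otherwise
 * request a new one:
 *
 *	if (!gmap_is_shadow_valid(sg, asce, edat_level))
 *		sg = gmap_create_shadow(mc, gmap, asce, edat_level);
 */
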
#endif /* ARCH_KVM_S390_GMAP_H */