/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  KVM guest address space mapping code
 *
 *    Copyright IBM Corp. 2007, 2016, 2025
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               Claudio Imbrenda <imbrenda@linux.ibm.com>
 */

#ifndef ARCH_KVM_S390_GMAP_H
#define ARCH_KVM_S390_GMAP_H

#include "dat.h"

/**
 * enum gmap_flags - Flags of a gmap.
 *
 * @GMAP_FLAG_SHADOW: The gmap is a vsie shadow gmap.
 * @GMAP_FLAG_OWNS_PAGETABLES: The gmap owns all DAT levels; normally set,
 *                             clear only for ucontrol per-cpu gmaps, since
 *                             they share the page tables with the main gmap.
 * @GMAP_FLAG_IS_UCONTROL: The gmap is ucontrol (main gmap or per-cpu gmap).
 * @GMAP_FLAG_ALLOW_HPAGE_1M: 1M hugepages are allowed for this gmap,
 *                            independently of the page size used by userspace.
 * @GMAP_FLAG_ALLOW_HPAGE_2G: 2G hugepages are allowed for this gmap,
 *                            independently of the page size used by userspace.
 * @GMAP_FLAG_PFAULT_ENABLED: Pfault is enabled for the gmap.
 * @GMAP_FLAG_USES_SKEYS: The guest uses storage keys.
 * @GMAP_FLAG_USES_CMM: The guest uses CMMA.
 * @GMAP_FLAG_EXPORT_ON_UNMAP: Guest pages are exported when unmapping.
 */
enum gmap_flags {
	GMAP_FLAG_SHADOW = 0,
	GMAP_FLAG_OWNS_PAGETABLES,
	GMAP_FLAG_IS_UCONTROL,
	GMAP_FLAG_ALLOW_HPAGE_1M,
	GMAP_FLAG_ALLOW_HPAGE_2G,
	GMAP_FLAG_PFAULT_ENABLED,
	GMAP_FLAG_USES_SKEYS,
	GMAP_FLAG_USES_CMM,
	GMAP_FLAG_EXPORT_ON_UNMAP,
};
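
/*
 * The flag bits above live in gmap->flags and are manipulated with the
 * standard bitops. A minimal sketch (hypothetical call site, not part of
 * this header):
 *
 *	set_bit(GMAP_FLAG_PFAULT_ENABLED, &gmap->flags);
 *	if (test_bit(GMAP_FLAG_PFAULT_ENABLED, &gmap->flags))
 *		...;
 *	clear_bit(GMAP_FLAG_PFAULT_ENABLED, &gmap->flags);
 *
 * The helpers further down (uses_skeys() and friends) wrap test_bit() for
 * the most common queries.
 */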

/**
 * struct gmap - Guest address space.
 *
 * @flags: GMAP_FLAG_* flags.
 * @edat_level: The edat level of this shadow gmap.
 * @kvm: The vm this gmap belongs to.
 * @asce: The ASCE used by this gmap.
 * @list: List head used in children gmaps for the children gmap list.
 * @children_lock: Protects children and scb_users.
 * @children: List of child gmaps of this gmap.
 * @scb_users: List of vsie_scb that use this shadow gmap.
 * @parent: Parent gmap of a child gmap.
 * @guest_asce: Original ASCE of this shadow gmap.
 * @host_to_rmap_lock: Protects host_to_rmap.
 * @host_to_rmap: Radix tree mapping host addresses to guest addresses.
 * @refcount: Reference counter.
 */
struct gmap {
	unsigned long flags;
	unsigned char edat_level;
	struct kvm *kvm;
	union asce asce;
	struct list_head list;
	spinlock_t children_lock;	/* Protects: children, scb_users */
	struct list_head children;
	struct list_head scb_users;
	struct gmap *parent;
	union asce guest_asce;
	spinlock_t host_to_rmap_lock;	/* Protects host_to_rmap */
	struct radix_tree_root host_to_rmap;
	refcount_t refcount;
};

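/*
 * struct gmap_cache - List entry associating a gmap with a list.
 * (Presumably used to keep gmaps around for reuse; see the users in
 * gmap.c for the exact life cycle.)
 */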
struct gmap_cache {
	struct list_head list;
	struct gmap *gmap;
};

#define gmap_for_each_rmap_safe(pos, n, head) \
	for (pos = (head); n = pos ? pos->next : NULL, pos; pos = n)
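
/*
 * gmap_for_each_rmap_safe() walks a NULL-terminated singly linked rmap
 * chain while caching the next pointer in @n, so the current entry may be
 * freed during the walk. A minimal sketch, assuming a node type with a
 * "next" member (the concrete rmap type lives with its users):
 *
 *	struct gmap_rmap *rmap, *next;
 *
 *	gmap_for_each_rmap_safe(rmap, next, head)
 *		kfree(rmap);
 */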

int s390_replace_asce(struct gmap *gmap);
bool _gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end, bool hint);
bool gmap_age_gfn(struct gmap *gmap, gfn_t start, gfn_t end);
bool gmap_unmap_gfn_range(struct gmap *gmap, struct kvm_memory_slot *slot, gfn_t start, gfn_t end);
int gmap_try_fixup_minor(struct gmap *gmap, struct guest_fault *fault);
struct gmap *gmap_new(struct kvm *kvm, gfn_t limit);
struct gmap *gmap_new_child(struct gmap *parent, gfn_t limit);
void gmap_remove_child(struct gmap *child);
void gmap_dispose(struct gmap *gmap);
int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *fault);
void gmap_sync_dirty_log(struct gmap *gmap, gfn_t start, gfn_t end);
int gmap_set_limit(struct gmap *gmap, gfn_t limit);
int gmap_ucas_translate(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, gpa_t *gaddr);
int gmap_ucas_map(struct gmap *gmap, gfn_t p_gfn, gfn_t c_gfn, unsigned long count);
void gmap_ucas_unmap(struct gmap *gmap, gfn_t c_gfn, unsigned long count);
int gmap_enable_skeys(struct gmap *gmap);
int gmap_pv_destroy_range(struct gmap *gmap, gfn_t start, gfn_t end, bool interruptible);
int gmap_insert_rmap(struct gmap *sg, gfn_t p_gfn, gfn_t r_gfn, int level);
int gmap_protect_rmap(struct kvm_s390_mmu_cache *mc, struct gmap *sg, gfn_t p_gfn, gfn_t r_gfn,
		      kvm_pfn_t pfn, int level, bool wr);
void gmap_set_cmma_all_dirty(struct gmap *gmap);
void _gmap_handle_vsie_unshadow_event(struct gmap *parent, gfn_t gfn);
struct gmap *gmap_create_shadow(struct kvm_s390_mmu_cache *mc, struct gmap *gmap,
				union asce asce, int edat_level);
void gmap_split_huge_pages(struct gmap *gmap);

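/*
 * Life-cycle sketch (hypothetical call sites, error handling omitted):
 * a gmap is created with gmap_new(), ucontrol children are added with
 * gmap_new_child(), and the last reference is dropped with gmap_put()
 * below, which ends up calling gmap_dispose().
 */
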
static inline bool uses_skeys(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_USES_SKEYS, &gmap->flags);
}

static inline bool uses_cmm(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_USES_CMM, &gmap->flags);
}

static inline bool pfault_enabled(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_PFAULT_ENABLED, &gmap->flags);
}

static inline bool is_ucontrol(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_IS_UCONTROL, &gmap->flags);
}

static inline bool is_shadow(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_SHADOW, &gmap->flags);
}

static inline bool owns_page_tables(struct gmap *gmap)
{
	return test_bit(GMAP_FLAG_OWNS_PAGETABLES, &gmap->flags);
}

static inline struct gmap *gmap_put(struct gmap *gmap)
{
	if (refcount_dec_and_test(&gmap->refcount))
		gmap_dispose(gmap);
	return NULL;
}

static inline void gmap_get(struct gmap *gmap)
{
	WARN_ON_ONCE(unlikely(!refcount_inc_not_zero(&gmap->refcount)));
}
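
/*
 * Holding a reference: gmap_get() bumps the refcount and warns if it was
 * already zero; gmap_put() drops the reference and disposes of the gmap
 * when it was the last one. gmap_put() returns NULL so a caller can clear
 * its pointer in the same statement, roughly:
 *
 *	my_gmap = gmap_put(my_gmap);
 *
 * (The variable name above is only illustrative.)
 */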

static inline void gmap_handle_vsie_unshadow_event(struct gmap *parent, gfn_t gfn)
{
	scoped_guard(spinlock, &parent->children_lock)
		_gmap_handle_vsie_unshadow_event(parent, gfn);
}

static inline bool gmap_mkold_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end)
{
	return _gmap_unmap_prefix(gmap, gfn, end, true);
}

static inline bool gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end)
{
	return _gmap_unmap_prefix(gmap, gfn, end, false);
}

static inline union pgste _gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, union pte newpte,
					  union pgste pgste, gfn_t gfn, bool needs_lock)
{
	lockdep_assert_held(&gmap->kvm->mmu_lock);
	if (!needs_lock)
		lockdep_assert_held(&gmap->children_lock);
	else
		lockdep_assert_not_held(&gmap->children_lock);

	/* The prefix mapping needs to go away if the page becomes protected or invalid. */
	if (pgste.prefix_notif && (newpte.h.p || newpte.h.i)) {
		pgste.prefix_notif = 0;
		gmap_unmap_prefix(gmap, gfn, gfn + 1);
	}
	/* Shadow page tables depending on this PTE need to be torn down. */
	if (pgste.vsie_notif && (ptep->h.p != newpte.h.p || newpte.h.i)) {
		pgste.vsie_notif = 0;
		if (needs_lock)
			gmap_handle_vsie_unshadow_event(gmap, gfn);
		else
			_gmap_handle_vsie_unshadow_event(gmap, gfn);
	}
	/* The PTE becomes dirty: mark the backing page dirty as well. */
	if (!ptep->s.d && newpte.s.d && !newpte.s.s)
		SetPageDirty(pfn_to_page(newpte.h.pfra));
	return __dat_ptep_xchg(ptep, pgste, newpte, gfn, gmap->asce, uses_skeys(gmap));
}

static inline union pgste gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, union pte newpte,
					 union pgste pgste, gfn_t gfn)
{
	return _gmap_ptep_xchg(gmap, ptep, newpte, pgste, gfn, true);
}

static inline bool __must_check _gmap_crstep_xchg_atomic(struct gmap *gmap, union crste *crstep,
							 union crste oldcrste, union crste newcrste,
							 gfn_t gfn, bool needs_lock)
{
	/* Guest pages covered by the entry: 1M worth for a pmd, 2G worth otherwise. */
	unsigned long align = is_pmd(newcrste) ? _PAGE_ENTRIES : _PAGE_ENTRIES * _CRST_ENTRIES;

	/* Exchanging entries of different table types would corrupt the DAT tables. */
	if (KVM_BUG_ON(crstep->h.tt != oldcrste.h.tt || newcrste.h.tt != oldcrste.h.tt, gmap->kvm))
		return true;

	lockdep_assert_held(&gmap->kvm->mmu_lock);
	if (!needs_lock)
		lockdep_assert_held(&gmap->children_lock);

	gfn = ALIGN_DOWN(gfn, align);
	/* Same prefix and vsie notification handling as for PTEs, at large-page granularity. */
	if (crste_prefix(oldcrste) && (newcrste.h.p || newcrste.h.i || !crste_prefix(newcrste))) {
		newcrste.s.fc1.prefix_notif = 0;
		gmap_unmap_prefix(gmap, gfn, gfn + align);
	}
	if (crste_leaf(oldcrste) && oldcrste.s.fc1.vsie_notif &&
	    (newcrste.h.p || newcrste.h.i || !newcrste.s.fc1.vsie_notif)) {
		newcrste.s.fc1.vsie_notif = 0;
		if (needs_lock)
			gmap_handle_vsie_unshadow_event(gmap, gfn);
		else
			_gmap_handle_vsie_unshadow_event(gmap, gfn);
	}
	if (!oldcrste.s.fc1.d && newcrste.s.fc1.d && !newcrste.s.fc1.s)
		SetPageDirty(phys_to_page(crste_origin_large(newcrste)));
	return dat_crstep_xchg_atomic(crstep, oldcrste, newcrste, gfn, gmap->asce);
}

static inline bool __must_check gmap_crstep_xchg_atomic(struct gmap *gmap, union crste *crstep,
							union crste oldcrste, union crste newcrste,
							gfn_t gfn)
{
	return _gmap_crstep_xchg_atomic(gmap, crstep, oldcrste, newcrste, gfn, true);
}

/**
 * gmap_is_shadow_valid() - check if a shadow guest address space matches the
 *                          given properties and is still valid.
 * @sg: Pointer to the shadow guest address space structure.
 * @asce: ASCE for which the shadow table is requested.
 * @edat_level: Edat level to be used for the shadow translation.
 *
 * Return: true if the gmap shadow is still valid and matches the given
 * properties and the caller can continue using it; false otherwise, in which
 * case the caller has to request a new shadow gmap.
 */
static inline bool gmap_is_shadow_valid(struct gmap *sg, union asce asce, int edat_level)
{
	return sg->guest_asce.val == asce.val && sg->edat_level == edat_level;
}
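
/*
 * A (hypothetical) vsie call site would revalidate a cached shadow before
 * reuse and request a fresh one otherwise, roughly:
 *
 *	if (!gmap_is_shadow_valid(sg, asce, edat_level))
 *		sg = gmap_create_shadow(mc, gmap, asce, edat_level);
 */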

#endif /* ARCH_KVM_S390_GMAP_H */