/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2024, 2025
 * Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
 */

#ifndef __KVM_S390_DAT_H
#define __KVM_S390_DAT_H

#include <linux/radix-tree.h>
#include <linux/refcount.h>
#include <linux/io.h>
#include <linux/kvm_types.h>
#include <linux/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/dat-bits.h>

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* For consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

#define _ASCE(x) ((union asce) { .val = (x), })
#define NULL_ASCE _ASCE(0)

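/*
 * Invalid DAT entries can carry a software-defined token, consisting of a
 * type and a parameter. A _DAT_TOKEN_PIC token carries a program
 * interruption code as parameter; _CRSTE_HOLE() uses it with PGM_ADDRESSING
 * to mark memory holes.
 */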
enum {
	_DAT_TOKEN_NONE = 0,
	_DAT_TOKEN_PIC,
};

#define _CRSTE_TOK(l, t, p) ((union crste) {	\
	.tok.i = 1,				\
	.tok.tt = (l),				\
	.tok.type = (t),			\
	.tok.par = (p)				\
})
#define _CRSTE_PIC(l, p) _CRSTE_TOK(l, _DAT_TOKEN_PIC, p)

#define _CRSTE_HOLE(l) _CRSTE_PIC(l, PGM_ADDRESSING)
#define _CRSTE_EMPTY(l) _CRSTE_TOK(l, _DAT_TOKEN_NONE, 0)

#define _PMD_EMPTY _CRSTE_EMPTY(TABLE_TYPE_SEGMENT)

#define _PTE_TOK(t, p) ((union pte) { .tok.i = 1, .tok.type = (t), .tok.par = (p) })
#define _PTE_EMPTY _PTE_TOK(_DAT_TOKEN_NONE, 0)

/* This fake table type is used for page table walks (both for normal page tables and vSIE) */
#define TABLE_TYPE_PAGE_TABLE -1

enum dat_walk_flags {
	DAT_WALK_USES_SKEYS = 0x40,
	DAT_WALK_CONTINUE = 0x20,
	DAT_WALK_IGN_HOLES = 0x10,
	DAT_WALK_SPLIT = 0x08,
	DAT_WALK_ALLOC = 0x04,
	DAT_WALK_ANY = 0x02,
	DAT_WALK_LEAF = 0x01,
	DAT_WALK_DEFAULT = 0
};

#define DAT_WALK_SPLIT_ALLOC (DAT_WALK_SPLIT | DAT_WALK_ALLOC)
#define DAT_WALK_ALLOC_CONTINUE (DAT_WALK_CONTINUE | DAT_WALK_ALLOC)
#define DAT_WALK_LEAF_ALLOC (DAT_WALK_LEAF | DAT_WALK_ALLOC)

union pte {
	unsigned long val;
	union page_table_entry h;
	struct {
		unsigned long :56; /* Hardware bits */
		unsigned long u : 1; /* Page unused */
		unsigned long s : 1; /* Special */
		unsigned long w : 1; /* Writable */
		unsigned long r : 1; /* Readable */
		unsigned long d : 1; /* Dirty */
		unsigned long y : 1; /* Young */
		unsigned long sd: 1; /* Soft dirty */
		unsigned long pr: 1; /* Present */
	} s;
	struct {
		unsigned char hwbytes[7];
		unsigned char swbyte;
	};
	union {
		struct {
			unsigned long type :16; /* Token type */
			unsigned long par :16; /* Token parameter */
			unsigned long :20;
			unsigned long : 1; /* Must be 0 */
			unsigned long i : 1; /* Must be 1 */
			unsigned long : 2;
			unsigned long : 7;
			unsigned long pr : 1; /* Must be 0 */
		};
		struct {
			unsigned long token:32; /* Token and parameter */
			unsigned long :32;
		};
	} tok;
};

#define _SEGMENT_FR_MASK (_SEGMENT_MASK >> PAGE_SHIFT)
#define _REGION3_FR_MASK (_REGION3_MASK >> PAGE_SHIFT)
#define _PAGES_PER_SEGMENT _PAGE_ENTRIES
#define _PAGES_PER_REGION3 (_PAGES_PER_SEGMENT * _CRST_ENTRIES)

/* Soft dirty, needed as macro for atomic operations on ptes */
#define _PAGE_SD 0x002

/* Needed as macro to perform atomic operations */
#define PGSTE_PCL_BIT 0x0080000000000000UL /* PCL lock, HW bit */
#define PGSTE_CMMA_D_BIT 0x0000000000008000UL /* CMMA dirty soft-bit */

enum pgste_gps_usage {
	PGSTE_GPS_USAGE_STABLE = 0,
	PGSTE_GPS_USAGE_UNUSED,
	PGSTE_GPS_USAGE_POT_VOLATILE,
	PGSTE_GPS_USAGE_VOLATILE,
};

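/*
 * Page status table entry (pgste). Every pte in a page table has a matching
 * pgste, holding the guest storage key state (acc, fp, gr, gc), the per-pte
 * PCL lock bit and additional KVM software bits.
 */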
union pgste {
	unsigned long val;
	struct {
		unsigned long acc : 4;
		unsigned long fp : 1;
		unsigned long : 3;
		unsigned long pcl : 1;
		unsigned long hr : 1;
		unsigned long hc : 1;
		unsigned long : 2;
		unsigned long gr : 1;
		unsigned long gc : 1;
		unsigned long : 1;
		unsigned long :16; /* val16 */
		unsigned long zero : 1;
		unsigned long nodat : 1;
		unsigned long : 4;
		unsigned long usage : 2;
		unsigned long : 8;
		unsigned long cmma_d : 1; /* Dirty flag for CMMA bits */
		unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
		unsigned long vsie_notif : 1; /* Referenced in a shadow table */
		unsigned long : 5;
		unsigned long : 8;
	};
	struct {
		unsigned short hwbytes0;
		unsigned short val16; /* Used to store chunked values, see dat_{s,g}et_ptval() */
		unsigned short hwbytes4;
		unsigned char flags; /* Maps to the software bits */
		unsigned char hwbyte7;
	} __packed;
};

union pmd {
	unsigned long val;
	union segment_table_entry h;
	struct {
		struct {
			unsigned long :44; /* HW */
			unsigned long : 3; /* Unused */
			unsigned long : 1; /* HW */
			unsigned long s : 1; /* Special */
			unsigned long w : 1; /* Writable soft-bit */
			unsigned long r : 1; /* Readable soft-bit */
			unsigned long d : 1; /* Dirty */
			unsigned long y : 1; /* Young */
			unsigned long : 3; /* HW */
			unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
			unsigned long vsie_notif : 1; /* Referenced in a shadow table */
			unsigned long : 4; /* HW */
			unsigned long sd : 1; /* Soft-Dirty */
			unsigned long pr : 1; /* Present */
		} fc1;
	} s;
};

union pud {
	unsigned long val;
	union region3_table_entry h;
	struct {
		struct {
			unsigned long :33; /* HW */
			unsigned long :14; /* Unused */
			unsigned long : 1; /* HW */
			unsigned long s : 1; /* Special */
			unsigned long w : 1; /* Writable soft-bit */
			unsigned long r : 1; /* Readable soft-bit */
			unsigned long d : 1; /* Dirty */
			unsigned long y : 1; /* Young */
			unsigned long : 3; /* HW */
			unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
			unsigned long vsie_notif : 1; /* Referenced in a shadow table */
			unsigned long : 4; /* HW */
			unsigned long sd : 1; /* Soft-Dirty */
			unsigned long pr : 1; /* Present */
		} fc1;
	} s;
};

union p4d {
	unsigned long val;
	union region2_table_entry h;
};

union pgd {
	unsigned long val;
	union region1_table_entry h;
};

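/*
 * Generic view of a region or segment table entry ("crste"). The h.fc0
 * layout applies when the entry points to a lower-level table, h.fc1 when
 * the entry is a large leaf (FC=1); s holds the software bits and tok the
 * token encoding used for invalid entries.
 */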
union crste {
	unsigned long val;
	union {
		struct {
			unsigned long :52;
			unsigned long : 1;
			unsigned long fc: 1;
			unsigned long p : 1;
			unsigned long : 1;
			unsigned long : 2;
			unsigned long i : 1;
			unsigned long : 1;
			unsigned long tt: 2;
			unsigned long : 2;
		};
		struct {
			unsigned long to:52;
			unsigned long : 1;
			unsigned long fc: 1;
			unsigned long p : 1;
			unsigned long : 1;
			unsigned long tf: 2;
			unsigned long i : 1;
			unsigned long : 1;
			unsigned long tt: 2;
			unsigned long tl: 2;
		} fc0;
		struct {
			unsigned long :47;
			unsigned long av : 1; /* ACCF-Validity Control */
			unsigned long acc: 4; /* Access-Control Bits */
			unsigned long f : 1; /* Fetch-Protection Bit */
			unsigned long fc : 1; /* Format-Control */
			unsigned long p : 1; /* DAT-Protection Bit */
			unsigned long iep: 1; /* Instruction-Execution-Protection */
			unsigned long : 2;
			unsigned long i : 1; /* Segment-Invalid Bit */
			unsigned long cs : 1; /* Common-Segment Bit */
			unsigned long tt : 2; /* Table-Type Bits */
			unsigned long : 2;
		} fc1;
	} h;
	struct {
		struct {
			unsigned long :47;
			unsigned long : 1; /* HW (should be 0) */
			unsigned long s : 1; /* Special */
			unsigned long w : 1; /* Writable */
			unsigned long r : 1; /* Readable */
			unsigned long d : 1; /* Dirty */
			unsigned long y : 1; /* Young */
			unsigned long : 3; /* HW */
			unsigned long prefix_notif : 1; /* Guest prefix invalidation notification */
			unsigned long vsie_notif : 1; /* Referenced in a shadow table */
			unsigned long : 4; /* HW */
			unsigned long sd : 1; /* Soft-Dirty */
			unsigned long pr : 1; /* Present */
		} fc1;
	} s;
	union {
		struct {
			unsigned long type :16; /* Token type */
			unsigned long par :16; /* Token parameter */
			unsigned long :26;
			unsigned long i : 1; /* Must be 1 */
			unsigned long : 1;
			unsigned long tt : 2;
			unsigned long : 1;
			unsigned long pr : 1; /* Must be 0 */
		};
		struct {
			unsigned long token:32; /* Token and parameter */
			unsigned long :32;
		};
	} tok;
	union pmd pmd;
	union pud pud;
	union p4d p4d;
	union pgd pgd;
};

union skey {
	unsigned char skey;
	struct {
		unsigned char acc :4;
		unsigned char fp :1;
		unsigned char r :1;
		unsigned char c :1;
		unsigned char zero:1;
	};
};

static_assert(sizeof(union pgste) == sizeof(unsigned long));
static_assert(sizeof(union pte) == sizeof(unsigned long));
static_assert(sizeof(union pmd) == sizeof(unsigned long));
static_assert(sizeof(union pud) == sizeof(unsigned long));
static_assert(sizeof(union p4d) == sizeof(unsigned long));
static_assert(sizeof(union pgd) == sizeof(unsigned long));
static_assert(sizeof(union crste) == sizeof(unsigned long));
static_assert(sizeof(union skey) == sizeof(char));

struct segment_table {
	union pmd pmds[_CRST_ENTRIES];
};

struct region3_table {
	union pud puds[_CRST_ENTRIES];
};

struct region2_table {
	union p4d p4ds[_CRST_ENTRIES];
};

struct region1_table {
	union pgd pgds[_CRST_ENTRIES];
};

struct crst_table {
	union {
		union crste crstes[_CRST_ENTRIES];
		struct segment_table segment;
		struct region3_table region3;
		struct region2_table region2;
		struct region1_table region1;
	};
};

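/*
 * A page table occupies a whole 4k page: the 256 ptes are followed by the
 * 256 matching pgstes, so the pgste of a pte sits exactly _PAGE_ENTRIES
 * entries after it (see pgste_of()).
 */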
struct page_table {
	union pte ptes[_PAGE_ENTRIES];
	union pgste pgstes[_PAGE_ENTRIES];
};

static_assert(sizeof(struct crst_table) == _CRST_TABLE_SIZE);
static_assert(sizeof(struct page_table) == PAGE_SIZE);

struct dat_walk;

typedef long (*dat_walk_op)(union crste *crste, gfn_t gfn, gfn_t next, struct dat_walk *w);

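/*
 * Callbacks for a DAT table walk. The crste callbacks can also be addressed
 * as an array indexed by table type (segment = 0 up to region-1 = 3), which
 * is what the anonymous union is for; pte_entry is invoked for page table
 * entries.
 */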
struct dat_walk_ops {
	union {
		dat_walk_op crste_ops[4];
		struct {
			dat_walk_op pmd_entry;
			dat_walk_op pud_entry;
			dat_walk_op p4d_entry;
			dat_walk_op pgd_entry;
		};
	};
	long (*pte_entry)(union pte *pte, gfn_t gfn, gfn_t next, struct dat_walk *w);
};

struct dat_walk {
	const struct dat_walk_ops *ops;
	union crste *last;
	union pte *last_pte;
	union asce asce;
	gfn_t start;
	gfn_t end;
	int flags;
	void *priv;
};

struct ptval_param {
	unsigned char offset : 6;
	unsigned char len : 2;
};

/**
 * _pte() - Useful constructor for union pte
 * @pfn: the pfn this pte should point to.
 * @writable: whether the pte should be writable.
 * @dirty: whether the pte should be dirty.
 * @special: whether the pte should be marked as special.
 *
 * The pte is also marked as young and present. If the pte is marked as dirty,
 * it gets marked as soft-dirty too. If the pte is not dirty, the hardware
 * protect bit is set (independently of the write softbit); this way proper
 * dirty tracking can be performed.
 *
 * Return: a union pte value.
 */
static inline union pte _pte(kvm_pfn_t pfn, bool writable, bool dirty, bool special)
{
	union pte res = { .val = PFN_PHYS(pfn) };

	res.h.p = !dirty;
	res.s.y = 1;
	res.s.pr = 1;
	res.s.w = writable;
	res.s.d = dirty;
	res.s.sd = dirty;
	res.s.s = special;
	return res;
}
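
/*
 * Example (illustrative only): install a writable but initially clean
 * mapping, letting dirty tracking catch the first write:
 *
 *	union pte new = _pte(pfn, true, false, false);
 *
 *	dat_ptep_xchg(ptep, new, gfn, asce, uses_skeys);
 *
 * Because @dirty is false, the hardware protection bit stays set, so the
 * first store faults and the entry can then be replaced with a dirty one.
 */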

/*
 * Construct a non-leaf (FC=0) region or segment table entry of table type
 * @tt, pointing to the lower-level table that starts at @pfn. The table
 * offset and length fields are set so that the whole lower-level table is
 * considered valid.
 */
static inline union crste _crste_fc0(kvm_pfn_t pfn, int tt)
{
	union crste res = { .val = PFN_PHYS(pfn) };

	res.h.tt = tt;
	res.h.fc0.tl = _REGION_ENTRY_LENGTH;
	res.h.fc0.tf = 0;
	return res;
}

/**
 * _crste_fc1() - Useful constructor for union crste with FC=1
 * @pfn: the pfn this crste should point to.
 * @tt: the table type
 * @writable: whether the crste should be writable.
 * @dirty: whether the crste should be dirty.
 *
 * The crste is also marked as young and present. If the crste is marked as
 * dirty, it gets marked as soft-dirty too. If the crste is not dirty, the
 * hardware protect bit is set (independently of the write softbit); this way
 * proper dirty tracking can be performed.
 *
 * Return: a union crste value.
 */
static inline union crste _crste_fc1(kvm_pfn_t pfn, int tt, bool writable, bool dirty)
{
	union crste res = { .val = PFN_PHYS(pfn) & _SEGMENT_MASK };

	res.h.tt = tt;
	res.h.p = !dirty;
	res.h.fc = 1;
	res.s.fc1.y = 1;
	res.s.fc1.pr = 1;
	res.s.fc1.w = writable;
	res.s.fc1.d = dirty;
	res.s.fc1.sd = dirty;
	return res;
}
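
/*
 * Example (illustrative only): map a large segment read-only, with @pfn
 * aligned to the large frame:
 *
 *	union crste new = _crste_fc1(pfn, TABLE_TYPE_SEGMENT, false, false);
 *
 *	dat_crstep_xchg(crstep, new, gfn, asce);
 */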

/* Page usage and content state, as extracted and set by the ESSA instruction */
union essa_state {
	unsigned char val;
	struct {
		unsigned char : 2;
		unsigned char nodat : 1;
		unsigned char exception : 1;
		unsigned char usage : 2;
		unsigned char content : 2;
	};
};

/**
 * struct vsie_rmap - reverse mapping for shadow page table entries
 * @next: pointer to next rmap in the list
 * @r_gfn: virtual rmap address in the shadow guest address space
 */
struct vsie_rmap {
	struct vsie_rmap *next;
	union {
		unsigned long val;
		struct {
			long level: 8;
			unsigned long : 4;
			unsigned long r_gfn:52;
		};
	};
};

static_assert(sizeof(struct vsie_rmap) == 2 * sizeof(long));

#define KVM_S390_MMU_CACHE_N_CRSTS 6
#define KVM_S390_MMU_CACHE_N_PTS 2
#define KVM_S390_MMU_CACHE_N_RMAPS 16
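/*
 * Small cache of pre-allocated crst tables, page tables and vsie rmaps.
 * kvm_s390_mmu_cache_topup() fills it up front; the allocation helpers below
 * consume it first and only fall back to GFP_ATOMIC allocations when the
 * cache runs empty.
 */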
struct kvm_s390_mmu_cache {
	void *crsts[KVM_S390_MMU_CACHE_N_CRSTS];
	void *pts[KVM_S390_MMU_CACHE_N_PTS];
	void *rmaps[KVM_S390_MMU_CACHE_N_RMAPS];
	short int n_crsts;
	short int n_pts;
	short int n_rmaps;
};

struct guest_fault {
	gfn_t gfn;		/* Guest frame */
	kvm_pfn_t pfn;		/* Host PFN */
	struct page *page;	/* Host page */
	union pte *ptep;	/* Used to resolve the fault, or NULL */
	union crste *crstep;	/* Used to resolve the fault, or NULL */
	bool writable;		/* Mapping is writable */
	bool write_attempt;	/* Write access attempted */
	bool attempt_pfault;	/* Attempt a pfault first */
	bool valid;		/* This entry contains valid data */
	void (*callback)(struct guest_fault *f);
	void *priv;
};

/*
 *          0       1       2       3       4       5       6       7
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 *    0 |                               |           PGT_ADDR           |
 *    8 |        VMADDR         |                                      |
 *   16 |                                                              |
 *   24 |                                                              |
 */
#define MKPTVAL(o, l) ((struct ptval_param) { .offset = (o), .len = ((l) + 1) / 2 - 1})
#define PTVAL_PGT_ADDR MKPTVAL(4, 8)
#define PTVAL_VMADDR MKPTVAL(8, 6)

union pgste __must_check __dat_ptep_xchg(union pte *ptep, union pgste pgste, union pte new,
		gfn_t gfn, union asce asce, bool uses_skeys);
bool dat_crstep_xchg_atomic(union crste *crstep, union crste old, union crste new, gfn_t gfn,
		union asce asce);
void dat_crstep_xchg(union crste *crstep, union crste new, gfn_t gfn, union asce asce);

long _dat_walk_gfn_range(gfn_t start, gfn_t end, union asce asce,
		const struct dat_walk_ops *ops, int flags, void *priv);

int dat_entry_walk(struct kvm_s390_mmu_cache *mc, gfn_t gfn, union asce asce, int flags,
		int walk_level, union crste **last, union pte **ptepp);
void dat_free_level(struct crst_table *table, bool owns_ptes);
struct crst_table *dat_alloc_crst_sleepable(unsigned long init);
int dat_set_asce_limit(struct kvm_s390_mmu_cache *mc, union asce *asce, int newtype);
int dat_get_storage_key(union asce asce, gfn_t gfn, union skey *skey);
int dat_set_storage_key(struct kvm_s390_mmu_cache *mc, union asce asce, gfn_t gfn,
		union skey skey, bool nq);
int dat_cond_set_storage_key(struct kvm_s390_mmu_cache *mmc, union asce asce, gfn_t gfn,
		union skey skey, union skey *oldkey, bool nq, bool mr, bool mc);
int dat_reset_reference_bit(union asce asce, gfn_t gfn);
long dat_reset_skeys(union asce asce, gfn_t start);

unsigned long dat_get_ptval(struct page_table *table, struct ptval_param param);
void dat_set_ptval(struct page_table *table, struct ptval_param param, unsigned long val);

int dat_set_slot(struct kvm_s390_mmu_cache *mc, union asce asce, gfn_t start, gfn_t end,
		u16 type, u16 param);
int dat_set_prefix_notif_bit(union asce asce, gfn_t gfn);
bool dat_test_age_gfn(union asce asce, gfn_t start, gfn_t end);

int dat_perform_essa(union asce asce, gfn_t gfn, int orc, union essa_state *state, bool *dirty);
long dat_reset_cmma(union asce asce, gfn_t start_gfn);
int dat_peek_cmma(gfn_t start, union asce asce, unsigned int *count, u8 *values);
int dat_get_cmma(union asce asce, gfn_t *start, unsigned int *count, u8 *values, atomic64_t *rem);
int dat_set_cmma_bits(struct kvm_s390_mmu_cache *mc, union asce asce, gfn_t gfn,
		unsigned long count, unsigned long mask, const uint8_t *bits);

int kvm_s390_mmu_cache_topup(struct kvm_s390_mmu_cache *mc);

#define GFP_KVM_S390_MMU_CACHE (GFP_ATOMIC | __GFP_ACCOUNT | __GFP_NOWARN)

static inline struct page_table *kvm_s390_mmu_cache_alloc_pt(struct kvm_s390_mmu_cache *mc)
{
	if (mc->n_pts)
		return mc->pts[--mc->n_pts];
	return (void *)__get_free_page(GFP_KVM_S390_MMU_CACHE);
}

static inline struct crst_table *kvm_s390_mmu_cache_alloc_crst(struct kvm_s390_mmu_cache *mc)
{
	if (mc->n_crsts)
		return mc->crsts[--mc->n_crsts];
	return (void *)__get_free_pages(GFP_KVM_S390_MMU_CACHE | __GFP_COMP, CRST_ALLOC_ORDER);
}

static inline struct vsie_rmap *kvm_s390_mmu_cache_alloc_rmap(struct kvm_s390_mmu_cache *mc)
{
	if (mc->n_rmaps)
		return mc->rmaps[--mc->n_rmaps];
	return kzalloc_obj(struct vsie_rmap, GFP_KVM_S390_MMU_CACHE);
}

static inline struct crst_table *crste_table_start(union crste *crstep)
{
	return (struct crst_table *)ALIGN_DOWN((unsigned long)crstep, _CRST_TABLE_SIZE);
}

static inline struct page_table *pte_table_start(union pte *ptep)
{
	return (struct page_table *)ALIGN_DOWN((unsigned long)ptep, _PAGE_TABLE_SIZE);
}

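/*
 * Exchange a crste using the CRDTE instruction (Compare and Replace DAT
 * Table Entry): the entry is replaced only if it still matches @old, and
 * the TLB is purged for the affected entry.
 */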
static inline bool crdte_crste(union crste *crstep, union crste old, union crste new, gfn_t gfn,
		union asce asce)
{
	unsigned long dtt = 0x10 | new.h.tt << 2;
	void *table = crste_table_start(crstep);

	return crdte(old.val, new.val, table, dtt, gfn_to_gpa(gfn), asce.val);
}

/**
 * idte_crste() - invalidate a crste entry using idte
 * @crstep: pointer to the crste to be invalidated
 * @gfn: a gfn mapped by the crste
 * @opt: options for the idte instruction
 * @asce: the asce
 * @local: whether the operation is cpu-local
 */
static __always_inline void idte_crste(union crste *crstep, gfn_t gfn, unsigned long opt,
		union asce asce, int local)
{
	unsigned long table_origin = __pa(crste_table_start(crstep));
	unsigned long gaddr = gfn_to_gpa(gfn) & HPAGE_MASK;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile("idte %[table_origin],0,%[gaddr],%[local]"
			     : "+m" (*crstep)
			     : [table_origin] "a" (table_origin), [gaddr] "a" (gaddr),
			       [local] "i" (local)
			     : "cc");
	} else {
		/* flush with guest asce */
		asm volatile("idte %[table_origin],%[asce],%[gaddr_opt],%[local]"
			     : "+m" (*crstep)
			     : [table_origin] "a" (table_origin), [gaddr_opt] "a" (gaddr | opt),
			       [asce] "a" (asce.val), [local] "i" (local)
			     : "cc");
	}
}

static inline void dat_init_pgstes(struct page_table *pt, unsigned long val)
{
	memset64((void *)pt->pgstes, val, PTRS_PER_PTE);
}

static inline void dat_init_page_table(struct page_table *pt, unsigned long ptes,
		unsigned long pgstes)
{
	memset64((void *)pt->ptes, ptes, PTRS_PER_PTE);
	dat_init_pgstes(pt, pgstes);
}

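/*
 * Each DAT table level translates 11 bits of the address and a segment
 * covers 2^20 bytes, so an ASCE with designation type asce.dt spans
 * 2^((dt + 1) * 11 + 20) bytes: 2 GiB for a segment table (dt == 0) up to
 * the full 2^64 bytes for a region-1 table (dt == 3). The result is
 * returned as a gfn.
 */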
static inline gfn_t asce_end(union asce asce)
{
	return 1ULL << ((asce.dt + 1) * 11 + _SEGMENT_SHIFT - PAGE_SHIFT);
}

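/*
 * Type-generic converters: _CRSTE() yields a union crste value and _CRSTEP()
 * a union crste pointer from any of the specific table entry types; _CRSTP()
 * yields a struct crst_table pointer from any of the specific table types.
 * Passing an unsupported type is a compile-time error.
 */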
#define _CRSTE(x) ((union crste) { .val = _Generic((x),	\
	union pgd : (x).val,					\
	union p4d : (x).val,					\
	union pud : (x).val,					\
	union pmd : (x).val,					\
	union crste : (x).val)})

#define _CRSTEP(x) ((union crste *)_Generic((*(x)),	\
	union pgd : (x),				\
	union p4d : (x),				\
	union pud : (x),				\
	union pmd : (x),				\
	union crste : (x)))

#define _CRSTP(x) ((struct crst_table *)_Generic((*(x)),	\
	struct crst_table : (x),				\
	struct segment_table : (x),				\
	struct region3_table : (x),				\
	struct region2_table : (x),				\
	struct region1_table : (x)))

static inline bool asce_contains_gfn(union asce asce, gfn_t gfn)
{
	return gfn < asce_end(asce);
}

static inline bool is_pmd(union crste crste)
{
	return crste.h.tt == TABLE_TYPE_SEGMENT;
}

static inline bool is_pud(union crste crste)
{
	return crste.h.tt == TABLE_TYPE_REGION3;
}

static inline bool is_p4d(union crste crste)
{
	return crste.h.tt == TABLE_TYPE_REGION2;
}

static inline bool is_pgd(union crste crste)
{
	return crste.h.tt == TABLE_TYPE_REGION1;
}

static inline phys_addr_t pmd_origin_large(union pmd pmd)
{
	return pmd.val & _SEGMENT_ENTRY_ORIGIN_LARGE;
}

static inline phys_addr_t pud_origin_large(union pud pud)
{
	return pud.val & _REGION3_ENTRY_ORIGIN_LARGE;
}

/**
 * crste_origin_large() - Return the large frame origin of a large crste
 * @crste: The crste whose origin is to be returned. Should be either a
 *         region-3 table entry or a segment table entry, in both cases with
 *         FC set to 1 (large pages).
 *
 * Return: The origin of the large frame pointed to by @crste, or -1 if the
 *         crste was not large (wrong table type, or FC==0)
 */
static inline phys_addr_t crste_origin_large(union crste crste)
{
	if (unlikely(!crste.h.fc || crste.h.tt > TABLE_TYPE_REGION3))
		return -1;
	if (is_pmd(crste))
		return pmd_origin_large(crste.pmd);
	return pud_origin_large(crste.pud);
}

#define crste_origin(x) (_Generic((x),				\
	union pmd : (x).val & _SEGMENT_ENTRY_ORIGIN,		\
	union pud : (x).val & _REGION_ENTRY_ORIGIN,		\
	union p4d : (x).val & _REGION_ENTRY_ORIGIN,		\
	union pgd : (x).val & _REGION_ENTRY_ORIGIN))

static inline unsigned long pte_origin(union pte pte)
{
	return pte.val & PAGE_MASK;
}

static inline bool pmd_prefix(union pmd pmd)
{
	return pmd.h.fc && pmd.s.fc1.prefix_notif;
}

static inline bool pud_prefix(union pud pud)
{
	return pud.h.fc && pud.s.fc1.prefix_notif;
}

static inline bool crste_leaf(union crste crste)
{
	return (crste.h.tt <= TABLE_TYPE_REGION3) && crste.h.fc;
}

static inline bool crste_prefix(union crste crste)
{
	return crste_leaf(crste) && crste.s.fc1.prefix_notif;
}

static inline bool crste_dirty(union crste crste)
{
	return crste_leaf(crste) && crste.s.fc1.d;
}

static inline union pgste *pgste_of(union pte *pte)
{
	return (union pgste *)(pte + _PAGE_ENTRIES);
}

static inline bool pte_hole(union pte pte)
{
	return pte.h.i && !pte.tok.pr && pte.tok.type != _DAT_TOKEN_NONE;
}

static inline bool _crste_hole(union crste crste)
{
	return crste.h.i && !crste.tok.pr && crste.tok.type != _DAT_TOKEN_NONE;
}

#define crste_hole(x) _crste_hole(_CRSTE(x))

static inline bool _crste_none(union crste crste)
{
	return crste.h.i && !crste.tok.pr && crste.tok.type == _DAT_TOKEN_NONE;
}

#define crste_none(x) _crste_none(_CRSTE(x))

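/*
 * For large (FC=1) entries the physical address of a gfn is the large frame
 * origin taken from the entry, combined with the offset of the gfn within
 * the 1M segment or 2G region-3 frame.
 */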
static inline phys_addr_t large_pud_to_phys(union pud pud, gfn_t gfn)
{
	return pud_origin_large(pud) | (gfn_to_gpa(gfn) & ~_REGION3_MASK);
}

static inline phys_addr_t large_pmd_to_phys(union pmd pmd, gfn_t gfn)
{
	return pmd_origin_large(pmd) | (gfn_to_gpa(gfn) & ~_SEGMENT_MASK);
}

static inline phys_addr_t large_crste_to_phys(union crste crste, gfn_t gfn)
{
	if (unlikely(!crste.h.fc || crste.h.tt > TABLE_TYPE_REGION3))
		return -1;
	if (is_pmd(crste))
		return large_pmd_to_phys(crste.pmd, gfn);
	return large_pud_to_phys(crste.pud, gfn);
}

static inline bool cspg_crste(union crste *crstep, union crste old, union crste new)
{
	return cspg(&crstep->val, old.val, new.val);
}

static inline struct page_table *dereference_pmd(union pmd pmd)
{
	return phys_to_virt(crste_origin(pmd));
}

static inline struct segment_table *dereference_pud(union pud pud)
{
	return phys_to_virt(crste_origin(pud));
}

static inline struct region3_table *dereference_p4d(union p4d p4d)
{
	return phys_to_virt(crste_origin(p4d));
}

static inline struct region2_table *dereference_pgd(union pgd pgd)
{
	return phys_to_virt(crste_origin(pgd));
}

static inline struct crst_table *_dereference_crste(union crste crste)
{
	if (unlikely(is_pmd(crste)))
		return NULL;
	return phys_to_virt(crste_origin(crste.pud));
}

#define dereference_crste(x) (_Generic((x),			\
	union pud : _dereference_crste(_CRSTE(x)),		\
	union p4d : _dereference_crste(_CRSTE(x)),		\
	union pgd : _dereference_crste(_CRSTE(x)),		\
	union crste : _dereference_crste(_CRSTE(x))))

static inline struct crst_table *dereference_asce(union asce asce)
{
	return phys_to_virt(asce.val & _ASCE_ORIGIN);
}

static inline void asce_flush_tlb(union asce asce)
{
	__tlb_flush_idte(asce.val);
}

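/*
 * The PCL bit in the pgste acts as a spinlock protecting the pte/pgste pair:
 * pgste_get_lock() spins until the bit could be set atomically, and
 * pgste_set_unlock() stores the updated pgste back with the bit cleared.
 */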
static inline bool pgste_get_trylock(union pte *ptep, union pgste *res)
{
	union pgste *pgstep = pgste_of(ptep);
	union pgste old_pgste;

	if (READ_ONCE(pgstep->val) & PGSTE_PCL_BIT)
		return false;
	old_pgste.val = __atomic64_or_barrier(PGSTE_PCL_BIT, &pgstep->val);
	if (old_pgste.pcl)
		return false;
	old_pgste.pcl = 1;
	*res = old_pgste;
	return true;
}

static inline union pgste pgste_get_lock(union pte *ptep)
{
	union pgste res;

	while (!pgste_get_trylock(ptep, &res))
		cpu_relax();
	return res;
}

static inline void pgste_set_unlock(union pte *ptep, union pgste pgste)
{
	pgste.pcl = 0;
	barrier();
	WRITE_ONCE(*pgste_of(ptep), pgste);
}

static inline void dat_ptep_xchg(union pte *ptep, union pte new, gfn_t gfn, union asce asce,
		bool has_skeys)
{
	union pgste pgste;

	pgste = pgste_get_lock(ptep);
	pgste = __dat_ptep_xchg(ptep, pgste, new, gfn, asce, has_skeys);
	pgste_set_unlock(ptep, pgste);
}

static inline void dat_ptep_clear(union pte *ptep, gfn_t gfn, union asce asce, bool has_skeys)
{
	dat_ptep_xchg(ptep, _PTE_EMPTY, gfn, asce, has_skeys);
}

static inline void dat_free_pt(struct page_table *pt)
{
	free_page((unsigned long)pt);
}

static inline void _dat_free_crst(struct crst_table *table)
{
	free_pages((unsigned long)table, CRST_ALLOC_ORDER);
}

#define dat_free_crst(x) _dat_free_crst(_CRSTP(x))

static inline void kvm_s390_free_mmu_cache(struct kvm_s390_mmu_cache *mc)
{
	if (!mc)
		return;
	while (mc->n_pts)
		dat_free_pt(mc->pts[--mc->n_pts]);
	while (mc->n_crsts)
		_dat_free_crst(mc->crsts[--mc->n_crsts]);
	while (mc->n_rmaps)
		kfree(mc->rmaps[--mc->n_rmaps]);
	kfree(mc);
}

DEFINE_FREE(kvm_s390_mmu_cache, struct kvm_s390_mmu_cache *, if (_T) kvm_s390_free_mmu_cache(_T))

static inline struct kvm_s390_mmu_cache *kvm_s390_new_mmu_cache(void)
{
	struct kvm_s390_mmu_cache *mc __free(kvm_s390_mmu_cache) = NULL;

	mc = kzalloc_obj(*mc, GFP_KERNEL_ACCOUNT);
	if (mc && !kvm_s390_mmu_cache_topup(mc))
		return_ptr(mc);
	return NULL;
}

static inline bool dat_pmdp_xchg_atomic(union pmd *pmdp, union pmd old, union pmd new,
		gfn_t gfn, union asce asce)
{
	return dat_crstep_xchg_atomic(_CRSTEP(pmdp), _CRSTE(old), _CRSTE(new), gfn, asce);
}

static inline bool dat_pudp_xchg_atomic(union pud *pudp, union pud old, union pud new,
		gfn_t gfn, union asce asce)
{
	return dat_crstep_xchg_atomic(_CRSTEP(pudp), _CRSTE(old), _CRSTE(new), gfn, asce);
}

static inline union crste dat_crstep_clear_atomic(union crste *crstep, gfn_t gfn, union asce asce)
{
	union crste oldcrste, empty = _CRSTE_EMPTY(crstep->h.tt);

	do {
		oldcrste = READ_ONCE(*crstep);
	} while (!dat_crstep_xchg_atomic(crstep, oldcrste, empty, gfn, asce));
	return oldcrste;
}

static inline int get_level(union crste *crstep, union pte *ptep)
{
	return ptep ? TABLE_TYPE_PAGE_TABLE : crstep->h.tt;
}

static inline int dat_delete_slot(struct kvm_s390_mmu_cache *mc, union asce asce, gfn_t start,
		unsigned long npages)
{
	return dat_set_slot(mc, asce, start, start + npages, _DAT_TOKEN_PIC, PGM_ADDRESSING);
}

static inline int dat_create_slot(struct kvm_s390_mmu_cache *mc, union asce asce, gfn_t start,
		unsigned long npages)
{
	return dat_set_slot(mc, asce, start, start + npages, _DAT_TOKEN_NONE, 0);
}

static inline bool crste_is_ucas(union crste crste)
{
	return is_pmd(crste) && crste.h.i && crste.h.fc0.tl == 1 && crste.h.fc == 0;
}

#endif /* __KVM_S390_DAT_H */