// SPDX-License-Identifier: GPL-2.0
/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 *
 */

#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>
#include <linux/pgtable.h>
#include <linux/bitfield.h>
#include <asm/access-regs.h>
#include <asm/fault.h>
#include <asm/gmap.h>
#include <asm/dat-bits.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
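	/*
	 * The rfx01/rsx01/rtx01/sx01 fields below overlay the two most
	 * significant bits of the corresponding index above; they are
	 * compared against the table-length and table-offset fields of
	 * the ASCE and of the table entries during translation.
	 */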
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};

/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members allow the
 * frame address of a region, segment or page table entry to be assigned
 * directly.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};

union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p        : 1;
		u32 alesn    : 8;
		u32 alen     : 16;
	};
};

union ald {
	u32 val;
	struct {
		u32     : 1;
		u32 alo : 24;
		u32 all : 7;
	};
};

struct ale {
	unsigned long i      : 1; /* ALEN-Invalid Bit */
	unsigned long        : 5;
	unsigned long fo     : 1; /* Fetch-Only Bit */
	unsigned long p      : 1; /* Private Bit */
	unsigned long alesn  : 8; /* Access-List-Entry Sequence Number */
	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
	unsigned long        : 32;
	unsigned long        : 1;
	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long        : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
};

struct aste {
	unsigned long i      : 1; /* ASX-Invalid Bit */
	unsigned long ato    : 29; /* Authority-Table Origin */
	unsigned long        : 1;
	unsigned long b      : 1; /* Base-Space Bit */
	unsigned long ax     : 16; /* Authorization Index */
	unsigned long atl    : 12; /* Authority-Table Length */
	unsigned long        : 2;
	unsigned long ca     : 1; /* Controlled-ASN Bit */
	unsigned long ra     : 1; /* Reusable-ASN Bit */
	unsigned long asce   : 64; /* Address-Space-Control Element */
	unsigned long ald    : 32;
	unsigned long astesn : 32;
	/* ... more fields follow */
};

int ipte_lock_held(struct kvm *kvm)
{
	if (sclp.has_siif) {
		int rc;

		read_lock(&kvm->arch.sca_lock);
		rc = kvm_s390_get_ipte_control(kvm)->kh != 0;
		read_unlock(&kvm->arch.sca_lock);
		return rc;
	}
	return kvm->arch.ipte_lock_count != 0;
}

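/*
 * Without the SIIF facility, the IPTE interlock is emulated entirely by
 * the host: ipte_lock_count tracks the holders under ipte_mutex, and the
 * k bit in the ipte control block is set while at least one exists.
 */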
static void ipte_lock_simple(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	mutex_lock(&kvm->arch.ipte_mutex);
	kvm->arch.ipte_lock_count++;
	if (kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			read_unlock(&kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
out:
	mutex_unlock(&kvm->arch.ipte_mutex);
}

static void ipte_unlock_simple(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	mutex_lock(&kvm->arch.ipte_mutex);
	kvm->arch.ipte_lock_count--;
	if (kvm->arch.ipte_lock_count)
		goto out;
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
	wake_up(&kvm->arch.ipte_wq);
out:
	mutex_unlock(&kvm->arch.ipte_mutex);
}

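/*
 * With the SIIF facility the ipte control block is also manipulated by
 * hardware, so the holder count is kept in its kh field: the k bit is
 * set on the first acquisition and cleared once kh drops back to zero.
 */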
static void ipte_lock_siif(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			read_unlock(&kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
}

static void ipte_unlock_siif(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&kvm->arch.ipte_wq);
}

void ipte_lock(struct kvm *kvm)
{
	if (sclp.has_siif)
		ipte_lock_siif(kvm);
	else
		ipte_lock_simple(kvm);
}

void ipte_unlock(struct kvm *kvm)
{
	if (sclp.has_siif)
		ipte_unlock_siif(kvm);
	else
		ipte_unlock_simple(kvm);
}

static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	if (vcpu->arch.acrs_loaded)
		save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}

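/*
 * Cause of a protection exception, used to set the correct bits in the
 * translation-exception identification (TEID): low-address (LA),
 * key-controlled (KEYC), access-list controlled (ALC), DAT and
 * instruction-execution protection (IEP).
 */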
enum prot_type {
	PROT_TYPE_LA   = 0,
	PROT_TYPE_KEYC = 1,
	PROT_TYPE_ALC  = 2,
	PROT_TYPE_DAT  = 3,
	PROT_TYPE_IEP  = 4,
	/* Dummy value for passing an initialized value when code != PGM_PROTECTION */
	PROT_NONE,
};

static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
			    enum gacc_mode mode, enum prot_type prot, bool terminate)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	union teid *teid;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	teid = (union teid *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_NONE:
			/* We should never get here, acts like termination */
			WARN_ON_ONCE(1);
			break;
		case PROT_TYPE_IEP:
			teid->b61 = 1;
			fallthrough;
		case PROT_TYPE_LA:
			teid->b56 = 1;
			break;
		case PROT_TYPE_KEYC:
			teid->b60 = 1;
			break;
		case PROT_TYPE_ALC:
			teid->b60 = 1;
			fallthrough;
		case PROT_TYPE_DAT:
			teid->b61 = 1;
			break;
		}
		if (terminate) {
			teid->b56 = 0;
			teid->b60 = 0;
			teid->b61 = 0;
		}
		fallthrough;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller.
		 */
		teid->addr = gva >> PAGE_SHIFT;
		teid->fsi = mode == GACC_STORE ? TEID_FSI_STORE : TEID_FSI_FETCH;
		teid->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		fallthrough;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		/*
		 * We can always store exc_access_id, as it is
		 * undefined for non-ar cases. It is undefined for
		 * most DAT protection exceptions.
		 */
		pgm->exc_access_id = ar;
		break;
	}
	return code;
}

static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
		     enum gacc_mode mode, enum prot_type prot)
{
	return trans_exc_ending(vcpu, code, gva, ar, mode, prot, false);
}

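/*
 * Determine the address-space-control element the CPU would use for this
 * access: a real-space ASCE if DAT is off, otherwise CR1/CR7/CR13 for
 * the primary/secondary/home space, or the ALET-designated space when in
 * access-register mode. Instruction fetches are translated via the
 * primary space unless the home space is active.
 */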
static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, u8 ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);

	if (!psw.dat) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
		psw.as = PSW_BITS_AS_PRIMARY;

	switch (psw.as) {
	case PSW_BITS_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_BITS_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_BITS_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_BITS_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
		return rc;
	}
	return 0;
}

static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}

/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 * @prot: returns the type for protection exceptions
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      guest mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode, enum prot_type *prot)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	int iep_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2, iep;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
	if (asce.r)
		goto real_address;
	ptr = asce.rsto * PAGE_SIZE;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
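	/*
	 * Walk the remaining table levels, entering at the level the ASCE
	 * designates; each non-break case falls through to the next lower
	 * level.
	 */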
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:	{
		union region1_table_entry rfte;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			iep_protection = rtte.fc1.iep;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			iep_protection = ste.fc1.iep;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
	}
	}
	if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	iep_protection = pte.iep;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection) {
		*prot = PROT_TYPE_DAT;
		return PGM_PROTECTION;
	}
	if (mode == GACC_IFETCH && iep_protection && iep) {
		*prot = PROT_TYPE_IEP;
		return PGM_PROTECTION;
	}
	if (!kvm_is_gpa_in_memslot(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}

static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}

static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (!ctlreg0.lap)
		return 0;
	if (psw_bits(*psw).dat && asce.p)
		return 0;
	return 1;
}

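/*
 * Check storage-key-controlled protection for one page, as the machine
 * would for a VM-level access: access key 0 matches any storage key;
 * otherwise the keys must be equal, or the access must be a fetch from
 * a page whose fetch-protection bit is off.
 */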
static int vm_check_access_key(struct kvm *kvm, u8 access_key,
			       enum gacc_mode mode, gpa_t gpa)
{
	u8 storage_key, access_control;
	bool fetch_protected;
	unsigned long hva;
	int r;

	if (access_key == 0)
		return 0;

	hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;

	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	if (access_control == access_key)
		return 0;
	fetch_protected = storage_key & _PAGE_FP_BIT;
	if ((mode == GACC_FETCH || mode == GACC_IFETCH) && !fetch_protected)
		return 0;
	return PGM_PROTECTION;
}

static bool fetch_prot_override_applicable(struct kvm_vcpu *vcpu, enum gacc_mode mode,
					   union asce asce)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long override;

	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* check if fetch protection override enabled */
		override = vcpu->arch.sie_block->gcr[0];
		override &= CR0_FETCH_PROTECTION_OVERRIDE;
		/* not applicable if subject to DAT && private space */
		override = override && !(psw_bits(*psw).dat && asce.p);
		return override;
	}
	return false;
}

static bool fetch_prot_override_applies(unsigned long ga, unsigned int len)
{
	return ga < 2048 && ga + len <= 2048;
}

static bool storage_prot_override_applicable(struct kvm_vcpu *vcpu)
{
	/* check if storage protection override enabled */
	return vcpu->arch.sie_block->gcr[0] & CR0_STORAGE_PROTECTION_OVERRIDE;
}

static bool storage_prot_override_applies(u8 access_control)
{
	/* matches special storage protection override key (9) -> allow */
	return access_control == PAGE_SPO_ACC;
}

static int vcpu_check_access_key(struct kvm_vcpu *vcpu, u8 access_key,
				 enum gacc_mode mode, union asce asce, gpa_t gpa,
				 unsigned long ga, unsigned int len)
{
	u8 storage_key, access_control;
	unsigned long hva;
	int r;

	/* access key 0 matches any storage key -> allow */
	if (access_key == 0)
		return 0;
	/*
	 * caller needs to ensure that gfn is accessible, so we can
	 * assume that this cannot fail
	 */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));
	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	/* access key matches storage key -> allow */
	if (access_control == access_key)
		return 0;
	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* it is a fetch and fetch protection is off -> allow */
		if (!(storage_key & _PAGE_FP_BIT))
			return 0;
		if (fetch_prot_override_applicable(vcpu, mode, asce) &&
		    fetch_prot_override_applies(ga, len))
			return 0;
	}
	if (storage_prot_override_applicable(vcpu) &&
	    storage_prot_override_applies(access_control))
		return 0;
	return PGM_PROTECTION;
}

/**
 * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
 * covering a logical range
 * @vcpu: virtual cpu
 * @ga: guest address, start of range
 * @ar: access register
 * @gpas: output argument, may be NULL
 * @len: length of range in bytes
 * @asce: address-space-control element to use for translation
 * @mode: access mode
 * @access_key: access key to match the range's storage keys against
 *
 * Translate a logical range to a series of guest absolute addresses,
 * such that the concatenation of page fragments starting at each gpa make up
 * the whole range.
 * The translation is performed as if done by the cpu for the given @asce, @ar,
 * @mode and state of the @vcpu.
 * If the translation causes an exception, its program interruption code is
 * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
 * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
 * a correct exception into the guest.
 * The resulting gpas are stored into @gpas, unless it is NULL.
 *
 * Note: All fragments except the first one start at the beginning of a page.
 *	 When deriving the boundaries of a fragment from a gpa, all but the last
 *	 fragment end at the end of the page.
 *
 * Return:
 * * 0		- success
 * * <0		- translation could not be performed, for example if guest
 *		  memory could not be accessed
 * * >0		- an access exception occurred. In this case the returned value
 *		  is the program interruption code and the contents of pgm may
 *		  be used to inject an exception into the guest.
 */
static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			       unsigned long *gpas, unsigned long len,
			       const union asce asce, enum gacc_mode mode,
			       u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned int offset = offset_in_page(ga);
	unsigned int fragment_len;
	int lap_enabled, rc = 0;
	enum prot_type prot;
	unsigned long gpa;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		if (psw_bits(*psw).dat) {
			rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
			if (rc < 0)
				return rc;
		} else {
			gpa = kvm_s390_real_to_abs(vcpu, ga);
			if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) {
				rc = PGM_ADDRESSING;
				prot = PROT_NONE;
			}
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, prot);
		rc = vcpu_check_access_key(vcpu, access_key, mode, asce, gpa, ga,
					   fragment_len);
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_KEYC);
		if (gpas)
			*gpas++ = gpa;
		offset = 0;
		ga += fragment_len;
		len -= fragment_len;
	}
	return 0;
}

static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			     void *data, unsigned int len)
{
	const unsigned int offset = offset_in_page(gpa);
	const gfn_t gfn = gpa_to_gfn(gpa);
	int rc;

	if (mode == GACC_STORE)
		rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
	else
		rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
	return rc;
}

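/*
 * Like access_guest_page(), but the user-space copy is performed with
 * the given access key, so key-controlled protection is enforced during
 * the copy itself and reported as PGM_PROTECTION.
 */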
static int
access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			   void *data, unsigned int len, u8 access_key)
{
	struct kvm_memory_slot *slot;
	bool writable;
	gfn_t gfn;
	hva_t hva;
	int rc;

	gfn = gpa >> PAGE_SHIFT;
	slot = gfn_to_memslot(kvm, gfn);
	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);

	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;
	/*
	 * Check if it's a read-only memslot, even though that can't occur
	 * (they're unsupported). Don't try to actually handle that case.
	 */
	if (!writable && mode == GACC_STORE)
		return -EOPNOTSUPP;
	hva += offset_in_page(gpa);
	if (mode == GACC_STORE)
		rc = copy_to_user_key((void __user *)hva, data, len, access_key);
	else
		rc = copy_from_user_key(data, (void __user *)hva, len, access_key);
	if (rc)
		return PGM_PROTECTION;
	if (mode == GACC_STORE)
		mark_page_dirty_in_slot(kvm, slot, gfn);
	return 0;
}

int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
			      unsigned long len, enum gacc_mode mode, u8 access_key)
{
	int offset = offset_in_page(gpa);
	int fragment_len;
	int rc;

	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		rc = access_guest_page_with_key(kvm, mode, gpa, data, fragment_len, access_key);
		if (rc)
			return rc;
		offset = 0;
		len -= fragment_len;
		data += fragment_len;
		gpa += fragment_len;
	}
	return 0;
}

int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			  void *data, unsigned long len, enum gacc_mode mode,
			  u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long nr_pages, idx;
	unsigned long gpa_array[2];
	unsigned int fragment_len;
	unsigned long *gpas;
	enum prot_type prot;
	int need_ipte_lock;
	union asce asce;
	bool try_storage_prot_override;
	bool try_fetch_prot_override;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
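	/* number of pages the range [ga, ga + len) touches */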
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	gpas = gpa_array;
	if (nr_pages > ARRAY_SIZE(gpa_array))
		gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
	if (!gpas)
		return -ENOMEM;
	try_fetch_prot_override = fetch_prot_override_applicable(vcpu, mode, asce);
	try_storage_prot_override = storage_prot_override_applicable(vcpu);
	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu->kvm);
	/*
	 * Since we do the access further down ultimately via a move instruction
	 * that does key checking and returns an error in case of a protection
	 * violation, we don't need to do the check during address translation.
	 * Skip it by passing access key 0, which matches any storage key,
	 * obviating the need for any further checks. As a result the check is
	 * handled entirely in hardware on access, we only need to take care to
	 * forego key protection checking if fetch protection override applies or
	 * retry with the special key 9 in case of storage protection override.
	 */
	rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode, 0);
	if (rc)
		goto out_unlock;
	for (idx = 0; idx < nr_pages; idx++) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
		if (try_fetch_prot_override && fetch_prot_override_applies(ga, fragment_len)) {
			rc = access_guest_page(vcpu->kvm, mode, gpas[idx],
					       data, fragment_len);
		} else {
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, access_key);
		}
		if (rc == PGM_PROTECTION && try_storage_prot_override)
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, PAGE_SPO_ACC);
		if (rc)
			break;
		len -= fragment_len;
		data += fragment_len;
		ga = kvm_s390_logical_to_effective(vcpu, ga + fragment_len);
	}
	if (rc > 0) {
		bool terminate = (mode == GACC_STORE) && (idx > 0);

		if (rc == PGM_PROTECTION)
			prot = PROT_TYPE_KEYC;
		else
			prot = PROT_NONE;
		rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
	}
out_unlock:
	if (need_ipte_lock)
		ipte_unlock(vcpu->kvm);
	if (nr_pages > ARRAY_SIZE(gpa_array))
		vfree(gpas);
	return rc;
}

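/*
 * Copy data to or from guest real memory: no DAT and no key checking,
 * only the real-to-absolute conversion by prefixing is applied.
 */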
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned int fragment_len;
	unsigned long gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
		rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
		len -= fragment_len;
		gra += fragment_len;
		data += fragment_len;
	}
	return rc;
}

/**
 * cmpxchg_guest_abs_with_key() - Perform cmpxchg on guest absolute address.
 * @kvm: Virtual machine instance.
 * @gpa: Absolute guest address of the location to be changed.
 * @len: Operand length of the cmpxchg, required: 1 <= len <= 16. Providing a
 *       non power of two will result in failure.
 * @old_addr: Pointer to old value. If the location at @gpa contains this value,
 *            the exchange will succeed. After calling cmpxchg_guest_abs_with_key()
 *            *@old_addr contains the value at @gpa before the attempt to
 *            exchange the value.
 * @new: The value to place at @gpa.
 * @access_key: The access key to use for the guest access.
 * @success: output value indicating if an exchange occurred.
 *
 * Atomically exchange the value at @gpa by @new, if it contains *@old_addr.
 * Honors storage keys.
 *
 * Return: * 0: successful exchange
 *         * >0: a program interruption code indicating the reason cmpxchg could
 *               not be attempted
 *         * -EINVAL: address misaligned or len not power of two
 *         * -EAGAIN: transient failure (len 1 or 2)
 *         * -EOPNOTSUPP: read-only memslot (should never occur)
 */
int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len,
			       __uint128_t *old_addr, __uint128_t new,
			       u8 access_key, bool *success)
{
	gfn_t gfn = gpa_to_gfn(gpa);
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	bool writable;
	hva_t hva;
	int ret;

	if (!IS_ALIGNED(gpa, len))
		return -EINVAL;

	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;
	/*
	 * Check if it's a read-only memslot, even though that cannot occur
	 * since those are unsupported.
	 * Don't try to actually handle that case.
	 */
	if (!writable)
		return -EOPNOTSUPP;

	hva += offset_in_page(gpa);
	/*
	 * The cmpxchg_user_key macro depends on the type of "old", so we need
	 * a case for each valid length and get some code duplication as long
	 * as we don't introduce a new macro.
	 */
	switch (len) {
	case 1: {
		u8 old;

		ret = cmpxchg_user_key((u8 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 2: {
		u16 old;

		ret = cmpxchg_user_key((u16 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 4: {
		u32 old;

		ret = cmpxchg_user_key((u32 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 8: {
		u64 old;

		ret = cmpxchg_user_key((u64 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 16: {
		__uint128_t old;

		ret = cmpxchg_user_key((__uint128_t __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	default:
		return -EINVAL;
	}
	if (*success)
		mark_page_dirty_in_slot(kvm, slot, gfn);
	/*
	 * Assume that the fault is caused by protection, either key protection
	 * or user page write protection.
	 */
	if (ret == -EFAULT)
		ret = PGM_PROTECTION;
	return ret;
}

/**
 * guest_translate_address_with_key - translate guest logical into guest absolute address
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @gpa: Guest physical address
 * @mode: Translation access mode
 * @access_key: access key to match the storage key with
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
				     unsigned long *gpa, enum gacc_mode mode,
				     u8 access_key)
{
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode,
				   access_key);
}

/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @length: Length of test range
 * @mode: Translation access mode
 * @access_key: access key to match the storage keys with
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode, u8 access_key)
{
	union asce asce;
	int rc = 0;

	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	ipte_lock(vcpu->kvm);
	rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode,
				 access_key);
	ipte_unlock(vcpu->kvm);

	return rc;
}

/**
 * check_gpa_range - test a range of guest physical addresses for accessibility
 * @kvm: virtual machine instance
 * @gpa: guest physical address
 * @length: length of test range
 * @mode: access mode to test, relevant for storage keys
 * @access_key: access key to match the storage keys with
 */
int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
		    enum gacc_mode mode, u8 access_key)
{
	unsigned int fragment_len;
	int rc = 0;

	while (length && !rc) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length);
		rc = vm_check_access_key(kvm, access_key, mode, gpa);
		length -= fragment_len;
		gpa += fragment_len;
	}
	return rc;
}

/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @vcpu: virtual cpu
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;
	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
}

/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the beginning of the page table for the given address if
 *	 successful (return value 0), or to the first invalid DAT entry in
 *	 case of exceptions (return value > 0)
 * @dat_protection: referenced memory is write protected
 * @fake: pgt references contiguous guest memory block, not a pgtable
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct kvm *kvm;
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	kvm = sg->private;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	ptr = asce.rsto * PAGE_SIZE;
	if (asce.r) {
		*fake = 1;
		ptr = 0;
		asce.dt = ASCE_TYPE_REGION1;
	}
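	/*
	 * A real-space ASCE designates no tables at all, so pretend
	 * ("fake") a region-first table starting at 0; the walk below
	 * then shadows the identity-mapped address range.
	 */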
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !*fake)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}

	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			ptr += vaddr.rfx * _REGION1_SIZE;
			rfte.val = ptr;
			goto shadow_r2t;
		}
		*pgt = ptr + vaddr.rfx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE;
shadow_r2t:
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_r1_entry++;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			ptr += vaddr.rsx * _REGION2_SIZE;
			rste.val = ptr;
			goto shadow_r3t;
		}
		*pgt = ptr + vaddr.rsx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE;
shadow_r3t:
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_r2_entry++;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			ptr += vaddr.rtx * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		*pgt = ptr + vaddr.rtx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_r3_entry++;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			ptr += vaddr.sx * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		*pgt = ptr + vaddr.sx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			*fake = 1;
			ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto * (PAGE_SIZE / 2);
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_sg_entry++;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}

/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @datptr: will contain the address of the faulting DAT table entry, or of
 *	    the valid leaf, plus some flags
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr, unsigned long *datptr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt = 0;
	int dat_protection, fake;
	int rc;

	mmap_read_lock(sg->mm);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu->kvm);

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
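	/*
	 * With a fake pgt, pgt references a contiguous guest memory
	 * block rather than a page table, so the pte can be computed
	 * directly from the page index.
	 */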
	if (fake) {
		pte.val = pgt + vaddr.px * PAGE_SIZE;
		goto shadow_page;
	}

	switch (rc) {
	case PGM_SEGMENT_TRANSLATION:
	case PGM_REGION_THIRD_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_FIRST_TRANS:
		pgt |= PEI_NOT_PTE;
		break;
	case 0:
		pgt += vaddr.px * 8;
		rc = gmap_read_table(sg->parent, pgt, &pte.val);
	}
	if (datptr)
		*datptr = pgt | dat_protection * PEI_DAT_PROT;
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && pte.z)
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	vcpu->kvm->stat.gmap_shadow_pg_entry++;
	ipte_unlock(vcpu->kvm);
	mmap_read_unlock(sg->mm);
	return rc;
}
1459