// SPDX-License-Identifier: GPL-2.0
/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 *
 */

#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>
#include <linux/pgtable.h>
#include <linux/bitfield.h>
#include <asm/access-regs.h>
#include <asm/fault.h>
#include <asm/gmap.h>
#include <asm/dat-bits.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};

/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};

union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p        : 1;
		u32 alesn    : 8;
		u32 alen     : 16;
	};
};

union ald {
	u32 val;
	struct {
		u32     : 1;
		u32 alo : 24;
		u32 all : 7;
	};
};

struct ale {
	unsigned long i      : 1;  /* ALEN-Invalid Bit */
	unsigned long        : 5;
	unsigned long fo     : 1;  /* Fetch-Only Bit */
	unsigned long p      : 1;  /* Private Bit */
	unsigned long alesn  : 8;  /* Access-List-Entry Sequence Number */
	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
	unsigned long        : 32;
	unsigned long        : 1;
	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long        : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
};

struct aste {
	unsigned long i      : 1;  /* ASX-Invalid Bit */
	unsigned long ato    : 29; /* Authority-Table Origin */
	unsigned long        : 1;
	unsigned long b      : 1;  /* Base-Space Bit */
	unsigned long ax     : 16; /* Authorization Index */
	unsigned long atl    : 12; /* Authority-Table Length */
	unsigned long        : 2;
	unsigned long ca     : 1;  /* Controlled-ASN Bit */
	unsigned long ra     : 1;  /* Reusable-ASN Bit */
	unsigned long asce   : 64; /* Address-Space-Control Element */
	unsigned long ald    : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
};
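
/*
 * Illustrative sketch (not compiled in, hypothetical address): assigning
 * .addr makes the region-first/-second/-third, segment, page and byte
 * indexes available as rfx, rsx, rtx, sx, px and bx; the two-bit fields
 * (e.g. sx01) are what gets compared against table-length fields during
 * translation.
 *
 *	union vaddress vaddr = { .addr = 0x0000020000153000UL };
 *	ptr = asce.rsto * PAGE_SIZE + vaddr.rtx * 8;	(region-third case)
 */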

int ipte_lock_held(struct kvm *kvm)
{
	if (sclp.has_siif)
		return kvm->arch.sca->ipte_control.kh != 0;

	return kvm->arch.ipte_lock_count != 0;
}

static void ipte_lock_simple(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	mutex_lock(&kvm->arch.ipte_mutex);
	kvm->arch.ipte_lock_count++;
	if (kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	ic = &kvm->arch.sca->ipte_control;
	old = READ_ONCE(*ic);
	do {
		if (old.k) {
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
out:
	mutex_unlock(&kvm->arch.ipte_mutex);
}

static void ipte_unlock_simple(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	mutex_lock(&kvm->arch.ipte_mutex);
	kvm->arch.ipte_lock_count--;
	if (kvm->arch.ipte_lock_count)
		goto out;
	ic = &kvm->arch.sca->ipte_control;
	old = READ_ONCE(*ic);
	do {
		new = old;
		new.k = 0;
	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
	wake_up(&kvm->arch.ipte_wq);
out:
	mutex_unlock(&kvm->arch.ipte_mutex);
}

static void ipte_lock_siif(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

retry:
	ic = &kvm->arch.sca->ipte_control;
	old = READ_ONCE(*ic);
	do {
		if (old.kg) {
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
}

static void ipte_unlock_siif(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	ic = &kvm->arch.sca->ipte_control;
	old = READ_ONCE(*ic);
	do {
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
	if (!new.kh)
		wake_up(&kvm->arch.ipte_wq);
}

void ipte_lock(struct kvm *kvm)
{
	if (sclp.has_siif)
		ipte_lock_siif(kvm);
	else
		ipte_lock_simple(kvm);
}

void ipte_unlock(struct kvm *kvm)
{
	if (sclp.has_siif)
		ipte_unlock_siif(kvm);
	else
		ipte_unlock_simple(kvm);
}
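
/*
 * Sketch of the expected pairing (used below and elsewhere in kvm-s390):
 * holding the lock keeps the guest from completing IPTE/IDTE while we
 * walk its tables, so translations stay stable until ipte_unlock().
 *
 *	ipte_lock(vcpu->kvm);
 *	rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode, key);
 *	...access the translated pages...
 *	ipte_unlock(vcpu->kvm);
 */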

static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	if (vcpu->arch.acrs_loaded)
		save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}
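
/*
 * Sketch of the calling convention: ALET 0 resolves to the primary ASCE
 * (CR1) and ALET 1 to the secondary ASCE (CR7) without any access-list
 * walk; any other ALET goes through the ALE/ASTE lookup above. A positive
 * return value is a program interruption code, turned into an exception
 * by the caller (see get_vcpu_asce() below):
 *
 *	rc = ar_translation(vcpu, &asce, ar, mode);
 *	if (rc > 0)
 *		return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
 */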

enum prot_type {
	PROT_TYPE_LA   = 0,
	PROT_TYPE_KEYC = 1,
	PROT_TYPE_ALC  = 2,
	PROT_TYPE_DAT  = 3,
	PROT_TYPE_IEP  = 4,
	/* Dummy value for passing an initialized value when code != PGM_PROTECTION */
	PROT_TYPE_DUMMY,
};

static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
			    enum gacc_mode mode, enum prot_type prot, bool terminate)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	union teid *teid;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	teid = (union teid *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_TYPE_DUMMY:
			/* We should never get here, acts like termination */
			WARN_ON_ONCE(1);
			break;
		case PROT_TYPE_IEP:
			teid->b61 = 1;
			fallthrough;
		case PROT_TYPE_LA:
			teid->b56 = 1;
			break;
		case PROT_TYPE_KEYC:
			teid->b60 = 1;
			break;
		case PROT_TYPE_ALC:
			teid->b60 = 1;
			fallthrough;
		case PROT_TYPE_DAT:
			teid->b61 = 1;
			break;
		}
		if (terminate) {
			teid->b56 = 0;
			teid->b60 = 0;
			teid->b61 = 0;
		}
		fallthrough;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller.
		 */
		teid->addr = gva >> PAGE_SHIFT;
		teid->fsi = mode == GACC_STORE ? TEID_FSI_STORE : TEID_FSI_FETCH;
		teid->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		fallthrough;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		/*
		 * We can always store exc_access_id, as it is
		 * undefined for non-ar cases. It is undefined for
		 * most DAT protection exceptions.
		 */
		pgm->exc_access_id = ar;
		break;
	}
	return code;
}

static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
		     enum gacc_mode mode, enum prot_type prot)
{
	return trans_exc_ending(vcpu, code, gva, ar, mode, prot, false);
}
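
/*
 * Note (sketch): trans_exc() only fills vcpu->arch.pgm and returns the
 * interruption code; delivery is left to the caller, typically via the
 * helpers in kvm-s390.h:
 *
 *	rc = write_guest(vcpu, ga, ar, &data, sizeof(data));
 *	if (rc)
 *		return kvm_s390_inject_prog_cond(vcpu, rc);
 */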

static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, u8 ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);

	if (!psw.dat) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
		psw.as = PSW_BITS_AS_PRIMARY;

	switch (psw.as) {
	case PSW_BITS_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_BITS_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_BITS_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_BITS_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
		return rc;
	}
	return 0;
}

static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}

/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 * @prot: returns the type for protection exceptions
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      guest mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode, enum prot_type *prot)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	int iep_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2, iep;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
	if (asce.r)
		goto real_address;
	ptr = asce.rsto * PAGE_SIZE;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			iep_protection = rtte.fc1.iep;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			iep_protection = ste.fc1.iep;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
	}
	}
	if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	iep_protection = pte.iep;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection) {
		*prot = PROT_TYPE_DAT;
		return PGM_PROTECTION;
	}
	if (mode == GACC_IFETCH && iep_protection && iep) {
		*prot = PROT_TYPE_IEP;
		return PGM_PROTECTION;
	}
	if (!kvm_is_gpa_in_memslot(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}
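
/*
 * Worked example (hypothetical values): on the final page-table level,
 * raddr.addr still holds @gva, so assigning raddr.pfra = pte.pfra
 * overwrites only the frame part and keeps the byte index, i.e. the
 * result is effectively
 *
 *	*gpa = (pte.pfra << PAGE_SHIFT) | (gva & ~PAGE_MASK);
 *
 * the large-page exits work the same way through rfaa/sfaa, which keep
 * correspondingly more of the original address bits.
 */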

static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}

static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (!ctlreg0.lap)
		return 0;
	if (psw_bits(*psw).dat && asce.p)
		return 0;
	return 1;
}

static int vm_check_access_key(struct kvm *kvm, u8 access_key,
			       enum gacc_mode mode, gpa_t gpa)
{
	u8 storage_key, access_control;
	bool fetch_protected;
	unsigned long hva;
	int r;

	if (access_key == 0)
		return 0;

	hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;

	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	if (access_control == access_key)
		return 0;
	fetch_protected = storage_key & _PAGE_FP_BIT;
	if ((mode == GACC_FETCH || mode == GACC_IFETCH) && !fetch_protected)
		return 0;
	return PGM_PROTECTION;
}

static bool fetch_prot_override_applicable(struct kvm_vcpu *vcpu, enum gacc_mode mode,
					   union asce asce)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long override;

	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* check if fetch protection override enabled */
		override = vcpu->arch.sie_block->gcr[0];
		override &= CR0_FETCH_PROTECTION_OVERRIDE;
		/* not applicable if subject to DAT && private space */
		override = override && !(psw_bits(*psw).dat && asce.p);
		return override;
	}
	return false;
}

static bool fetch_prot_override_applies(unsigned long ga, unsigned int len)
{
	return ga < 2048 && ga + len <= 2048;
}

static bool storage_prot_override_applicable(struct kvm_vcpu *vcpu)
{
	/* check if storage protection override enabled */
	return vcpu->arch.sie_block->gcr[0] & CR0_STORAGE_PROTECTION_OVERRIDE;
}

static bool storage_prot_override_applies(u8 access_control)
{
	/* matches special storage protection override key (9) -> allow */
	return access_control == PAGE_SPO_ACC;
}

static int vcpu_check_access_key(struct kvm_vcpu *vcpu, u8 access_key,
				 enum gacc_mode mode, union asce asce, gpa_t gpa,
				 unsigned long ga, unsigned int len)
{
	u8 storage_key, access_control;
	unsigned long hva;
	int r;

	/* access key 0 matches any storage key -> allow */
	if (access_key == 0)
		return 0;
	/*
	 * caller needs to ensure that gfn is accessible, so we can
	 * assume that this cannot fail
	 */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));
	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	/* access key matches storage key -> allow */
	if (access_control == access_key)
		return 0;
	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* it is a fetch and fetch protection is off -> allow */
		if (!(storage_key & _PAGE_FP_BIT))
			return 0;
		if (fetch_prot_override_applicable(vcpu, mode, asce) &&
		    fetch_prot_override_applies(ga, len))
			return 0;
	}
	if (storage_prot_override_applicable(vcpu) &&
	    storage_prot_override_applies(access_control))
		return 0;
	return PGM_PROTECTION;
}
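
/*
 * Example (hypothetical values): storage key byte 0x28 has ACC = 2 and
 * the fetch-protection bit set. Checking it with access_key 3 fails even
 * for fetches, unless the fetch-protection override applies (effective
 * address below 2048) or the storage-protection override applies
 * (ACC == 9, which is not the case here).
 */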

/**
 * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
 * covering a logical range
 * @vcpu: virtual cpu
 * @ga: guest address, start of range
 * @ar: access register
 * @gpas: output argument, may be NULL
 * @len: length of range in bytes
 * @asce: address-space-control element to use for translation
 * @mode: access mode
 * @access_key: access key to match the range's storage keys against
 *
 * Translate a logical range to a series of guest absolute addresses,
 * such that the concatenation of page fragments starting at each gpa make up
 * the whole range.
 * The translation is performed as if done by the cpu for the given @asce, @ar,
 * @mode and state of the @vcpu.
 * If the translation causes an exception, its program interruption code is
 * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
 * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
 * a correct exception into the guest.
 * The resulting gpas are stored into @gpas, unless it is NULL.
 *
 * Note: All fragments except the first one start at the beginning of a page.
 * When deriving the boundaries of a fragment from a gpa, all but the last
 * fragment end at the end of the page.
 *
 * Return:
 * * 0		- success
 * * <0		- translation could not be performed, for example if guest
 *		  memory could not be accessed
 * * >0		- an access exception occurred. In this case the returned value
 *		  is the program interruption code and the contents of pgm may
 *		  be used to inject an exception into the guest.
 */
static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			       unsigned long *gpas, unsigned long len,
			       const union asce asce, enum gacc_mode mode,
			       u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned int offset = offset_in_page(ga);
	unsigned int fragment_len;
	int lap_enabled, rc = 0;
	enum prot_type prot;
	unsigned long gpa;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		if (psw_bits(*psw).dat) {
			rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
			if (rc < 0)
				return rc;
		} else {
			gpa = kvm_s390_real_to_abs(vcpu, ga);
			if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) {
				rc = PGM_ADDRESSING;
				prot = PROT_TYPE_DUMMY;
			}
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, prot);
		rc = vcpu_check_access_key(vcpu, access_key, mode, asce, gpa, ga,
					   fragment_len);
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_KEYC);
		if (gpas)
			*gpas++ = gpa;
		offset = 0;
		ga += fragment_len;
		len -= fragment_len;
	}
	return 0;
}
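
/*
 * Fragmentation example (hypothetical values): ga = 0x1ffe, len = 6
 * produces two fragments: 2 bytes at the end of the first page and
 * 4 bytes at the start of the next, so @gpas receives two absolute
 * addresses even though the logical range is contiguous.
 */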

static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			     void *data, unsigned int len)
{
	const unsigned int offset = offset_in_page(gpa);
	const gfn_t gfn = gpa_to_gfn(gpa);
	int rc;

	if (!gfn_to_memslot(kvm, gfn))
		return PGM_ADDRESSING;
	if (mode == GACC_STORE)
		rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
	else
		rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
	return rc;
}

static int
access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			   void *data, unsigned int len, u8 access_key)
{
	struct kvm_memory_slot *slot;
	bool writable;
	gfn_t gfn;
	hva_t hva;
	int rc;

	gfn = gpa >> PAGE_SHIFT;
	slot = gfn_to_memslot(kvm, gfn);
	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);

	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;
	/*
	 * Check if it's a read-only memslot, even though that cannot occur
	 * since those are unsupported.
	 * Don't try to actually handle that case.
	 */
	if (!writable && mode == GACC_STORE)
		return -EOPNOTSUPP;
	hva += offset_in_page(gpa);
	if (mode == GACC_STORE)
		rc = copy_to_user_key((void __user *)hva, data, len, access_key);
	else
		rc = copy_from_user_key(data, (void __user *)hva, len, access_key);
	if (rc)
		return PGM_PROTECTION;
	if (mode == GACC_STORE)
		mark_page_dirty_in_slot(kvm, slot, gfn);
	return 0;
}
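
/*
 * Design note (sketch): copy_to_user_key()/copy_from_user_key() perform
 * the copy with the given access key, so the storage-key check happens
 * in hardware during the move itself; a nonzero return is reported as
 * PGM_PROTECTION rather than being re-checked in software.
 */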

int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
			      unsigned long len, enum gacc_mode mode, u8 access_key)
{
	int offset = offset_in_page(gpa);
	int fragment_len;
	int rc;

	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		rc = access_guest_page_with_key(kvm, mode, gpa, data, fragment_len, access_key);
		if (rc)
			return rc;
		offset = 0;
		len -= fragment_len;
		data += fragment_len;
		gpa += fragment_len;
	}
	return 0;
}

int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			  void *data, unsigned long len, enum gacc_mode mode,
			  u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long nr_pages, idx;
	unsigned long gpa_array[2];
	unsigned int fragment_len;
	unsigned long *gpas;
	enum prot_type prot;
	int need_ipte_lock;
	union asce asce;
	bool try_storage_prot_override;
	bool try_fetch_prot_override;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	gpas = gpa_array;
	if (nr_pages > ARRAY_SIZE(gpa_array))
		gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
	if (!gpas)
		return -ENOMEM;
	try_fetch_prot_override = fetch_prot_override_applicable(vcpu, mode, asce);
	try_storage_prot_override = storage_prot_override_applicable(vcpu);
	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu->kvm);
	/*
	 * Since we do the access further down ultimately via a move instruction
	 * that does key checking and returns an error in case of a protection
	 * violation, we don't need to do the check during address translation.
	 * Skip it by passing access key 0, which matches any storage key,
	 * obviating the need for any further checks. As a result the check is
	 * handled entirely in hardware on access; we only need to take care to
	 * forego key protection checking if fetch protection override applies
	 * or retry with the special key 9 in case of storage protection
	 * override.
	 */
	rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode, 0);
	if (rc)
		goto out_unlock;
	for (idx = 0; idx < nr_pages; idx++) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
		if (try_fetch_prot_override && fetch_prot_override_applies(ga, fragment_len)) {
			rc = access_guest_page(vcpu->kvm, mode, gpas[idx],
					       data, fragment_len);
		} else {
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, access_key);
		}
		if (rc == PGM_PROTECTION && try_storage_prot_override)
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, PAGE_SPO_ACC);
		if (rc)
			break;
		len -= fragment_len;
		data += fragment_len;
		ga = kvm_s390_logical_to_effective(vcpu, ga + fragment_len);
	}
	if (rc > 0) {
		bool terminate = (mode == GACC_STORE) && (idx > 0);

		if (rc == PGM_PROTECTION)
			prot = PROT_TYPE_KEYC;
		else
			prot = PROT_TYPE_DUMMY;
		rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
	}
out_unlock:
	if (need_ipte_lock)
		ipte_unlock(vcpu->kvm);
	if (nr_pages > ARRAY_SIZE(gpa_array))
		vfree(gpas);
	return rc;
}
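
/*
 * Callers normally reach this through the wrappers in gaccess.h; a
 * typical pattern (sketch) for an instruction handler:
 *
 *	u64 val;
 *	rc = read_guest(vcpu, ga, ar, &val, sizeof(val));
 *	if (rc)
 *		return kvm_s390_inject_prog_cond(vcpu, rc);
 */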

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned int fragment_len;
	unsigned long gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
		rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
		len -= fragment_len;
		gra += fragment_len;
		data += fragment_len;
	}
	if (rc > 0)
		vcpu->arch.pgm.code = rc;
	return rc;
}

/**
 * cmpxchg_guest_abs_with_key() - Perform cmpxchg on guest absolute address.
 * @kvm: Virtual machine instance.
 * @gpa: Absolute guest address of the location to be changed.
 * @len: Operand length of the cmpxchg, required: 1 <= len <= 16. Providing a
 *       non-power of two will result in failure.
 * @old_addr: Pointer to old value. If the location at @gpa contains this value,
 *            the exchange will succeed. After calling cmpxchg_guest_abs_with_key()
 *            *@old_addr contains the value at @gpa before the attempt to
 *            exchange the value.
 * @new: The value to place at @gpa.
 * @access_key: The access key to use for the guest access.
 * @success: output value indicating if an exchange occurred.
 *
 * Atomically exchange the value at @gpa by @new, if it contains *@old_addr.
 * Honors storage keys.
 *
 * Return: * 0: successful exchange
 *	   * >0: a program interruption code indicating the reason cmpxchg could
 *	   not be attempted
 *	   * -EINVAL: address misaligned or len not power of two
 *	   * -EAGAIN: transient failure (len 1 or 2)
 *	   * -EOPNOTSUPP: read-only memslot (should never occur)
 */
int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len,
			       __uint128_t *old_addr, __uint128_t new,
			       u8 access_key, bool *success)
{
	gfn_t gfn = gpa_to_gfn(gpa);
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	bool writable;
	hva_t hva;
	int ret;

	if (!IS_ALIGNED(gpa, len))
		return -EINVAL;

	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;
	/*
	 * Check if it's a read-only memslot, even though that cannot occur
	 * since those are unsupported.
	 * Don't try to actually handle that case.
	 */
	if (!writable)
		return -EOPNOTSUPP;

	hva += offset_in_page(gpa);
	/*
	 * The cmpxchg_user_key macro depends on the type of "old", so we need
	 * a case for each valid length and get some code duplication as long
	 * as we don't introduce a new macro.
	 */
	switch (len) {
	case 1: {
		u8 old;

		ret = cmpxchg_user_key((u8 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 2: {
		u16 old;

		ret = cmpxchg_user_key((u16 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 4: {
		u32 old;

		ret = cmpxchg_user_key((u32 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 8: {
		u64 old;

		ret = cmpxchg_user_key((u64 __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	case 16: {
		__uint128_t old;

		ret = cmpxchg_user_key((__uint128_t __user *)hva, &old, *old_addr, new, access_key);
		*success = !ret && old == *old_addr;
		*old_addr = old;
		break;
	}
	default:
		return -EINVAL;
	}
	if (*success)
		mark_page_dirty_in_slot(kvm, slot, gfn);
	/*
	 * Assume that the fault is caused by protection, either key protection
	 * or user page write protection.
	 */
	if (ret == -EFAULT)
		ret = PGM_PROTECTION;
	return ret;
}
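
/*
 * Usage sketch (hypothetical caller, e.g. the KVM_S390_MEM_OP ioctl
 * path): a 4-byte compare-and-swap; on a clean miss (*success == false,
 * rc == 0) *old holds the value found at @gpa and the caller may retry.
 *
 *	__uint128_t old = expected;
 *	bool success;
 *	rc = cmpxchg_guest_abs_with_key(kvm, gpa, 4, &old, new, key, &success);
 */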

/**
 * guest_translate_address_with_key - translate guest logical into guest absolute address
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @gpa: Guest physical address
 * @mode: Translation access mode
 * @access_key: access key to match the storage key with
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
				     unsigned long *gpa, enum gacc_mode mode,
				     u8 access_key)
{
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode,
				   access_key);
}

/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @length: Length of test range
 * @mode: Translation access mode
 * @access_key: access key to match the storage keys with
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode, u8 access_key)
{
	union asce asce;
	int rc = 0;

	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	ipte_lock(vcpu->kvm);
	rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode,
				 access_key);
	ipte_unlock(vcpu->kvm);

	return rc;
}

/**
 * check_gpa_range - test a range of guest physical addresses for accessibility
 * @kvm: virtual machine instance
 * @gpa: guest physical address
 * @length: length of test range
 * @mode: access mode to test, relevant for storage keys
 * @access_key: access key to match the storage keys with
 */
int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
		    enum gacc_mode mode, u8 access_key)
{
	unsigned int fragment_len;
	int rc = 0;

	while (length && !rc) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length);
		rc = vm_check_access_key(kvm, access_key, mode, gpa);
		length -= fragment_len;
		gpa += fragment_len;
	}
	return rc;
}

/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @vcpu: virtual cpu
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;
	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
}
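
/*
 * Example: gra 0x1100 (4352) lies in the second low-address range
 * (4096..4607), so (0x1100 & ~0x11fful) == 0 and a store there is
 * rejected with PGM_PROTECTION whenever CR0.lap is set.
 */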

/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the beginning of the page table for the given address if
 *	 successful (return value 0), or to the first invalid DAT entry in
 *	 case of exceptions (return value > 0)
 * @dat_protection: referenced memory is write protected
 * @fake: pgt references contiguous guest memory block, not a pgtable
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct kvm *kvm;
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	kvm = sg->private;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	ptr = asce.rsto * PAGE_SIZE;
	if (asce.r) {
		*fake = 1;
		ptr = 0;
		asce.dt = ASCE_TYPE_REGION1;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !*fake)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}

	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			ptr += vaddr.rfx * _REGION1_SIZE;
			rfte.val = ptr;
			goto shadow_r2t;
		}
		*pgt = ptr + vaddr.rfx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE;
shadow_r2t:
		rfte.p |= *dat_protection;
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_r1_entry++;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			ptr += vaddr.rsx * _REGION2_SIZE;
			rste.val = ptr;
			goto shadow_r3t;
		}
		*pgt = ptr + vaddr.rsx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE;
shadow_r3t:
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_r2_entry++;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			ptr += vaddr.rtx * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		*pgt = ptr + vaddr.rtx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_r3_entry++;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			ptr += vaddr.sx * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		*pgt = ptr + vaddr.sx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			*fake = 1;
			ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto * (PAGE_SIZE / 2);
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
		kvm->stat.gmap_shadow_sg_entry++;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}
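
/*
 * Note on the "fake" path (sketch): for real-space ASCEs and large pages
 * there is no lower-level table in the parent, so the walk fabricates
 * entry values that simply point into the contiguous block
 * (e.g. ptr + vaddr.sx * _SEGMENT_SIZE) and tells the gmap code via
 * *fake that they must not be dereferenced as table origins.
 */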

/**
 * shadow_pgt_lookup() - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_lock in read.
 */
static int shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt,
			     int *dat_protection, int *fake)
{
	unsigned long pt_index;
	unsigned long *table;
	struct page *page;
	int rc;

	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		pt_index = gmap_pgste_get_pgt_addr(page_to_virt(page));
		*pgt = pt_index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(pt_index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
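
/*
 * Encoding note (sketch): the parent page-table address stored in the
 * pgste is at least 2k-aligned, so bit 2^0 is free and doubles as the
 * GMAP_SHADOW_FAKE_TABLE flag; the lookup above masks it out of *pgt
 * and reports it separately through *fake.
 */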

/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @datptr: will contain the address of the faulting DAT table entry, or of
 *	    the valid leaf, plus some flags
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr, unsigned long *datptr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt = 0;
	int dat_protection, fake;
	int rc;

	if (KVM_BUG_ON(!gmap_is_shadow(sg), vcpu->kvm))
		return -EFAULT;

	mmap_read_lock(sg->mm);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu->kvm);

	rc = shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		pte.val = pgt + vaddr.px * PAGE_SIZE;
		goto shadow_page;
	}

	switch (rc) {
	case PGM_SEGMENT_TRANSLATION:
	case PGM_REGION_THIRD_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_FIRST_TRANS:
		pgt |= PEI_NOT_PTE;
		break;
	case 0:
		pgt += vaddr.px * 8;
		rc = gmap_read_table(sg->parent, pgt, &pte.val);
	}
	if (datptr)
		*datptr = pgt | dat_protection * PEI_DAT_PROT;
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && pte.z)
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	vcpu->kvm->stat.gmap_shadow_pg_entry++;
	ipte_unlock(vcpu->kvm);
	mmap_read_unlock(sg->mm);
	return rc;
}