// SPDX-License-Identifier: GPL-2.0
/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/io.h>
#include <linux/mman.h>

#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include <asm/facility.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "gmap.h"

enum vsie_page_flags {
	VSIE_PAGE_IN_USE = 0,
};

struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
	/*
	 * the backup info for machine check. ensure it's at
	 * the same offset as that in struct sie_page!
	 */
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	/*
	 * The pinned original scb. Be aware that other VCPUs can modify
	 * it while we read from it. Values that are used for conditions or
	 * are reused conditionally, should be accessed via READ_ONCE.
	 */
	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
	/*
	 * Flags: must be set/cleared atomically after the vsie page can be
	 * looked up by other CPUs.
	 */
	unsigned long flags;			/* 0x0220 */
	/* address of the last reported fault to guest2 */
	unsigned long fault_addr;		/* 0x0228 */
	/* calculated guest addresses of satellite control blocks */
	gpa_t sca_gpa;				/* 0x0230 */
	gpa_t itdba_gpa;			/* 0x0238 */
	gpa_t gvrd_gpa;				/* 0x0240 */
	gpa_t riccbd_gpa;			/* 0x0248 */
	gpa_t sdnx_gpa;				/* 0x0250 */
	/*
	 * guest address of the original SCB. Remains set for free vsie
	 * pages, so we can properly look them up in our addr_to_page
	 * radix tree.
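	 * (entries are inserted in get_vsie_page() and removed there on
	 * reuse, or in kvm_s390_vsie_destroy())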
	 */
	gpa_t scb_gpa;				/* 0x0258 */
	/* the shadow gmap in use by the vsie_page */
	struct gmap_cache gmap_cache;		/* 0x0260 */
	__u8 reserved[0x0700 - 0x0278];		/* 0x0278 */
	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
};

static_assert(sizeof(struct vsie_page) == PAGE_SIZE);

/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
			     __u16 reason_code)
{
	scb->ipa = 0x1000;
	scb->ipb = ((__u32) reason_code) << 16;
	scb->icptcode = ICPT_VALIDITY;
	return 1;
}

/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}

/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}

/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* we don't allow ESA/390 guests */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);

	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
		return set_validity_icpt(scb_s, 0x0001U);
	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
		return set_validity_icpt(scb_s, 0x0007U);

	/* intervention requests will be set later */
	newflags = CPUSTAT_ZARCH;
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
		if (cpuflags & CPUSTAT_GED)
			return set_validity_icpt(scb_s, 0x0001U);
		newflags |= CPUSTAT_GED2;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
		newflags |= cpuflags & CPUSTAT_P;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
		newflags |= cpuflags & CPUSTAT_KSS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}

/* Copy to APCB FORMAT1 from APCB FORMAT0 */
static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
			unsigned long crycb_gpa, struct kvm_s390_apcb1 *apcb_h)
{
	struct kvm_s390_apcb0 tmp;
	unsigned long apcb_gpa;

	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);

	if (read_guest_real(vcpu, apcb_gpa, &tmp,
			    sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;

	return 0;
}

/**
 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @crycb_gpa: guest physical address to start of original guest crycb
 * @apcb_h: pointer to start of apcb in the guest1 crycb
 *
 * Returns 0, or -EFAULT on error reading the guest apcb
 */
static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long crycb_gpa, unsigned long *apcb_h)
{
	unsigned long apcb_gpa;

	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);

	if (read_guest_real(vcpu, apcb_gpa, apcb_s,
			    sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));

	return 0;
}

/**
 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @crycb_gpa: guest physical address to start of original guest crycb
 * @apcb_h: pointer to start of apcb in the host
 *
 * Returns 0, or -EFAULT on error reading the guest apcb
 */
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long crycb_gpa,
			unsigned long *apcb_h)
{
	unsigned long apcb_gpa;

	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb1);

	if (read_guest_real(vcpu, apcb_gpa, apcb_s,
			    sizeof(struct kvm_s390_apcb1)))
		return -EFAULT;

	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));

	return 0;
}

/**
 * setup_apcb - Create a shadow copy of the apcb.
 * @vcpu: pointer to the virtual CPU
 * @crycb_s: pointer to shadow crycb
 * @crycb_gpa: guest physical address of original guest crycb
 * @crycb_h: pointer to the host crycb
 * @fmt_o: format of the original guest crycb.
 * @fmt_h: format of the host crycb.
 *
 * Checks the compatibility between the guest and host crycb and calls the
 * appropriate copy function.
 *
 * Returns 0 on success, or an error number if the guest and host crycb
 * are incompatible.
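 * (-EACCES if the guest crycb crosses a page boundary, -EINVAL on an
 * unsupported format combination, -EFAULT on errors reading the guest apcb)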
 */
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
		      const u32 crycb_gpa,
		      struct kvm_s390_crypto_cb *crycb_h,
		      int fmt_o, int fmt_h)
{
	switch (fmt_o) {
	case CRYCB_FORMAT2:
		if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 256) & PAGE_MASK))
			return -EACCES;
		if (fmt_h != CRYCB_FORMAT2)
			return -EINVAL;
		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
				    crycb_gpa,
				    (unsigned long *)&crycb_h->apcb1);
	case CRYCB_FORMAT1:
		switch (fmt_h) {
		case CRYCB_FORMAT2:
			return setup_apcb10(vcpu, &crycb_s->apcb1,
					    crycb_gpa,
					    &crycb_h->apcb1);
		case CRYCB_FORMAT1:
			return setup_apcb00(vcpu,
					    (unsigned long *) &crycb_s->apcb0,
					    crycb_gpa,
					    (unsigned long *) &crycb_h->apcb0);
		}
		break;
	case CRYCB_FORMAT0:
		if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 32) & PAGE_MASK))
			return -EACCES;

		switch (fmt_h) {
		case CRYCB_FORMAT2:
			return setup_apcb10(vcpu, &crycb_s->apcb1,
					    crycb_gpa,
					    &crycb_h->apcb1);
		case CRYCB_FORMAT1:
		case CRYCB_FORMAT0:
			return setup_apcb00(vcpu,
					    (unsigned long *) &crycb_s->apcb0,
					    crycb_gpa,
					    (unsigned long *) &crycb_h->apcb0);
		}
	}
	return -EINVAL;
}

/**
 * shadow_crycb - Create a shadow copy of the crycb block
 * @vcpu: a pointer to the virtual CPU
 * @vsie_page: a pointer to internal data used for the vSIE
 *
 * Create a shadow copy of the crycb block and setup key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We accept format-1 or format-2, but we convert format-1 into format-2
 * in the shadow CRYCB.
 * Using format-2 enables the firmware to choose the right format when
 * scheduling the SIE.
 * There is nothing to do for format-0.
 *
 * This function centralizes the issuing of set_validity_icpt() for all
 * the subfunctions working on the crycb.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
	unsigned long *b1, *b2;
	u8 ecb3_flags;
	u32 ecd_flags;
	int apie_h;
	int apie_s;
	int key_msk = test_kvm_facility(vcpu->kvm, 76);
	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
	int ret = 0;

	scb_s->crycbd = 0;

	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
	apie_s = apie_h & scb_o->eca;
	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
		return 0;

	if (!crycb_addr)
		return set_validity_icpt(scb_s, 0x0039U);

	if (fmt_o == CRYCB_FORMAT1)
		if ((crycb_addr & PAGE_MASK) !=
		    ((crycb_addr + 128) & PAGE_MASK))
			return set_validity_icpt(scb_s, 0x003CU);

	if (apie_s) {
		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
				 vcpu->kvm->arch.crypto.crycb,
				 fmt_o, fmt_h);
		if (ret)
			goto end;
		scb_s->eca |= scb_o->eca & ECA_APIE;
	}

	/* we may only allow it if enabled for guest 2 */
	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
		     (ECB3_AES | ECB3_DEA);
	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd &
		    (ECD_ECC | ECD_HMAC);
	if (!ecb3_flags && !ecd_flags)
		goto end;

	/* copy only the wrapping keys */
	if (read_guest_real(vcpu, crycb_addr + 72,
			    vsie_page->crycb.dea_wrapping_key_mask, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	scb_s->ecb3 |= ecb3_flags;
	scb_s->ecd |= ecd_flags;

	/* xor both blocks in one run */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
	     vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
end:
	switch (ret) {
	case -EINVAL:
		return set_validity_icpt(scb_s, 0x0022U);
	case -EFAULT:
		return set_validity_icpt(scb_s, 0x0035U);
	case -EACCES:
		return set_validity_icpt(scb_s, 0x003CU);
	}
	scb_s->crycbd = (u32)virt_to_phys(&vsie_page->crycb) | CRYCB_FORMAT2;
	return 0;
}

/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_ibc = scb_o->ibc;
	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;

	scb_s->ibc = 0;
	/* ibc installed in g2 and requested for g3 */
	if (vcpu->kvm->arch.model.ibc && new_ibc) {
		scb_s->ibc = new_ibc;
		/* take care of the minimum ibc level of the machine */
		if (scb_s->ibc < min_ibc)
			scb_s->ibc = min_ibc;
		/* take care of the maximum ibc level set for the guest */
		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
			scb_s->ibc = vcpu->kvm->arch.model.ibc;
	}
}

/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

	/* interception */
	scb_o->icptcode = scb_s->icptcode;
	scb_o->icptstatus = scb_s->icptstatus;
	scb_o->ipa = scb_s->ipa;
	scb_o->ipb = scb_s->ipb;
	scb_o->gbea = scb_s->gbea;

	/* timer */
	scb_o->cputm = scb_s->cputm;
	scb_o->ckc = scb_s->ckc;
	scb_o->todpr = scb_s->todpr;

	/* guest state */
	scb_o->gpsw = scb_s->gpsw;
	scb_o->gg14 = scb_s->gg14;
	scb_o->gg15 = scb_s->gg15;
	memcpy(scb_o->gcr, scb_s->gcr, 128);
	scb_o->pp = scb_s->pp;

	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82)) {
		scb_o->fpf &= ~FPF_BPBC;
		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
	}

	/* interrupt intercept */
	switch (scb_s->icptcode) {
	case ICPT_PROGI:
	case ICPT_INSTPROGI:
	case ICPT_EXTINT:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
		break;
	}

	if (scb_s->ihcpu != 0xffffU)
		scb_o->ihcpu = scb_s->ihcpu;
}

/*
 * Setup the shadow scb by copying and checking the relevant parts of the g2
 * provided scb.
 *
 * Returns: - 0 if the scb has been shadowed
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_prefix = scb_o->prefix;
	const uint32_t new_prefix = READ_ONCE(__new_prefix);
	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
	bool had_tx = scb_s->ecb & ECB_TE;
	unsigned long new_mso = 0;
	int rc;

	/* make sure we don't have any leftovers when reusing the scb */
	scb_s->icptcode = 0;
	scb_s->eca = 0;
	scb_s->ecb = 0;
	scb_s->ecb2 = 0;
	scb_s->ecb3 = 0;
	scb_s->ecd = 0;
	scb_s->fac = 0;
	scb_s->fpf = 0;

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	/* timer */
	scb_s->cputm = scb_o->cputm;
	scb_s->ckc = scb_o->ckc;
	scb_s->todpr = scb_o->todpr;
	scb_s->epoch = scb_o->epoch;

	/* guest state */
	scb_s->gpsw = scb_o->gpsw;
	scb_s->gg14 = scb_o->gg14;
	scb_s->gg15 = scb_o->gg15;
	memcpy(scb_s->gcr, scb_o->gcr, 128);
	scb_s->pp = scb_o->pp;

	/* interception / execution handling */
	scb_s->gbea = scb_o->gbea;
	scb_s->lctl = scb_o->lctl;
	scb_s->svcc = scb_o->svcc;
	scb_s->ictl = scb_o->ictl;
	/*
	 * SKEY handling functions can't deal with false setting of PTE invalid
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide own emulation handlers.
	 */
	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
	/* if the hva of the prefix changes, we have to remap the prefix */
	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
		prefix_unmapped(vsie_page);
	/* SIE will do mso/msl validity and exception checks for us */
	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
	scb_s->mso = new_mso;
	scb_s->prefix = new_prefix;

	/* We definitely have to flush the tlb if this scb never ran */
	if (scb_s->ihcpu != 0xffffU)
		scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
	/*
	 * CPU Topology
	 * This facility only uses the utility field of the SCA and none of
	 * the cpu entries that are problematic with the other interpretation
	 * facilities so we can pass it through
	 */
	if (test_kvm_facility(vcpu->kvm, 11))
		scb_s->ecb |= scb_o->ecb & ECB_PTF;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
		/* remap the prefix if tx is toggled on */
		if (!had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= ECB_TE;
	}
	/* specification exception interpretation */
	scb_s->ecb |= scb_o->ecb & ECB_SPECI;
	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82))
		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
	/* SIMD */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & ECA_VX;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
	/* Instruction Execution Prevention */
	if (test_kvm_facility(vcpu->kvm, 130))
		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
	/* Guarded Storage */
	if (test_kvm_facility(vcpu->kvm, 133)) {
		scb_s->ecb |= scb_o->ecb & ECB_GS;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & ECA_SII;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & ECA_IB;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & ECA_CEI;
	/* Epoch Extension */
	if (test_kvm_facility(vcpu->kvm, 139)) {
		scb_s->ecd |= scb_o->ecd & ECD_MEF;
		scb_s->epdx = scb_o->epdx;
	}

	/* etoken */
	if (test_kvm_facility(vcpu->kvm, 156))
		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;

	scb_s->hpid = HPID_VSIE;
	scb_s->cpnc = scb_o->cpnc;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}

void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, gpa_t start, gpa_t end)
{
	struct vsie_page *cur, *next;
	unsigned long prefix;

	KVM_BUG_ON(!test_bit(GMAP_FLAG_SHADOW, &gmap->flags), gmap->kvm);
	/*
	 * Only new shadow blocks are added to the list during runtime,
	 * therefore we can safely reference them all the time.
	 */
	list_for_each_entry_safe(cur, next, &gmap->scb_users, gmap_cache.list) {
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		/* with mso/msl, the prefix lies at an offset */
		prefix += cur->scb_s.mso;
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}

/*
 * Map the first prefix page and if tx is enabled also the second prefix page.
 *
 * The prefix will be protected, a gmap notifier will inform about unmaps.
 * The shadow scb must not be executed until the prefix is remapped, this is
 * guaranteed by properly handling PROG_REQUEST.
 *
 * Returns: - 0 if successfully mapped or already mapped
 *          - > 0 if control has to be given to guest 2
 *          - -EAGAIN if the caller can retry immediately
 *          - -ENOMEM if out of memory
 */
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struct gmap *sg)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped so we can catch any concurrent unmappers */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at offset *mso* */
	prefix += scb_s->mso;

	rc = gaccess_shadow_fault(vcpu, sg, prefix, NULL, true);
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = gaccess_shadow_fault(vcpu, sg, prefix + PAGE_SIZE, NULL, true);
	/*
	 * We don't have to mprotect, we will be called for all unshadows.
	 * SIE will detect if protection applies and trigger a validity.
	 */
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);
	return rc;
}

/*
 * Pin the guest page given by gpa and set hpa to the pinned host address.
 * Will always be pinned writable.
 *
 * Returns: - 0 on success
 *          - -EINVAL if the gpa is not valid guest storage
 */
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;

	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
	if (!page)
		return -EINVAL;
	*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
	return 0;
}

/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
	kvm_release_page_dirty(pfn_to_page(hpa >> PAGE_SHIFT));
	/* mark the page always as dirty for migration */
	mark_page_dirty(kvm, gpa_to_gfn(gpa));
}

/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;

	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
		vsie_page->sca_gpa = 0;
		scb_s->scaol = 0;
		scb_s->scaoh = 0;
	}

	hpa = scb_s->itdba;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
		vsie_page->itdba_gpa = 0;
		scb_s->itdba = 0;
	}

	hpa = scb_s->gvrd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
		vsie_page->gvrd_gpa = 0;
		scb_s->gvrd = 0;
	}

	hpa = scb_s->riccbd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
		vsie_page->riccbd_gpa = 0;
		scb_s->riccbd = 0;
	}

	hpa = scb_s->sdnxo;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
		vsie_page->sdnx_gpa = 0;
		scb_s->sdnxo = 0;
	}
}

/*
 * Instead of shadowing some blocks, we can simply forward them because the
 * addresses in the scb are 64 bit long.
 *
 * This works as long as the data lies in one page. If blocks ever exceed one
 * page, we have to fall back to shadowing.
 *
 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 *
 * Returns: - 0 if all blocks were pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc = 0;

	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
	if (gpa) {
		if (gpa < 2 * PAGE_SIZE)
			rc = set_validity_icpt(scb_s, 0x0038U);
		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
			rc = set_validity_icpt(scb_s, 0x0011U);
		else if ((gpa & PAGE_MASK) !=
			 ((gpa + offsetof(struct bsca_block, cpu[0]) - 1) & PAGE_MASK))
			rc = set_validity_icpt(scb_s, 0x003bU);
		if (!rc) {
			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
			if (rc)
				rc = set_validity_icpt(scb_s, 0x0034U);
		}
		if (rc)
			goto unpin;
		vsie_page->sca_gpa = gpa;
		scb_s->scaoh = (u32)((u64)hpa >> 32);
		scb_s->scaol = (u32)(u64)hpa;
	}

	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
	if (gpa && (scb_s->ecb & ECB_TE)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		/* 256 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		vsie_page->itdba_gpa = gpa;
		scb_s->itdba = hpa;
	}

	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		/*
		 * 512 bytes of vector registers cannot cross page boundaries;
		 * if this block gets bigger, we have to shadow it.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		vsie_page->gvrd_gpa = gpa;
		scb_s->gvrd = hpa;
	}

	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* 64 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* Validity 0x0044 will be checked by SIE */
		vsie_page->riccbd_gpa = gpa;
		scb_s->riccbd = hpa;
	}
	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
	    (scb_s->ecd & ECD_ETOKENF)) {
		unsigned long sdnxc;

		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
		if (!gpa || gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		if (sdnxc < 6 || sdnxc > 12) {
			rc = set_validity_icpt(scb_s, 0x10b1U);
			goto unpin;
		}
		if (gpa & ((1 << sdnxc) - 1)) {
			rc = set_validity_icpt(scb_s, 0x10b2U);
			goto unpin;
		}
		/* Due to alignment rules (checked above) this cannot
		 * cross page boundaries
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		vsie_page->sdnx_gpa = gpa;
		scb_s->sdnxo = hpa | sdnxc;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
	return rc;
}

/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	hpa_t hpa = virt_to_phys(vsie_page->scb_o);

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}

/*
 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 *
 * Returns: - 0 if the scb was pinned.
 *          - > 0 if control has to be given to guest 2
 */
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		WARN_ON_ONCE(rc);
		return 1;
	}
	vsie_page->scb_o = phys_to_virt(hpa);
	return 0;
}

/*
 * Inject a fault into guest 2.
 *
 * Returns: - > 0 if control has to be given to guest 2
 *            < 0 if an error occurred during injection.
 */
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
			bool write_flag)
{
	struct kvm_s390_pgm_info pgm = {
		.code = code,
		.trans_exc_code =
			/* 0-51: virtual address */
			(vaddr & 0xfffffffffffff000UL) |
			/* 52-53: store / fetch */
			(((unsigned int) !write_flag) + 1) << 10,
			/* 62-63: asce id (always primary == 0) */
		.exc_access_id = 0, /* always primary */
		.op_access_id = 0, /* not MVPG */
	};
	int rc;

	if (code == PGM_PROTECTION)
		pgm.trans_exc_code |= 0x4UL;

	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
	return rc ? rc : 1;
}

/*
 * Handle a fault during vsie execution on a gmap shadow.
 *
 * Returns: - 0 if the fault was resolved
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struct gmap *sg)
{
	bool wr = kvm_s390_cur_gmap_fault_is_write();
	int rc;

	if ((current->thread.gmap_int_code & PGM_INT_CODE_MASK) == PGM_PROTECTION)
		/* we can directly forward all protection exceptions */
		return inject_fault(vcpu, PGM_PROTECTION,
				    current->thread.gmap_teid.addr * PAGE_SIZE, 1);

	rc = gaccess_shadow_fault(vcpu, sg, current->thread.gmap_teid.addr * PAGE_SIZE, NULL, wr);
	if (rc > 0) {
		rc = inject_fault(vcpu, rc,
				  current->thread.gmap_teid.addr * PAGE_SIZE, wr);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_teid.addr * PAGE_SIZE;
	}
	return rc;
}

/*
 * Retry the previous fault that required guest 2 intervention. This avoids
 * one superfluous SIE re-entry and direct exit.
 *
 * Will ignore any errors. The next SIE fault will do proper fault handling.
 */
static void handle_last_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struct gmap *sg)
{
	if (vsie_page->fault_addr)
		gaccess_shadow_fault(vcpu, sg, vsie_page->fault_addr, NULL, true);
	vsie_page->fault_addr = 0;
}

static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
	vsie_page->scb_s.icptcode = 0;
}

/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}

/*
 * Try to shadow + enable the guest 2 provided facility list.
 * Retry instruction execution if enabled for and provided by guest 2.
 *
 * Returns: - 0 if handled (retry or guest 2 icpt)
 *          - > 0 if control has to be given to guest 2
 */
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = READ_ONCE(vsie_page->scb_o->fac);

	/*
	 * Alternate-STFLE-Interpretive-Execution facilities are not supported
	 * -> format-0 flcb
	 */
	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		/*
		 * The facility list origin (FLO) is in bits 1 - 28 of the FLD
		 * so we need to mask here before reading.
		 */
		fac = fac & 0x7ffffff8U;
		/*
		 * format-0 -> size of nested guest's facility list == guest's size
		 * guest's size == host's size, since STFLE is interpretatively executed
		 * using a format-0 for the guest, too.
		 */
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    stfle_size() * sizeof(u64)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (u32)virt_to_phys(&vsie_page->fac);
	}
	return 0;
}

/*
 * Get a register for a nested guest.
 * @vcpu: the vcpu of the guest
 * @vsie_page: the vsie_page for the nested guest
 * @reg: the register number, the upper 4 bits are ignored.
 * returns: the value of the register.
 */
static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
{
	/* no need to validate the parameter and/or perform error handling */
	reg &= 0xf;
	switch (reg) {
	case 15:
		return vsie_page->scb_s.gg15;
	case 14:
		return vsie_page->scb_s.gg14;
	default:
		return vcpu->run->s.regs.gprs[reg];
	}
}

static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struct gmap *sg)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	unsigned long src, dest, mask, prefix;
	u64 *pei_block = &vsie_page->scb_o->mcic;
	union mvpg_pei pei_dest, pei_src;
	int edat, rc_dest, rc_src;
	union ctlreg0 cr0;

	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
	prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;

	dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
	dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
	src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
	src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;

	rc_dest = gaccess_shadow_fault(vcpu, sg, dest, &pei_dest, true);
	rc_src = gaccess_shadow_fault(vcpu, sg, src, &pei_src, false);
	/*
	 * Either everything went well, or something non-critical went wrong
	 * e.g. because of a race. In either case, simply retry.
	 */
	if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
		retry_vsie_icpt(vsie_page);
		return -EAGAIN;
	}
	/* Something more serious went wrong, propagate the error */
	if (rc_dest < 0)
		return rc_dest;
	if (rc_src < 0)
		return rc_src;

	/* The only possible suppressing exception: just deliver it */
	if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
		clear_vsie_icpt(vsie_page);
		rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
		WARN_ON_ONCE(rc_dest);
		return 1;
	}

	/*
	 * Forward the PEI intercept to the guest if it was a page fault, or
	 * also for segment and region table faults if EDAT applies.
	 */
	if (edat) {
		rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
		rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
	} else {
		rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
		rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
	}
	if (!rc_dest && !rc_src) {
		pei_block[0] = pei_dest.val;
		pei_block[1] = pei_src.val;
		return 1;
	}

	retry_vsie_icpt(vsie_page);

	/*
	 * The host has edat, and the guest does not, or it was an ASCE type
	 * exception. The host needs to inject the appropriate DAT interrupts
	 * into the guest.
	 */
	if (rc_dest)
		return inject_fault(vcpu, rc_dest, dest, 1);
	return inject_fault(vcpu, rc_src, src, 0);
}

/*
 * Run the vsie on a shadow scb and a shadow gmap, without any further
 * sanity checks, handling SIE faults.
 *
 * Returns: - 0 if everything went fine
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struct gmap *sg)
	__releases(vcpu->kvm->srcu)
	__acquires(vcpu->kvm->srcu)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int guest_bp_isolation;
	int rc = 0;

	handle_last_fault(vcpu, vsie_page, sg);

	kvm_vcpu_srcu_read_unlock(vcpu);

	/* save current guest state of bp isolation override */
	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);

	/*
	 * The guest is running with BPBC, so we have to force it on for our
	 * nested guest. This is done by enabling BPBC globally, so the BPBC
	 * control in the SCB (which the nested guest can modify) is simply
	 * ignored.
	 */
	if (test_kvm_facility(vcpu->kvm, 82) &&
	    vcpu->arch.sie_block->fpf & FPF_BPBC)
		set_thread_flag(TIF_ISOLATE_BP_GUEST);

	/*
	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
	 * and VCPU requests also hinder the vSIE from running and lead
	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
	 * also kick the vSIE.
	 */
	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
	current->thread.gmap_int_code = 0;
	barrier();
	if (!kvm_s390_vcpu_sie_inhibited(vcpu)) {
xfer_to_guest_mode_check:
		local_irq_disable();
		xfer_to_guest_mode_prepare();
		if (xfer_to_guest_mode_work_pending()) {
			local_irq_enable();
			rc = kvm_xfer_to_guest_mode_handle_work(vcpu);
			if (rc)
				goto skip_sie;
			goto xfer_to_guest_mode_check;
		}
		guest_timing_enter_irqoff();
		rc = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, sg->asce.val);
		guest_timing_exit_irqoff();
		local_irq_enable();
	}

skip_sie:
	barrier();
	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;

	/* restore guest state for bp isolation override */
	if (!guest_bp_isolation)
		clear_thread_flag(TIF_ISOLATE_BP_GUEST);

	kvm_vcpu_srcu_read_lock(vcpu);

	if (rc == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
		return 0;
	}

	if (rc > 0)
		rc = 0; /* we could still have an icpt */
	else if (current->thread.gmap_int_code)
		return handle_fault(vcpu, vsie_page, sg);

	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2 - must have been a kick */
		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_VALIDITY:
		if ((scb_s->ipa & 0xf000) != 0xf000)
			scb_s->ipa += 0x1000;
		break;
	case ICPT_PARTEXEC:
		if (scb_s->ipa == 0xb254)
			rc = vsie_handle_mvpg(vcpu, vsie_page, sg);
		break;
	}
	return rc;
}

static void release_gmap_shadow(struct vsie_page *vsie_page)
{
	struct gmap *gmap = vsie_page->gmap_cache.gmap;

	lockdep_assert_held(&gmap->kvm->arch.gmap->children_lock);

	list_del(&vsie_page->gmap_cache.list);
	vsie_page->gmap_cache.gmap = NULL;
	prefix_unmapped(vsie_page);

	if (list_empty(&gmap->scb_users)) {
		gmap_remove_child(gmap);
		gmap_put(gmap);
	}
}

static struct gmap *acquire_gmap_shadow(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	union ctlreg0 cr0;
	struct gmap *gmap;
	union asce asce;
	int edat;

	asce.val = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	scoped_guard(spinlock, &vcpu->kvm->arch.gmap->children_lock) {
		gmap = vsie_page->gmap_cache.gmap;
		if (gmap) {
			/*
			 * ASCE or EDAT could have changed since last icpt, or the gmap
			 * we're holding has been unshadowed. If the gmap is still valid,
			 * we can safely reuse it.
			 */
			if (gmap_is_shadow_valid(gmap, asce, edat)) {
				vcpu->kvm->stat.gmap_shadow_reuse++;
				gmap_get(gmap);
				return gmap;
			}
			/* release the old shadow and mark the prefix as unmapped */
			release_gmap_shadow(vsie_page);
		}
	}
again:
	gmap = gmap_create_shadow(vcpu->arch.mc, vcpu->kvm->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return gmap;
	scoped_guard(spinlock, &vcpu->kvm->arch.gmap->children_lock) {
		/* unlikely race condition, remove the previous shadow */
		if (vsie_page->gmap_cache.gmap)
			release_gmap_shadow(vsie_page);
		if (!gmap->parent) {
			gmap_put(gmap);
			goto again;
		}
		vcpu->kvm->stat.gmap_shadow_create++;
		list_add(&vsie_page->gmap_cache.list, &gmap->scb_users);
		vsie_page->gmap_cache.gmap = gmap;
		prefix_unmapped(vsie_page);
	}
	return gmap;
}

/*
 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 */
static void register_shadow_scb(struct kvm_vcpu *vcpu,
				struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;

	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
	/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore the vsie -> Simulate Wait state.
	 */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	/*
	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
	 */
	preempt_disable();
	scb_s->epoch += vcpu->kvm->arch.epoch;

	if (scb_s->ecd & ECD_MEF) {
		scb_s->epdx += vcpu->kvm->arch.epdx;
		if (scb_s->epoch < vcpu->kvm->arch.epoch)
			scb_s->epdx += 1;
	}

	preempt_enable();
}

/*
 * Unregister a shadow scb from a VCPU.
 */
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}

/*
 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 * prefix pages and faults.
 *
 * Returns: - 0 if no errors occurred
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct gmap *sg;
	int rc = 0;

	while (1) {
		sg = acquire_gmap_shadow(vcpu, vsie_page);
		if (IS_ERR(sg)) {
			rc = PTR_ERR(sg);
			sg = NULL;
		}
		if (!rc)
			rc = map_prefix(vcpu, vsie_page, sg);
		if (!rc) {
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page, sg);
		}
		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

		if (rc == -EAGAIN)
			rc = 0;

		/*
		 * Exit the loop if the guest needs to process the intercept
		 */
		if (rc || scb_s->icptcode)
			break;

		/*
		 * Exit the loop if the host needs to process an intercept,
		 * but rewind the PSW to re-enter SIE once that's completed
		 * instead of passing a "no action" intercept to the guest.
		 */
		if (kvm_s390_vcpu_has_irq(vcpu, 0) ||
		    kvm_s390_vcpu_sie_inhibited(vcpu)) {
			kvm_s390_rewind_psw(vcpu, 4);
			break;
		}
		if (sg)
			sg = gmap_put(sg);
		cond_resched();
	}

	if (rc == -EFAULT) {
		/*
		 * Addressing exceptions are always presented as intercepts.
		 * As addressing exceptions are suppressing and our guest 3 PSW
		 * points at the responsible instruction, we have to
		 * forward the PSW and set the ilc. If we can't read guest 3
		 * instruction, we can use an arbitrary ilc. Let's always use
		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
		 * memory. (we could also fake the shadow so the hardware
		 * handles it).
		 */
		scb_s->icptcode = ICPT_PROGI;
		scb_s->iprcc = PGM_ADDRESSING;
		scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
		rc = 1;
	}
	return rc;
}

/* Try getting a given vsie page, returning "true" on success. */
static inline bool try_get_vsie_page(struct vsie_page *vsie_page)
{
	if (test_bit(VSIE_PAGE_IN_USE, &vsie_page->flags))
		return false;
	return !test_and_set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
}

/* Put a vsie page acquired through get_vsie_page / try_get_vsie_page. */
static void put_vsie_page(struct vsie_page *vsie_page)
{
	clear_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
}

/*
 * Get or create a vsie page for a scb address.
 *
 * Returns: - address of a vsie page (cached or new one)
 *          - NULL if the same scb address is already used by another VCPU
 *          - ERR_PTR(-ENOMEM) if out of memory
 */
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
	struct vsie_page *vsie_page;
	int nr_vcpus;

	rcu_read_lock();
	vsie_page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	rcu_read_unlock();
	if (vsie_page) {
		if (try_get_vsie_page(vsie_page)) {
			if (vsie_page->scb_gpa == addr)
				return vsie_page;
			/*
			 * We raced with someone reusing + putting this vsie
			 * page before we grabbed it.
			 */
			put_vsie_page(vsie_page);
		}
	}

	/*
	 * We want at least #online_vcpus shadows, so every VCPU can execute
	 * the VSIE in parallel.
	 */
	nr_vcpus = atomic_read(&kvm->online_vcpus);

	mutex_lock(&kvm->arch.vsie.mutex);
	if (kvm->arch.vsie.page_count < nr_vcpus) {
		vsie_page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
		if (!vsie_page) {
			mutex_unlock(&kvm->arch.vsie.mutex);
			return ERR_PTR(-ENOMEM);
		}
		__set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = vsie_page;
		kvm->arch.vsie.page_count++;
	} else {
		/* reuse an existing entry that belongs to nobody */
		while (true) {
			vsie_page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
			if (try_get_vsie_page(vsie_page))
				break;
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
		if (vsie_page->scb_gpa != ULONG_MAX)
			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
					  vsie_page->scb_gpa >> 9);
	}
	/* Mark it as invalid until it resides in the tree. */
	vsie_page->scb_gpa = ULONG_MAX;

	/* Double use of the same address or allocation failure. */
	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, vsie_page)) {
		put_vsie_page(vsie_page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
	vsie_page->scb_gpa = addr;
	mutex_unlock(&kvm->arch.vsie.mutex);

	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
	if (vsie_page->gmap_cache.gmap) {
		scoped_guard(spinlock, &kvm->arch.gmap->children_lock)
			if (vsie_page->gmap_cache.gmap)
				release_gmap_shadow(vsie_page);
	}
	prefix_unmapped(vsie_page);
	vsie_page->fault_addr = 0;
	vsie_page->scb_s.ihcpu = 0xffffU;
	return vsie_page;
}

int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
	struct vsie_page *vsie_page;
	unsigned long scb_addr;
	int rc;

	vcpu->stat.instruction_sie++;
	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

	/* 512 byte alignment */
	if (unlikely(scb_addr & 0x1ffUL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (kvm_s390_vcpu_has_irq(vcpu, 0) || kvm_s390_vcpu_sie_inhibited(vcpu)) {
		kvm_s390_rewind_psw(vcpu, 4);
		return 0;
	}

	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page)) {
		return PTR_ERR(vsie_page);
	} else if (!vsie_page) {
		/* double use of sie control block - simply do nothing */
		kvm_s390_rewind_psw(vcpu, 4);
		return 0;
	}

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (rc)
		goto out_put;
	rc = shadow_scb(vcpu, vsie_page);
	if (rc)
		goto out_unpin_scb;
	rc = pin_blocks(vcpu, vsie_page);
	if (rc)
		goto out_unshadow;
	register_shadow_scb(vcpu, vsie_page);
	rc = vsie_run(vcpu, vsie_page);
	unregister_shadow_scb(vcpu);
	unpin_blocks(vcpu, vsie_page);
out_unshadow:
	unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
	unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
	put_vsie_page(vsie_page);

	return rc < 0 ? rc : 0;
}

/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL_ACCOUNT);
}

/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		vsie_page = kvm->arch.vsie.pages[i];
		scoped_guard(spinlock, &kvm->arch.gmap->children_lock)
			if (vsie_page->gmap_cache.gmap)
				release_gmap_shadow(vsie_page);
		kvm->arch.vsie.pages[i] = NULL;
		/* free the radix tree entry */
		if (vsie_page->scb_gpa != ULONG_MAX)
			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
					  vsie_page->scb_gpa >> 9);
		free_page((unsigned long)vsie_page);
	}
	kvm->arch.vsie.page_count = 0;
	mutex_unlock(&kvm->arch.vsie.mutex);
}

void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);

	/*
	 * Even if the VCPU lets go of the shadow sie block reference, it is
	 * still valid in the cache. So we can safely kick it.
	 */
	if (scb) {
		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
		if (scb->prog0c & PROG_IN_SIE)
			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
	}
}