/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 64)) {
		vcpu->arch.sie_block->ecb3 |= 0x01;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	ar_t ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are
	 * always at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}
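/*
 * Handle STPX (STORE PREFIX): store the current prefix register value
 * at the word-aligned second-operand address.
 */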
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc)
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return rc;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = __skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	return 0;
}
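/*
 * Handle ISKE (INSERT STORAGE KEY EXTENDED): read the storage key of
 * the absolute page designated by reg2 into the low byte of reg1.
 */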
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
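/*
 * Handle SSKE (SET STORAGE KEY EXTENDED). The m3 field selects the
 * multiple-block (MB), conditional (MR/MC) and nonquiescing (NQ)
 * variants; m3 bits for facilities the guest does not have are
 * cleared before use.
 */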
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}
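/*
 * Handle TB (TEST BLOCK): after checking low-address protection and
 * that the block exists, the 4k block is cleared and reported usable
 * (condition code 0, register 0 set to 0).
 */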
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	ar_t ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
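/*
 * Common handler for the intercepted channel-I/O instructions. With
 * in-kernel channel-subsystem support only TPI (0xb236) and TSCH
 * (0xb235) are handled here; everything else is left to userspace.
 * Without css support, condition code 3 keeps the guest from issuing
 * further channel I/O instructions.
 */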
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	ar_t ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
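/*
 * Handle STIDP (STORE CPU ID): store the 8-byte cpu id of the vcpu
 * model at the doubleword-aligned second-operand address.
 */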
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	ar_t ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	ar_t ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}
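/*
 * Dispatch table for the 0xb2xx instructions handled in the kernel,
 * indexed by the low byte of the intercepted instruction (ipa).
 */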
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x14] = kvm_s390_handle_vsie,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}
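/*
 * Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION). The defines below
 * decode the function field in reg1: set key (SK), clear frame (CF),
 * usage indication (UI), frame-size code (FSC) and the NQ/MR/MC/key
 * controls used for the storage-key update.
 */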
#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Retry the ESSA instruction */
	kvm_s390_retry_instr(vcpu);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}
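/*
 * Dispatch table for the 0xb9xx instructions handled in the kernel,
 * indexed like b2_handlers by the low byte of ipa.
 */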
static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
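/*
 * LCTLG and STCTG are the EB-format counterparts of LCTL/STCTL: they
 * load and store full 8-byte control registers and require a
 * doubleword-aligned operand address.
 */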
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	ar_t ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
	[0x60] = handle_ri,
	[0x61] = handle_ri,
	[0x62] = handle_ri,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
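/*
 * Handle TPROT (TEST PROTECTION) for access key 0 only: cc 0 means
 * fetch and store are permitted, cc 1 store-protected (read-only),
 * cc 3 translation not available; nonzero access keys are passed to
 * userspace, and cc 2 (key-controlled protection) is not implemented.
 */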
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	ar_t ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;	/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x04] = handle_ptff,
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}