1 /* 2 * handling privileged instructions 3 * 4 * Copyright IBM Corp. 2008, 2013 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License (version 2 only) 8 * as published by the Free Software Foundation. 9 * 10 * Author(s): Carsten Otte <cotte@de.ibm.com> 11 * Christian Borntraeger <borntraeger@de.ibm.com> 12 */ 13 14 #include <linux/kvm.h> 15 #include <linux/gfp.h> 16 #include <linux/errno.h> 17 #include <linux/compat.h> 18 #include <asm/asm-offsets.h> 19 #include <asm/facility.h> 20 #include <asm/current.h> 21 #include <asm/debug.h> 22 #include <asm/ebcdic.h> 23 #include <asm/sysinfo.h> 24 #include <asm/pgtable.h> 25 #include <asm/pgalloc.h> 26 #include <asm/io.h> 27 #include <asm/ptrace.h> 28 #include <asm/compat.h> 29 #include "gaccess.h" 30 #include "kvm-s390.h" 31 #include "trace.h" 32 33 /* Handle SCK (SET CLOCK) interception */ 34 static int handle_set_clock(struct kvm_vcpu *vcpu) 35 { 36 struct kvm_vcpu *cpup; 37 s64 hostclk, val; 38 int i, rc; 39 u64 op2; 40 41 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 42 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 43 44 op2 = kvm_s390_get_base_disp_s(vcpu); 45 if (op2 & 7) /* Operand must be on a doubleword boundary */ 46 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 47 rc = read_guest(vcpu, op2, &val, sizeof(val)); 48 if (rc) 49 return kvm_s390_inject_prog_cond(vcpu, rc); 50 51 if (store_tod_clock(&hostclk)) { 52 kvm_s390_set_psw_cc(vcpu, 3); 53 return 0; 54 } 55 val = (val - hostclk) & ~0x3fUL; 56 57 mutex_lock(&vcpu->kvm->lock); 58 kvm_for_each_vcpu(i, cpup, vcpu->kvm) 59 cpup->arch.sie_block->epoch = val; 60 mutex_unlock(&vcpu->kvm->lock); 61 62 kvm_s390_set_psw_cc(vcpu, 0); 63 return 0; 64 } 65 66 static int handle_set_prefix(struct kvm_vcpu *vcpu) 67 { 68 u64 operand2; 69 u32 address; 70 int rc; 71 72 vcpu->stat.instruction_spx++; 73 74 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 75 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 76 77 operand2 = kvm_s390_get_base_disp_s(vcpu); 78 79 /* must be word boundary */ 80 if (operand2 & 3) 81 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 82 83 /* get the value */ 84 rc = read_guest(vcpu, operand2, &address, sizeof(address)); 85 if (rc) 86 return kvm_s390_inject_prog_cond(vcpu, rc); 87 88 address &= 0x7fffe000u; 89 90 /* 91 * Make sure the new value is valid memory. We only need to check the 92 * first page, since address is 8k aligned and memory pieces are always 93 * at least 1MB aligned and have at least a size of 1MB. 94 */ 95 if (kvm_is_error_gpa(vcpu->kvm, address)) 96 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 97 98 kvm_s390_set_prefix(vcpu, address); 99 100 VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); 101 trace_kvm_s390_handle_prefix(vcpu, 1, address); 102 return 0; 103 } 104 105 static int handle_store_prefix(struct kvm_vcpu *vcpu) 106 { 107 u64 operand2; 108 u32 address; 109 int rc; 110 111 vcpu->stat.instruction_stpx++; 112 113 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 114 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 115 116 operand2 = kvm_s390_get_base_disp_s(vcpu); 117 118 /* must be word boundary */ 119 if (operand2 & 3) 120 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 121 122 address = kvm_s390_get_prefix(vcpu); 123 124 /* get the value */ 125 rc = write_guest(vcpu, operand2, &address, sizeof(address)); 126 if (rc) 127 return kvm_s390_inject_prog_cond(vcpu, rc); 128 129 VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); 130 trace_kvm_s390_handle_prefix(vcpu, 0, address); 131 return 0; 132 } 133 134 static int handle_store_cpu_address(struct kvm_vcpu *vcpu) 135 { 136 u16 vcpu_id = vcpu->vcpu_id; 137 u64 ga; 138 int rc; 139 140 vcpu->stat.instruction_stap++; 141 142 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 143 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 144 145 ga = kvm_s390_get_base_disp_s(vcpu); 146 147 if (ga & 1) 148 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 149 150 rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); 151 if (rc) 152 return kvm_s390_inject_prog_cond(vcpu, rc); 153 154 VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga); 155 trace_kvm_s390_handle_stap(vcpu, ga); 156 return 0; 157 } 158 159 static int __skey_check_enable(struct kvm_vcpu *vcpu) 160 { 161 int rc = 0; 162 if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) 163 return rc; 164 165 rc = s390_enable_skey(); 166 trace_kvm_s390_skey_related_inst(vcpu); 167 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); 168 return rc; 169 } 170 171 172 static int handle_skey(struct kvm_vcpu *vcpu) 173 { 174 int rc = __skey_check_enable(vcpu); 175 176 if (rc) 177 return rc; 178 vcpu->stat.instruction_storage_key++; 179 180 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 181 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 182 183 vcpu->arch.sie_block->gpsw.addr = 184 __rewind_psw(vcpu->arch.sie_block->gpsw, 4); 185 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); 186 return 0; 187 } 188 189 static int handle_ipte_interlock(struct kvm_vcpu *vcpu) 190 { 191 psw_t *psw = &vcpu->arch.sie_block->gpsw; 192 193 vcpu->stat.instruction_ipte_interlock++; 194 if (psw_bits(*psw).p) 195 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 196 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); 197 psw->addr = __rewind_psw(*psw, 4); 198 VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); 199 return 0; 200 } 201 202 static int handle_test_block(struct kvm_vcpu *vcpu) 203 { 204 gpa_t addr; 205 int reg2; 206 207 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 208 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 209 210 kvm_s390_get_regs_rre(vcpu, NULL, ®2); 211 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 212 addr = kvm_s390_logical_to_effective(vcpu, addr); 213 if (kvm_s390_check_low_addr_protection(vcpu, addr)) 214 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 215 addr = kvm_s390_real_to_abs(vcpu, addr); 216 217 if (kvm_is_error_gpa(vcpu->kvm, addr)) 218 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 219 /* 220 * We don't expect errors on modern systems, and do not care 221 * about storage keys (yet), so let's just clear the page. 222 */ 223 if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE)) 224 return -EFAULT; 225 kvm_s390_set_psw_cc(vcpu, 0); 226 vcpu->run->s.regs.gprs[0] = 0; 227 return 0; 228 } 229 230 static int handle_tpi(struct kvm_vcpu *vcpu) 231 { 232 struct kvm_s390_interrupt_info *inti; 233 unsigned long len; 234 u32 tpi_data[3]; 235 int cc, rc; 236 u64 addr; 237 238 rc = 0; 239 addr = kvm_s390_get_base_disp_s(vcpu); 240 if (addr & 3) 241 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 242 cc = 0; 243 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); 244 if (!inti) 245 goto no_interrupt; 246 cc = 1; 247 tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; 248 tpi_data[1] = inti->io.io_int_parm; 249 tpi_data[2] = inti->io.io_int_word; 250 if (addr) { 251 /* 252 * Store the two-word I/O interruption code into the 253 * provided area. 254 */ 255 len = sizeof(tpi_data) - 4; 256 rc = write_guest(vcpu, addr, &tpi_data, len); 257 if (rc) 258 return kvm_s390_inject_prog_cond(vcpu, rc); 259 } else { 260 /* 261 * Store the three-word I/O interruption code into 262 * the appropriate lowcore area. 263 */ 264 len = sizeof(tpi_data); 265 if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) 266 rc = -EFAULT; 267 } 268 /* 269 * If we encounter a problem storing the interruption code, the 270 * instruction is suppressed from the guest's view: reinject the 271 * interrupt. 272 */ 273 if (!rc) 274 kfree(inti); 275 else 276 kvm_s390_reinject_io_int(vcpu->kvm, inti); 277 no_interrupt: 278 /* Set condition code and we're done. */ 279 if (!rc) 280 kvm_s390_set_psw_cc(vcpu, cc); 281 return rc ? -EFAULT : 0; 282 } 283 284 static int handle_tsch(struct kvm_vcpu *vcpu) 285 { 286 struct kvm_s390_interrupt_info *inti; 287 288 inti = kvm_s390_get_io_int(vcpu->kvm, 0, 289 vcpu->run->s.regs.gprs[1]); 290 291 /* 292 * Prepare exit to userspace. 293 * We indicate whether we dequeued a pending I/O interrupt 294 * so that userspace can re-inject it if the instruction gets 295 * a program check. While this may re-order the pending I/O 296 * interrupts, this is no problem since the priority is kept 297 * intact. 298 */ 299 vcpu->run->exit_reason = KVM_EXIT_S390_TSCH; 300 vcpu->run->s390_tsch.dequeued = !!inti; 301 if (inti) { 302 vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id; 303 vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr; 304 vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm; 305 vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word; 306 } 307 vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb; 308 kfree(inti); 309 return -EREMOTE; 310 } 311 312 static int handle_io_inst(struct kvm_vcpu *vcpu) 313 { 314 VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); 315 316 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 317 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 318 319 if (vcpu->kvm->arch.css_support) { 320 /* 321 * Most I/O instructions will be handled by userspace. 322 * Exceptions are tpi and the interrupt portion of tsch. 323 */ 324 if (vcpu->arch.sie_block->ipa == 0xb236) 325 return handle_tpi(vcpu); 326 if (vcpu->arch.sie_block->ipa == 0xb235) 327 return handle_tsch(vcpu); 328 /* Handle in userspace. */ 329 return -EOPNOTSUPP; 330 } else { 331 /* 332 * Set condition code 3 to stop the guest from issuing channel 333 * I/O instructions. 334 */ 335 kvm_s390_set_psw_cc(vcpu, 3); 336 return 0; 337 } 338 } 339 340 static int handle_stfl(struct kvm_vcpu *vcpu) 341 { 342 int rc; 343 344 vcpu->stat.instruction_stfl++; 345 346 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 347 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 348 349 rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), 350 vfacilities, 4); 351 if (rc) 352 return rc; 353 VCPU_EVENT(vcpu, 5, "store facility list value %x", 354 *(unsigned int *) vfacilities); 355 trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities); 356 return 0; 357 } 358 359 #define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA) 360 #define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL 361 #define PSW_ADDR_24 0x0000000000ffffffUL 362 #define PSW_ADDR_31 0x000000007fffffffUL 363 364 int is_valid_psw(psw_t *psw) 365 { 366 if (psw->mask & PSW_MASK_UNASSIGNED) 367 return 0; 368 if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) { 369 if (psw->addr & ~PSW_ADDR_31) 370 return 0; 371 } 372 if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24)) 373 return 0; 374 if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA) 375 return 0; 376 if (psw->addr & 1) 377 return 0; 378 return 1; 379 } 380 381 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) 382 { 383 psw_t *gpsw = &vcpu->arch.sie_block->gpsw; 384 psw_compat_t new_psw; 385 u64 addr; 386 int rc; 387 388 if (gpsw->mask & PSW_MASK_PSTATE) 389 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 390 391 addr = kvm_s390_get_base_disp_s(vcpu); 392 if (addr & 7) 393 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 394 395 rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); 396 if (rc) 397 return kvm_s390_inject_prog_cond(vcpu, rc); 398 if (!(new_psw.mask & PSW32_MASK_BASE)) 399 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 400 gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; 401 gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE; 402 gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE; 403 if (!is_valid_psw(gpsw)) 404 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 405 return 0; 406 } 407 408 static int handle_lpswe(struct kvm_vcpu *vcpu) 409 { 410 psw_t new_psw; 411 u64 addr; 412 int rc; 413 414 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 415 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 416 417 addr = kvm_s390_get_base_disp_s(vcpu); 418 if (addr & 7) 419 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 420 rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); 421 if (rc) 422 return kvm_s390_inject_prog_cond(vcpu, rc); 423 vcpu->arch.sie_block->gpsw = new_psw; 424 if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) 425 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 426 return 0; 427 } 428 429 static int handle_stidp(struct kvm_vcpu *vcpu) 430 { 431 u64 stidp_data = vcpu->arch.stidp_data; 432 u64 operand2; 433 int rc; 434 435 vcpu->stat.instruction_stidp++; 436 437 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 438 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 439 440 operand2 = kvm_s390_get_base_disp_s(vcpu); 441 442 if (operand2 & 7) 443 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 444 445 rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); 446 if (rc) 447 return kvm_s390_inject_prog_cond(vcpu, rc); 448 449 VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); 450 return 0; 451 } 452 453 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) 454 { 455 int cpus = 0; 456 int n; 457 458 cpus = atomic_read(&vcpu->kvm->online_vcpus); 459 460 /* deal with other level 3 hypervisors */ 461 if (stsi(mem, 3, 2, 2)) 462 mem->count = 0; 463 if (mem->count < 8) 464 mem->count++; 465 for (n = mem->count - 1; n > 0 ; n--) 466 memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); 467 468 mem->vm[0].cpus_total = cpus; 469 mem->vm[0].cpus_configured = cpus; 470 mem->vm[0].cpus_standby = 0; 471 mem->vm[0].cpus_reserved = 0; 472 mem->vm[0].caf = 1000; 473 memcpy(mem->vm[0].name, "KVMguest", 8); 474 ASCEBC(mem->vm[0].name, 8); 475 memcpy(mem->vm[0].cpi, "KVM/Linux ", 16); 476 ASCEBC(mem->vm[0].cpi, 16); 477 } 478 479 static int handle_stsi(struct kvm_vcpu *vcpu) 480 { 481 int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; 482 int sel1 = vcpu->run->s.regs.gprs[0] & 0xff; 483 int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; 484 unsigned long mem = 0; 485 u64 operand2; 486 int rc = 0; 487 488 vcpu->stat.instruction_stsi++; 489 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); 490 491 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 492 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 493 494 if (fc > 3) { 495 kvm_s390_set_psw_cc(vcpu, 3); 496 return 0; 497 } 498 499 if (vcpu->run->s.regs.gprs[0] & 0x0fffff00 500 || vcpu->run->s.regs.gprs[1] & 0xffff0000) 501 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 502 503 if (fc == 0) { 504 vcpu->run->s.regs.gprs[0] = 3 << 28; 505 kvm_s390_set_psw_cc(vcpu, 0); 506 return 0; 507 } 508 509 operand2 = kvm_s390_get_base_disp_s(vcpu); 510 511 if (operand2 & 0xfff) 512 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 513 514 switch (fc) { 515 case 1: /* same handling for 1 and 2 */ 516 case 2: 517 mem = get_zeroed_page(GFP_KERNEL); 518 if (!mem) 519 goto out_no_data; 520 if (stsi((void *) mem, fc, sel1, sel2)) 521 goto out_no_data; 522 break; 523 case 3: 524 if (sel1 != 2 || sel2 != 2) 525 goto out_no_data; 526 mem = get_zeroed_page(GFP_KERNEL); 527 if (!mem) 528 goto out_no_data; 529 handle_stsi_3_2_2(vcpu, (void *) mem); 530 break; 531 } 532 533 rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); 534 if (rc) { 535 rc = kvm_s390_inject_prog_cond(vcpu, rc); 536 goto out; 537 } 538 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); 539 free_page(mem); 540 kvm_s390_set_psw_cc(vcpu, 0); 541 vcpu->run->s.regs.gprs[0] = 0; 542 return 0; 543 out_no_data: 544 kvm_s390_set_psw_cc(vcpu, 3); 545 out: 546 free_page(mem); 547 return rc; 548 } 549 550 static const intercept_handler_t b2_handlers[256] = { 551 [0x02] = handle_stidp, 552 [0x04] = handle_set_clock, 553 [0x10] = handle_set_prefix, 554 [0x11] = handle_store_prefix, 555 [0x12] = handle_store_cpu_address, 556 [0x21] = handle_ipte_interlock, 557 [0x29] = handle_skey, 558 [0x2a] = handle_skey, 559 [0x2b] = handle_skey, 560 [0x2c] = handle_test_block, 561 [0x30] = handle_io_inst, 562 [0x31] = handle_io_inst, 563 [0x32] = handle_io_inst, 564 [0x33] = handle_io_inst, 565 [0x34] = handle_io_inst, 566 [0x35] = handle_io_inst, 567 [0x36] = handle_io_inst, 568 [0x37] = handle_io_inst, 569 [0x38] = handle_io_inst, 570 [0x39] = handle_io_inst, 571 [0x3a] = handle_io_inst, 572 [0x3b] = handle_io_inst, 573 [0x3c] = handle_io_inst, 574 [0x50] = handle_ipte_interlock, 575 [0x5f] = handle_io_inst, 576 [0x74] = handle_io_inst, 577 [0x76] = handle_io_inst, 578 [0x7d] = handle_stsi, 579 [0xb1] = handle_stfl, 580 [0xb2] = handle_lpswe, 581 }; 582 583 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) 584 { 585 intercept_handler_t handler; 586 587 /* 588 * A lot of B2 instructions are priviledged. Here we check for 589 * the privileged ones, that we can handle in the kernel. 590 * Anything else goes to userspace. 591 */ 592 handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 593 if (handler) 594 return handler(vcpu); 595 596 return -EOPNOTSUPP; 597 } 598 599 static int handle_epsw(struct kvm_vcpu *vcpu) 600 { 601 int reg1, reg2; 602 603 kvm_s390_get_regs_rre(vcpu, ®1, ®2); 604 605 /* This basically extracts the mask half of the psw. */ 606 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL; 607 vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; 608 if (reg2) { 609 vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL; 610 vcpu->run->s.regs.gprs[reg2] |= 611 vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL; 612 } 613 return 0; 614 } 615 616 #define PFMF_RESERVED 0xfffc0101UL 617 #define PFMF_SK 0x00020000UL 618 #define PFMF_CF 0x00010000UL 619 #define PFMF_UI 0x00008000UL 620 #define PFMF_FSC 0x00007000UL 621 #define PFMF_NQ 0x00000800UL 622 #define PFMF_MR 0x00000400UL 623 #define PFMF_MC 0x00000200UL 624 #define PFMF_KEY 0x000000feUL 625 626 static int handle_pfmf(struct kvm_vcpu *vcpu) 627 { 628 int reg1, reg2; 629 unsigned long start, end; 630 631 vcpu->stat.instruction_pfmf++; 632 633 kvm_s390_get_regs_rre(vcpu, ®1, ®2); 634 635 if (!MACHINE_HAS_PFMF) 636 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 637 638 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 639 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 640 641 if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED) 642 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 643 644 /* Only provide non-quiescing support if the host supports it */ 645 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14)) 646 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 647 648 /* No support for conditional-SSKE */ 649 if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC)) 650 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 651 652 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 653 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { 654 if (kvm_s390_check_low_addr_protection(vcpu, start)) 655 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 656 } 657 658 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { 659 case 0x00000000: 660 end = (start + (1UL << 12)) & ~((1UL << 12) - 1); 661 break; 662 case 0x00001000: 663 end = (start + (1UL << 20)) & ~((1UL << 20) - 1); 664 break; 665 /* We dont support EDAT2 666 case 0x00002000: 667 end = (start + (1UL << 31)) & ~((1UL << 31) - 1); 668 break;*/ 669 default: 670 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 671 } 672 while (start < end) { 673 unsigned long useraddr, abs_addr; 674 675 /* Translate guest address to host address */ 676 if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0) 677 abs_addr = kvm_s390_real_to_abs(vcpu, start); 678 else 679 abs_addr = start; 680 useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr)); 681 if (kvm_is_error_hva(useraddr)) 682 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 683 684 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { 685 if (clear_user((void __user *)useraddr, PAGE_SIZE)) 686 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 687 } 688 689 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { 690 int rc = __skey_check_enable(vcpu); 691 692 if (rc) 693 return rc; 694 if (set_guest_storage_key(current->mm, useraddr, 695 vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, 696 vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) 697 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 698 } 699 700 start += PAGE_SIZE; 701 } 702 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) 703 vcpu->run->s.regs.gprs[reg2] = end; 704 return 0; 705 } 706 707 static int handle_essa(struct kvm_vcpu *vcpu) 708 { 709 /* entries expected to be 1FF */ 710 int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; 711 unsigned long *cbrlo, cbrle; 712 struct gmap *gmap; 713 int i; 714 715 VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); 716 gmap = vcpu->arch.gmap; 717 vcpu->stat.instruction_essa++; 718 if (!kvm_s390_cmma_enabled(vcpu->kvm)) 719 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 720 721 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 722 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 723 724 if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6) 725 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 726 727 /* Rewind PSW to repeat the ESSA instruction */ 728 vcpu->arch.sie_block->gpsw.addr = 729 __rewind_psw(vcpu->arch.sie_block->gpsw, 4); 730 vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ 731 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); 732 down_read(&gmap->mm->mmap_sem); 733 for (i = 0; i < entries; ++i) { 734 cbrle = cbrlo[i]; 735 if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE)) 736 /* invalid entry */ 737 break; 738 /* try to free backing */ 739 __gmap_zap(gmap, cbrle); 740 } 741 up_read(&gmap->mm->mmap_sem); 742 if (i < entries) 743 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 744 return 0; 745 } 746 747 static const intercept_handler_t b9_handlers[256] = { 748 [0x8a] = handle_ipte_interlock, 749 [0x8d] = handle_epsw, 750 [0x8e] = handle_ipte_interlock, 751 [0x8f] = handle_ipte_interlock, 752 [0xab] = handle_essa, 753 [0xaf] = handle_pfmf, 754 }; 755 756 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) 757 { 758 intercept_handler_t handler; 759 760 /* This is handled just as for the B2 instructions. */ 761 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 762 if (handler) 763 return handler(vcpu); 764 765 return -EOPNOTSUPP; 766 } 767 768 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) 769 { 770 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 771 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 772 u32 val = 0; 773 int reg, rc; 774 u64 ga; 775 776 vcpu->stat.instruction_lctl++; 777 778 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 779 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 780 781 ga = kvm_s390_get_base_disp_rs(vcpu); 782 783 if (ga & 3) 784 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 785 786 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 787 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); 788 789 reg = reg1; 790 do { 791 rc = read_guest(vcpu, ga, &val, sizeof(val)); 792 if (rc) 793 return kvm_s390_inject_prog_cond(vcpu, rc); 794 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; 795 vcpu->arch.sie_block->gcr[reg] |= val; 796 ga += 4; 797 if (reg == reg3) 798 break; 799 reg = (reg + 1) % 16; 800 } while (1); 801 802 return 0; 803 } 804 805 int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) 806 { 807 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 808 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 809 u64 ga; 810 u32 val; 811 int reg, rc; 812 813 vcpu->stat.instruction_stctl++; 814 815 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 816 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 817 818 ga = kvm_s390_get_base_disp_rs(vcpu); 819 820 if (ga & 3) 821 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 822 823 VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 824 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); 825 826 reg = reg1; 827 do { 828 val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful; 829 rc = write_guest(vcpu, ga, &val, sizeof(val)); 830 if (rc) 831 return kvm_s390_inject_prog_cond(vcpu, rc); 832 ga += 4; 833 if (reg == reg3) 834 break; 835 reg = (reg + 1) % 16; 836 } while (1); 837 838 return 0; 839 } 840 841 static int handle_lctlg(struct kvm_vcpu *vcpu) 842 { 843 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 844 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 845 u64 ga, val; 846 int reg, rc; 847 848 vcpu->stat.instruction_lctlg++; 849 850 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 851 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 852 853 ga = kvm_s390_get_base_disp_rsy(vcpu); 854 855 if (ga & 7) 856 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 857 858 reg = reg1; 859 860 VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 861 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); 862 863 do { 864 rc = read_guest(vcpu, ga, &val, sizeof(val)); 865 if (rc) 866 return kvm_s390_inject_prog_cond(vcpu, rc); 867 vcpu->arch.sie_block->gcr[reg] = val; 868 ga += 8; 869 if (reg == reg3) 870 break; 871 reg = (reg + 1) % 16; 872 } while (1); 873 874 return 0; 875 } 876 877 static int handle_stctg(struct kvm_vcpu *vcpu) 878 { 879 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 880 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; 881 u64 ga, val; 882 int reg, rc; 883 884 vcpu->stat.instruction_stctg++; 885 886 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 887 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 888 889 ga = kvm_s390_get_base_disp_rsy(vcpu); 890 891 if (ga & 7) 892 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 893 894 reg = reg1; 895 896 VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 897 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); 898 899 do { 900 val = vcpu->arch.sie_block->gcr[reg]; 901 rc = write_guest(vcpu, ga, &val, sizeof(val)); 902 if (rc) 903 return kvm_s390_inject_prog_cond(vcpu, rc); 904 ga += 8; 905 if (reg == reg3) 906 break; 907 reg = (reg + 1) % 16; 908 } while (1); 909 910 return 0; 911 } 912 913 static const intercept_handler_t eb_handlers[256] = { 914 [0x2f] = handle_lctlg, 915 [0x25] = handle_stctg, 916 }; 917 918 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) 919 { 920 intercept_handler_t handler; 921 922 handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; 923 if (handler) 924 return handler(vcpu); 925 return -EOPNOTSUPP; 926 } 927 928 static int handle_tprot(struct kvm_vcpu *vcpu) 929 { 930 u64 address1, address2; 931 unsigned long hva, gpa; 932 int ret = 0, cc = 0; 933 bool writable; 934 935 vcpu->stat.instruction_tprot++; 936 937 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 938 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 939 940 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); 941 942 /* we only handle the Linux memory detection case: 943 * access key == 0 944 * everything else goes to userspace. */ 945 if (address2 & 0xf0) 946 return -EOPNOTSUPP; 947 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) 948 ipte_lock(vcpu); 949 ret = guest_translate_address(vcpu, address1, &gpa, 1); 950 if (ret == PGM_PROTECTION) { 951 /* Write protected? Try again with read-only... */ 952 cc = 1; 953 ret = guest_translate_address(vcpu, address1, &gpa, 0); 954 } 955 if (ret) { 956 if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { 957 ret = kvm_s390_inject_program_int(vcpu, ret); 958 } else if (ret > 0) { 959 /* Translation not available */ 960 kvm_s390_set_psw_cc(vcpu, 3); 961 ret = 0; 962 } 963 goto out_unlock; 964 } 965 966 hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable); 967 if (kvm_is_error_hva(hva)) { 968 ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 969 } else { 970 if (!writable) 971 cc = 1; /* Write not permitted ==> read-only */ 972 kvm_s390_set_psw_cc(vcpu, cc); 973 /* Note: CC2 only occurs for storage keys (not supported yet) */ 974 } 975 out_unlock: 976 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) 977 ipte_unlock(vcpu); 978 return ret; 979 } 980 981 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) 982 { 983 /* For e5xx... instructions we only handle TPROT */ 984 if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01) 985 return handle_tprot(vcpu); 986 return -EOPNOTSUPP; 987 } 988 989 static int handle_sckpf(struct kvm_vcpu *vcpu) 990 { 991 u32 value; 992 993 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 994 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 995 996 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000) 997 return kvm_s390_inject_program_int(vcpu, 998 PGM_SPECIFICATION); 999 1000 value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff; 1001 vcpu->arch.sie_block->todpr = value; 1002 1003 return 0; 1004 } 1005 1006 static const intercept_handler_t x01_handlers[256] = { 1007 [0x07] = handle_sckpf, 1008 }; 1009 1010 int kvm_s390_handle_01(struct kvm_vcpu *vcpu) 1011 { 1012 intercept_handler_t handler; 1013 1014 handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 1015 if (handler) 1016 return handler(vcpu); 1017 return -EOPNOTSUPP; 1018 } 1019