/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	int i, rc;
	u64 op2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

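/*
 * Handle SPX (SET PREFIX). The prefix register selects the 8 KB block
 * used as the guest's lowcore, so the operand value is masked to an
 * 8 KB-aligned 31-bit address before validation; e.g. an operand value
 * of 0x12345fff yields the prefix 0x12344000.
 */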
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* store the value */
	rc = write_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return rc;

	rc = s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	return rc;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
	int rc = __skey_check_enable(vcpu);

	if (rc)
		return rc;
	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_rewind_psw(vcpu, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_rewind_psw(vcpu, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_protection(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

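/*
 * Handle TPI (TEST PENDING INTERRUPTION): dequeue an I/O interrupt that
 * matches the ISC mask in CR6. With a non-zero operand address the
 * two-word interruption code is stored there; with a zero operand the
 * full three-word code goes to the lowcore.
 */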
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int cc, rc;
	u64 addr;

	rc = 0;
	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, &tpi_data, len);
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
			rc = -EFAULT;
	}
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (!rc)
		kfree(inti);
	else
		kvm_s390_reinject_io_int(vcpu->kvm, inti);
no_interrupt:
	/* Set condition code and we're done. */
	if (!rc)
		kvm_s390_set_psw_cc(vcpu, cc);
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac->list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->arch.stidp_data;
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

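/*
 * Dispatch table for the privileged 0xb2xx instructions we emulate in
 * the kernel, indexed by the low byte of the intercepted opcode
 * (sie_block->ipa). NULL entries fall through to userspace.
 */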
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED	0xfffc0101UL
#define PFMF_SK		0x00020000UL
#define PFMF_CF		0x00010000UL
#define PFMF_UI		0x00008000UL
#define PFMF_FSC	0x00007000UL
#define PFMF_NQ		0x00000800UL
#define PFMF_MR		0x00000400UL
#define PFMF_MC		0x00000200UL
#define PFMF_KEY	0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* 4K frame */
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		/* 1M segment */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break; */
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_protection(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	while (start < end) {
		unsigned long useraddr, abs_addr;

		/* Translate guest address to host address */
		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
			abs_addr = kvm_s390_real_to_abs(vcpu, start);
		else
			abs_addr = start;
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = __skey_check_enable(vcpu);

			if (rc)
				return rc;
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

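/*
 * Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES), used by guests with
 * CMMA enabled. The list anchored at sie_block->cbrlo holds guest page
 * addresses whose usage state lets the host drop the backing storage;
 * we walk that list and zap the corresponding host mappings.
 */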
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	kvm_s390_rewind_psw(vcpu, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(gmap, cbrle);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

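/*
 * LCTL and STCTL transfer the lower 32 bits of control registers reg1
 * through reg3, wrapping from CR15 back to CR0, so ((reg3 - reg1) & 0xf) + 1
 * words are moved. Loading control registers can change address-translation
 * state, which is why LCTL requests a TLB flush afterwards.
 */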
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

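/*
 * LCTLG and STCTG are the 64-bit counterparts of LCTL/STCTL: whole
 * doublewords are transferred, so the operand address must be
 * doubleword aligned.
 */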
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

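/*
 * Handle TPROT (TEST PROTECTION). The resulting condition code tells the
 * guest how the tested address may be accessed: cc0 fetch and store
 * permitted, cc1 fetch only, cc3 address not translatable. cc2 would
 * signal key-controlled protection, but storage keys are not supported
 * here.
 */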
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/*
	 * We only handle the Linux memory detection case
	 * (access key == 0); everything else goes to userspace.
	 */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, &gpa, 1);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, &gpa, 0);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}