/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu *cpup;
        s64 hostclk, val;
        int i, rc;
        ar_t ar;
        u64 op2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (op2 & 7)    /* Operand must be on a doubleword boundary */
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        if (store_tod_clock(&hostclk)) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
        VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
        val = (val - hostclk) & ~0x3fUL;

        mutex_lock(&vcpu->kvm->lock);
        preempt_disable();
        kvm_for_each_vcpu(i, cpup, vcpu->kvm)
                cpup->arch.sie_block->epoch = val;
        preempt_enable();
        mutex_unlock(&vcpu->kvm->lock);

        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_spx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* get the value */
        rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        address &= 0x7fffe000u;

        /*
         * Make sure the new value is valid memory. We only need to check the
         * first page, since address is 8k aligned and memory pieces are always
         * at least 1MB aligned and have at least a size of 1MB.
         */
        if (kvm_is_error_gpa(vcpu->kvm, address))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_prefix(vcpu, address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
        return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_stpx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        address = kvm_s390_get_prefix(vcpu);

        /* get the value */
        rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
        return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u16 vcpu_id = vcpu->vcpu_id;
        u64 ga;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_stap++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (ga & 1)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
        trace_kvm_s390_handle_stap(vcpu, ga);
        return 0;
}
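
/*
 * Lazily enable storage key handling on first use: have the host
 * prepare for guest storage keys and drop the ISKE/SSKE/RRBE
 * intercept controls so those instructions are no longer intercepted.
 */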
static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
                return rc;

        rc = s390_enable_skey();
        VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest");
        trace_kvm_s390_skey_related_inst(vcpu);
        vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
        return rc;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
        int rc = __skey_check_enable(vcpu);

        if (rc)
                return rc;
        vcpu->stat.instruction_storage_key++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_rewind_psw(vcpu, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_ipte_interlock++;
        if (psw_bits(vcpu->arch.sie_block->gpsw).p)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
        kvm_s390_rewind_psw(vcpu, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
        gpa_t addr;
        int reg2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_logical_to_effective(vcpu, addr);
        if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
                return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        addr = kvm_s390_real_to_abs(vcpu, addr);

        if (kvm_is_error_gpa(vcpu->kvm, addr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        /*
         * We don't expect errors on modern systems, and do not care
         * about storage keys (yet), so let's just clear the page.
         */
        if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
                return -EFAULT;
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
}
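
/*
 * Handle TPI (TEST PENDING INTERRUPTION) interception: dequeue a
 * pending I/O interrupt and store the interruption code at the given
 * address, or into the lowcore if the address is zero. If storing
 * fails, the interrupt is reinjected.
 */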
static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        unsigned long len;
        u32 tpi_data[3];
        int rc;
        u64 addr;
        ar_t ar;

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
        if (!inti) {
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
        tpi_data[1] = inti->io.io_int_parm;
        tpi_data[2] = inti->io.io_int_word;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                len = sizeof(tpi_data) - 4;
                rc = write_guest(vcpu, addr, ar, &tpi_data, len);
                if (rc) {
                        rc = kvm_s390_inject_prog_cond(vcpu, rc);
                        goto reinject_interrupt;
                }
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                len = sizeof(tpi_data);
                if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
                        /* failed writes to the low core are not recoverable */
                        rc = -EFAULT;
                        goto reinject_interrupt;
                }
        }

        /* irq was successfully handed to the guest */
        kfree(inti);
        kvm_s390_set_psw_cc(vcpu, 1);
        return 0;
reinject_interrupt:
        /*
         * If we encounter a problem storing the interruption code, the
         * instruction is suppressed from the guest's view: reinject the
         * interrupt.
         */
        if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
                kfree(inti);
                rc = -EFAULT;
        }
        /* don't set the cc, a pgm irq was injected or we drop to user space */
        return rc ? -EFAULT : 0;
}
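
/*
 * Handle the interrupt portion of TSCH (TEST SUBCHANNEL): dequeue a
 * matching I/O interrupt, if any, and hand it to userspace in the
 * kvm_run structure so that userspace can complete the instruction.
 */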
static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti = NULL;
        const u64 isc_mask = 0xffUL << 24;      /* all iscs set */

        /* a valid schid has at least one bit set */
        if (vcpu->run->s.regs.gprs[1])
                inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
                                           vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
        int rc;
        unsigned int fac;

        vcpu->stat.instruction_stfl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        /*
         * We need to shift the lower 32 facility bits (bit 0-31) from a u64
         * into a u32 memory representation. They will remain bits 0-31.
         */
        fac = *vcpu->kvm->arch.model.fac->list >> 32;
        rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                            &fac, sizeof(fac));
        if (rc)
                return rc;
        VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
        trace_kvm_s390_handle_stfl(vcpu, fac);
        return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
        if (psw->mask & PSW_MASK_UNASSIGNED)
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
                if (psw->addr & ~PSW_ADDR_31)
                        return 0;
        }
        if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
                return 0;
        if (psw->addr & 1)
                return 0;
        return 1;
}
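
/*
 * Handle LPSW (LOAD PSW) interception: load an ESA/390-format PSW from
 * guest memory and expand it into the z/Architecture PSW layout.
 */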
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
        psw_compat_t new_psw;
        u64 addr;
        int rc;
        ar_t ar;

        if (gpsw->mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        if (!(new_psw.mask & PSW32_MASK_BASE))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
        gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
        if (!is_valid_psw(gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        psw_t new_psw;
        u64 addr;
        int rc;
        ar_t ar;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 stidp_data = vcpu->arch.stidp_data;
        u64 operand2;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_stidp++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (operand2 & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
        return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        int cpus = 0;
        int n;

        cpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
                                 u8 fc, u8 sel1, u16 sel2)
{
        vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
        vcpu->run->s390_stsi.addr = addr;
        vcpu->run->s390_stsi.ar = ar;
        vcpu->run->s390_stsi.fc = fc;
        vcpu->run->s390_stsi.sel1 = sel1;
        vcpu->run->s390_stsi.sel2 = sel2;
}
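
/*
 * Handle STSI (STORE SYSTEM INFORMATION) interception: satisfy function
 * codes 0-3 in the kernel; for fc 3.2.2 the response is patched to
 * describe this KVM guest. If requested, the data is also forwarded to
 * userspace via KVM_EXIT_S390_STSI.
 */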
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        unsigned long mem = 0;
        u64 operand2;
        int rc = 0;
        ar_t ar;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (fc > 3) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }

        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (fc == 0) {
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (operand2 & 0xfff)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_no_data;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_no_data;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        }

        rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
        if (rc) {
                rc = kvm_s390_inject_prog_cond(vcpu, rc);
                goto out;
        }
        if (vcpu->kvm->arch.user_stsi) {
                insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
                rc = -EREMOTE;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return rc;
out_no_data:
        kvm_s390_set_psw_cc(vcpu, 3);
out:
        free_page(mem);
        return rc;
}

static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x04] = handle_set_clock,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x21] = handle_ipte_interlock,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x2c] = handle_test_block,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x50] = handle_ipte_interlock,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. Here we check for
         * the privileged ones that we can handle in the kernel.
         * Anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
        }
        return 0;
}
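
/*
 * Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception: clear
 * the frames and/or set the storage keys of the operand range, one 4K
 * page at a time, for 4K, 1M or (with EDAT2) 2G frame sizes.
 */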
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;
        unsigned long start, end;

        vcpu->stat.instruction_pfmf++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        if (!MACHINE_HAS_PFMF)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide non-quiescing support if the host supports it */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* No support for conditional-SSKE */
        if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        start = kvm_s390_logical_to_effective(vcpu, start);

        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        case 0x00000000:
                end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
                break;
        case 0x00001000:
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
                break;
        case 0x00002000:
                /* only support 2G frame size if EDAT2 is available and we are
                   not in 24-bit addressing mode */
                if (!test_kvm_facility(vcpu->kvm, 78) ||
                    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
                break;
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                if (kvm_s390_check_low_addr_prot_real(vcpu, start))
                        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        }

        while (start < end) {
                unsigned long useraddr, abs_addr;

                /* Translate guest address to host address */
                if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
                        abs_addr = kvm_s390_real_to_abs(vcpu, start);
                else
                        abs_addr = start;
                useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
                if (kvm_is_error_hva(useraddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                        if (clear_user((void __user *)useraddr, PAGE_SIZE))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
                        int rc = __skey_check_enable(vcpu);

                        if (rc)
                                return rc;
                        if (set_guest_storage_key(current->mm, useraddr,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
                vcpu->run->s.regs.gprs[reg2] = end;
        return 0;
}
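
/*
 * Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception for
 * CMMA: zap the host backing of each guest page listed in the block
 * list at sie_block->cbrlo, then rewind the PSW so the guest
 * re-executes ESSA.
 */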
static int handle_essa(struct kvm_vcpu *vcpu)
{
        /* entries expected to be 1FF */
        int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
        unsigned long *cbrlo, cbrle;
        struct gmap *gmap;
        int i;

        VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
        gmap = vcpu->arch.gmap;
        vcpu->stat.instruction_essa++;
        if (!vcpu->kvm->arch.use_cmma)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Rewind PSW to repeat the ESSA instruction */
        kvm_s390_rewind_psw(vcpu, 4);
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
        down_read(&gmap->mm->mmap_sem);
        for (i = 0; i < entries; ++i) {
                cbrle = cbrlo[i];
                if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
                        /* invalid entry */
                        break;
                /* try to free backing */
                __gmap_zap(gmap, cbrle);
        }
        up_read(&gmap->mm->mmap_sem);
        if (i < entries)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8a] = handle_ipte_interlock,
        [0x8d] = handle_epsw,
        [0x8e] = handle_ipte_interlock,
        [0x8f] = handle_ipte_interlock,
        [0xab] = handle_essa,
        [0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}
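
/*
 * Handle LCTL (LOAD CONTROL, 32-bit) interception: load the lower
 * halves of control registers reg1 through reg3 from guest memory.
 */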
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_lctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_stctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
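
/*
 * Handle LCTLG (LOAD CONTROL, 64-bit) interception: load the full
 * control registers reg1 through reg3 from guest memory.
 */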
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_lctlg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_stctg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x2f] = handle_lctlg,
        [0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}
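
/*
 * Handle TPROT (TEST PROTECTION) interception: translate the operand
 * address and derive the condition code from whether the page is
 * mapped and writable. Only access key 0 is handled in the kernel.
 */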
static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        unsigned long hva, gpa;
        int ret = 0, cc = 0;
        bool writable;
        ar_t ar;

        vcpu->stat.instruction_tprot++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

        /*
         * We only handle the Linux memory detection case:
         * access key == 0
         * everything else goes to userspace.
         */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_lock(vcpu);
        ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
        if (ret == PGM_PROTECTION) {
                /* Write protected? Try again with read-only... */
                cc = 1;
                ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
        }
        if (ret) {
                if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
                        ret = kvm_s390_inject_program_int(vcpu, ret);
                } else if (ret > 0) {
                        /* Translation not available */
                        kvm_s390_set_psw_cc(vcpu, 3);
                        ret = 0;
                }
                goto out_unlock;
        }

        hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
        if (kvm_is_error_hva(hva)) {
                ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        } else {
                if (!writable)
                        cc = 1;         /* Write not permitted ==> read-only */
                kvm_s390_set_psw_cc(vcpu, cc);
                /* Note: CC2 only occurs for storage keys (not supported yet) */
        }
out_unlock:
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_unlock(vcpu);
        return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}