1 /* 2 * handling kvm guest interrupts 3 * 4 * Copyright IBM Corp. 2008, 2015 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License (version 2 only) 8 * as published by the Free Software Foundation. 9 * 10 * Author(s): Carsten Otte <cotte@de.ibm.com> 11 */ 12 13 #include <linux/interrupt.h> 14 #include <linux/kvm_host.h> 15 #include <linux/hrtimer.h> 16 #include <linux/mmu_context.h> 17 #include <linux/signal.h> 18 #include <linux/slab.h> 19 #include <linux/bitmap.h> 20 #include <linux/vmalloc.h> 21 #include <asm/asm-offsets.h> 22 #include <asm/dis.h> 23 #include <asm/uaccess.h> 24 #include <asm/sclp.h> 25 #include <asm/isc.h> 26 #include <asm/gmap.h> 27 #include "kvm-s390.h" 28 #include "gaccess.h" 29 #include "trace-s390.h" 30 31 #define IOINT_SCHID_MASK 0x0000ffff 32 #define IOINT_SSID_MASK 0x00030000 33 #define IOINT_CSSID_MASK 0x03fc0000 34 #define PFAULT_INIT 0x0600 35 #define PFAULT_DONE 0x0680 36 #define VIRTIO_PARAM 0x0d00 37 38 /* handle external calls via sigp interpretation facility */ 39 static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id) 40 { 41 int c, scn; 42 43 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND)) 44 return 0; 45 46 read_lock(&vcpu->kvm->arch.sca_lock); 47 if (vcpu->kvm->arch.use_esca) { 48 struct esca_block *sca = vcpu->kvm->arch.sca; 49 union esca_sigp_ctrl sigp_ctrl = 50 sca->cpu[vcpu->vcpu_id].sigp_ctrl; 51 52 c = sigp_ctrl.c; 53 scn = sigp_ctrl.scn; 54 } else { 55 struct bsca_block *sca = vcpu->kvm->arch.sca; 56 union bsca_sigp_ctrl sigp_ctrl = 57 sca->cpu[vcpu->vcpu_id].sigp_ctrl; 58 59 c = sigp_ctrl.c; 60 scn = sigp_ctrl.scn; 61 } 62 read_unlock(&vcpu->kvm->arch.sca_lock); 63 64 if (src_id) 65 *src_id = scn; 66 67 return c; 68 } 69 70 static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) 71 { 72 int expect, rc; 73 74 read_lock(&vcpu->kvm->arch.sca_lock); 75 if (vcpu->kvm->arch.use_esca) { 76 struct esca_block *sca = vcpu->kvm->arch.sca; 77 union esca_sigp_ctrl *sigp_ctrl = 78 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 79 union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; 80 81 new_val.scn = src_id; 82 new_val.c = 1; 83 old_val.c = 0; 84 85 expect = old_val.value; 86 rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value); 87 } else { 88 struct bsca_block *sca = vcpu->kvm->arch.sca; 89 union bsca_sigp_ctrl *sigp_ctrl = 90 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 91 union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; 92 93 new_val.scn = src_id; 94 new_val.c = 1; 95 old_val.c = 0; 96 97 expect = old_val.value; 98 rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value); 99 } 100 read_unlock(&vcpu->kvm->arch.sca_lock); 101 102 if (rc != expect) { 103 /* another external call is pending */ 104 return -EBUSY; 105 } 106 atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); 107 return 0; 108 } 109 110 static void sca_clear_ext_call(struct kvm_vcpu *vcpu) 111 { 112 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 113 int rc, expect; 114 115 atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags); 116 read_lock(&vcpu->kvm->arch.sca_lock); 117 if (vcpu->kvm->arch.use_esca) { 118 struct esca_block *sca = vcpu->kvm->arch.sca; 119 union esca_sigp_ctrl *sigp_ctrl = 120 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 121 union esca_sigp_ctrl old = *sigp_ctrl; 122 123 expect = old.value; 124 rc = cmpxchg(&sigp_ctrl->value, old.value, 0); 125 } else { 126 struct bsca_block *sca = vcpu->kvm->arch.sca; 127 union 
bsca_sigp_ctrl *sigp_ctrl = 128 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 129 union bsca_sigp_ctrl old = *sigp_ctrl; 130 131 expect = old.value; 132 rc = cmpxchg(&sigp_ctrl->value, old.value, 0); 133 } 134 read_unlock(&vcpu->kvm->arch.sca_lock); 135 WARN_ON(rc != expect); /* cannot clear? */ 136 } 137 138 int psw_extint_disabled(struct kvm_vcpu *vcpu) 139 { 140 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); 141 } 142 143 static int psw_ioint_disabled(struct kvm_vcpu *vcpu) 144 { 145 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); 146 } 147 148 static int psw_mchk_disabled(struct kvm_vcpu *vcpu) 149 { 150 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); 151 } 152 153 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) 154 { 155 return psw_extint_disabled(vcpu) && 156 psw_ioint_disabled(vcpu) && 157 psw_mchk_disabled(vcpu); 158 } 159 160 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) 161 { 162 if (psw_extint_disabled(vcpu) || 163 !(vcpu->arch.sie_block->gcr[0] & 0x800ul)) 164 return 0; 165 if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu)) 166 /* No timer interrupts when single stepping */ 167 return 0; 168 return 1; 169 } 170 171 static int ckc_irq_pending(struct kvm_vcpu *vcpu) 172 { 173 if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm)) 174 return 0; 175 return ckc_interrupts_enabled(vcpu); 176 } 177 178 static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu) 179 { 180 return !psw_extint_disabled(vcpu) && 181 (vcpu->arch.sie_block->gcr[0] & 0x400ul); 182 } 183 184 static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu) 185 { 186 if (!cpu_timer_interrupts_enabled(vcpu)) 187 return 0; 188 return kvm_s390_get_cpu_timer(vcpu) >> 63; 189 } 190 191 static inline int is_ioirq(unsigned long irq_type) 192 { 193 return ((irq_type >= IRQ_PEND_IO_ISC_0) && 194 (irq_type <= IRQ_PEND_IO_ISC_7)); 195 } 196 197 static uint64_t isc_to_isc_bits(int isc) 198 { 199 return (0x80 >> isc) << 24; 200 } 201 202 static inline u8 int_word_to_isc(u32 int_word) 203 { 204 return (int_word & 0x38000000) >> 27; 205 } 206 207 static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) 208 { 209 return vcpu->kvm->arch.float_int.pending_irqs | 210 vcpu->arch.local_int.pending_irqs; 211 } 212 213 static unsigned long disable_iscs(struct kvm_vcpu *vcpu, 214 unsigned long active_mask) 215 { 216 int i; 217 218 for (i = 0; i <= MAX_ISC; i++) 219 if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i))) 220 active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i)); 221 222 return active_mask; 223 } 224 225 static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) 226 { 227 unsigned long active_mask; 228 229 active_mask = pending_irqs(vcpu); 230 if (!active_mask) 231 return 0; 232 233 if (psw_extint_disabled(vcpu)) 234 active_mask &= ~IRQ_PEND_EXT_MASK; 235 if (psw_ioint_disabled(vcpu)) 236 active_mask &= ~IRQ_PEND_IO_MASK; 237 else 238 active_mask = disable_iscs(vcpu, active_mask); 239 if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) 240 __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); 241 if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) 242 __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask); 243 if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) 244 __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); 245 if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) 246 __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); 247 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) 248 __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask); 249 if (psw_mchk_disabled(vcpu)) 250 active_mask 
&= ~IRQ_PEND_MCHK_MASK; 251 if (!(vcpu->arch.sie_block->gcr[14] & 252 vcpu->kvm->arch.float_int.mchk.cr14)) 253 __clear_bit(IRQ_PEND_MCHK_REP, &active_mask); 254 255 /* 256 * STOP irqs will never be actively delivered. They are triggered via 257 * intercept requests and cleared when the stop intercept is performed. 258 */ 259 __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask); 260 261 return active_mask; 262 } 263 264 static void __set_cpu_idle(struct kvm_vcpu *vcpu) 265 { 266 atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 267 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 268 } 269 270 static void __unset_cpu_idle(struct kvm_vcpu *vcpu) 271 { 272 atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 273 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 274 } 275 276 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) 277 { 278 atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, 279 &vcpu->arch.sie_block->cpuflags); 280 vcpu->arch.sie_block->lctl = 0x0000; 281 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); 282 283 if (guestdbg_enabled(vcpu)) { 284 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 | 285 LCTL_CR10 | LCTL_CR11); 286 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); 287 } 288 } 289 290 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) 291 { 292 atomic_or(flag, &vcpu->arch.sie_block->cpuflags); 293 } 294 295 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) 296 { 297 if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK)) 298 return; 299 else if (psw_ioint_disabled(vcpu)) 300 __set_cpuflag(vcpu, CPUSTAT_IO_INT); 301 else 302 vcpu->arch.sie_block->lctl |= LCTL_CR6; 303 } 304 305 static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) 306 { 307 if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK)) 308 return; 309 if (psw_extint_disabled(vcpu)) 310 __set_cpuflag(vcpu, CPUSTAT_EXT_INT); 311 else 312 vcpu->arch.sie_block->lctl |= LCTL_CR0; 313 } 314 315 static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu) 316 { 317 if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) 318 return; 319 if (psw_mchk_disabled(vcpu)) 320 vcpu->arch.sie_block->ictl |= ICTL_LPSW; 321 else 322 vcpu->arch.sie_block->lctl |= LCTL_CR14; 323 } 324 325 static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu) 326 { 327 if (kvm_s390_is_stop_irq_pending(vcpu)) 328 __set_cpuflag(vcpu, CPUSTAT_STOP_INT); 329 } 330 331 /* Set interception request for non-deliverable interrupts */ 332 static void set_intercept_indicators(struct kvm_vcpu *vcpu) 333 { 334 set_intercept_indicators_io(vcpu); 335 set_intercept_indicators_ext(vcpu); 336 set_intercept_indicators_mchk(vcpu); 337 set_intercept_indicators_stop(vcpu); 338 } 339 340 static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) 341 { 342 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 343 int rc; 344 345 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, 346 0, 0); 347 348 rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, 349 (u16 *)__LC_EXT_INT_CODE); 350 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); 351 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 352 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 353 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 354 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 355 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 356 return rc ? 
-EFAULT : 0; 357 } 358 359 static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) 360 { 361 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 362 int rc; 363 364 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, 365 0, 0); 366 367 rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP, 368 (u16 __user *)__LC_EXT_INT_CODE); 369 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); 370 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 371 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 372 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 373 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 374 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 375 return rc ? -EFAULT : 0; 376 } 377 378 static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) 379 { 380 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 381 struct kvm_s390_ext_info ext; 382 int rc; 383 384 spin_lock(&li->lock); 385 ext = li->irq.ext; 386 clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); 387 li->irq.ext.ext_params2 = 0; 388 spin_unlock(&li->lock); 389 390 VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx", 391 ext.ext_params2); 392 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 393 KVM_S390_INT_PFAULT_INIT, 394 0, ext.ext_params2); 395 396 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE); 397 rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR); 398 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 399 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 400 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 401 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 402 rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2); 403 return rc ? -EFAULT : 0; 404 } 405 406 static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) 407 { 408 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 409 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 410 struct kvm_s390_mchk_info mchk = {}; 411 unsigned long adtl_status_addr; 412 int deliver = 0; 413 int rc = 0; 414 415 spin_lock(&fi->lock); 416 spin_lock(&li->lock); 417 if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) || 418 test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) { 419 /* 420 * If there was an exigent machine check pending, then any 421 * repressible machine checks that might have been pending 422 * are indicated along with it, so always clear bits for 423 * repressible and exigent interrupts 424 */ 425 mchk = li->irq.mchk; 426 clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); 427 clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); 428 memset(&li->irq.mchk, 0, sizeof(mchk)); 429 deliver = 1; 430 } 431 /* 432 * We indicate floating repressible conditions along with 433 * other pending conditions. Channel Report Pending and Channel 434 * Subsystem damage are the only two and and are indicated by 435 * bits in mcic and masked in cr14. 
436 */ 437 if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { 438 mchk.mcic |= fi->mchk.mcic; 439 mchk.cr14 |= fi->mchk.cr14; 440 memset(&fi->mchk, 0, sizeof(mchk)); 441 deliver = 1; 442 } 443 spin_unlock(&li->lock); 444 spin_unlock(&fi->lock); 445 446 if (deliver) { 447 VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx", 448 mchk.mcic); 449 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 450 KVM_S390_MCHK, 451 mchk.cr14, mchk.mcic); 452 453 rc = kvm_s390_vcpu_store_status(vcpu, 454 KVM_S390_STORE_STATUS_PREFIXED); 455 rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, 456 &adtl_status_addr, 457 sizeof(unsigned long)); 458 rc |= kvm_s390_vcpu_store_adtl_status(vcpu, 459 adtl_status_addr); 460 rc |= put_guest_lc(vcpu, mchk.mcic, 461 (u64 __user *) __LC_MCCK_CODE); 462 rc |= put_guest_lc(vcpu, mchk.failing_storage_address, 463 (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); 464 rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, 465 &mchk.fixed_logout, 466 sizeof(mchk.fixed_logout)); 467 rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, 468 &vcpu->arch.sie_block->gpsw, 469 sizeof(psw_t)); 470 rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, 471 &vcpu->arch.sie_block->gpsw, 472 sizeof(psw_t)); 473 } 474 return rc ? -EFAULT : 0; 475 } 476 477 static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) 478 { 479 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 480 int rc; 481 482 VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart"); 483 vcpu->stat.deliver_restart_signal++; 484 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); 485 486 rc = write_guest_lc(vcpu, 487 offsetof(struct lowcore, restart_old_psw), 488 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 489 rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw), 490 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 491 clear_bit(IRQ_PEND_RESTART, &li->pending_irqs); 492 return rc ? -EFAULT : 0; 493 } 494 495 static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu) 496 { 497 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 498 struct kvm_s390_prefix_info prefix; 499 500 spin_lock(&li->lock); 501 prefix = li->irq.prefix; 502 li->irq.prefix.address = 0; 503 clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); 504 spin_unlock(&li->lock); 505 506 vcpu->stat.deliver_prefix_signal++; 507 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 508 KVM_S390_SIGP_SET_PREFIX, 509 prefix.address, 0); 510 511 kvm_s390_set_prefix(vcpu, prefix.address); 512 return 0; 513 } 514 515 static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu) 516 { 517 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 518 int rc; 519 int cpu_addr; 520 521 spin_lock(&li->lock); 522 cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS); 523 clear_bit(cpu_addr, li->sigp_emerg_pending); 524 if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS)) 525 clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 526 spin_unlock(&li->lock); 527 528 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg"); 529 vcpu->stat.deliver_emergency_signal++; 530 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 531 cpu_addr, 0); 532 533 rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG, 534 (u16 *)__LC_EXT_INT_CODE); 535 rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR); 536 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 537 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 538 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 539 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 540 return rc ? 
-EFAULT : 0; 541 } 542 543 static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu) 544 { 545 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 546 struct kvm_s390_extcall_info extcall; 547 int rc; 548 549 spin_lock(&li->lock); 550 extcall = li->irq.extcall; 551 li->irq.extcall.code = 0; 552 clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); 553 spin_unlock(&li->lock); 554 555 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call"); 556 vcpu->stat.deliver_external_call++; 557 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 558 KVM_S390_INT_EXTERNAL_CALL, 559 extcall.code, 0); 560 561 rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL, 562 (u16 *)__LC_EXT_INT_CODE); 563 rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR); 564 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 565 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 566 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, 567 sizeof(psw_t)); 568 return rc ? -EFAULT : 0; 569 } 570 571 static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) 572 { 573 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 574 struct kvm_s390_pgm_info pgm_info; 575 int rc = 0, nullifying = false; 576 u16 ilen; 577 578 spin_lock(&li->lock); 579 pgm_info = li->irq.pgm; 580 clear_bit(IRQ_PEND_PROG, &li->pending_irqs); 581 memset(&li->irq.pgm, 0, sizeof(pgm_info)); 582 spin_unlock(&li->lock); 583 584 ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK; 585 VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d", 586 pgm_info.code, ilen); 587 vcpu->stat.deliver_program_int++; 588 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, 589 pgm_info.code, 0); 590 591 switch (pgm_info.code & ~PGM_PER) { 592 case PGM_AFX_TRANSLATION: 593 case PGM_ASX_TRANSLATION: 594 case PGM_EX_TRANSLATION: 595 case PGM_LFX_TRANSLATION: 596 case PGM_LSTE_SEQUENCE: 597 case PGM_LSX_TRANSLATION: 598 case PGM_LX_TRANSLATION: 599 case PGM_PRIMARY_AUTHORITY: 600 case PGM_SECONDARY_AUTHORITY: 601 nullifying = true; 602 /* fall through */ 603 case PGM_SPACE_SWITCH: 604 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, 605 (u64 *)__LC_TRANS_EXC_CODE); 606 break; 607 case PGM_ALEN_TRANSLATION: 608 case PGM_ALE_SEQUENCE: 609 case PGM_ASTE_INSTANCE: 610 case PGM_ASTE_SEQUENCE: 611 case PGM_ASTE_VALIDITY: 612 case PGM_EXTENDED_AUTHORITY: 613 rc = put_guest_lc(vcpu, pgm_info.exc_access_id, 614 (u8 *)__LC_EXC_ACCESS_ID); 615 nullifying = true; 616 break; 617 case PGM_ASCE_TYPE: 618 case PGM_PAGE_TRANSLATION: 619 case PGM_REGION_FIRST_TRANS: 620 case PGM_REGION_SECOND_TRANS: 621 case PGM_REGION_THIRD_TRANS: 622 case PGM_SEGMENT_TRANSLATION: 623 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, 624 (u64 *)__LC_TRANS_EXC_CODE); 625 rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, 626 (u8 *)__LC_EXC_ACCESS_ID); 627 rc |= put_guest_lc(vcpu, pgm_info.op_access_id, 628 (u8 *)__LC_OP_ACCESS_ID); 629 nullifying = true; 630 break; 631 case PGM_MONITOR: 632 rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, 633 (u16 *)__LC_MON_CLASS_NR); 634 rc |= put_guest_lc(vcpu, pgm_info.mon_code, 635 (u64 *)__LC_MON_CODE); 636 break; 637 case PGM_VECTOR_PROCESSING: 638 case PGM_DATA: 639 rc = put_guest_lc(vcpu, pgm_info.data_exc_code, 640 (u32 *)__LC_DATA_EXC_CODE); 641 break; 642 case PGM_PROTECTION: 643 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, 644 (u64 *)__LC_TRANS_EXC_CODE); 645 rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, 646 (u8 *)__LC_EXC_ACCESS_ID); 647 break; 648 case PGM_STACK_FULL: 649 case PGM_STACK_EMPTY: 650 case 
PGM_STACK_SPECIFICATION: 651 case PGM_STACK_TYPE: 652 case PGM_STACK_OPERATION: 653 case PGM_TRACE_TABEL: 654 case PGM_CRYPTO_OPERATION: 655 nullifying = true; 656 break; 657 } 658 659 if (pgm_info.code & PGM_PER) { 660 rc |= put_guest_lc(vcpu, pgm_info.per_code, 661 (u8 *) __LC_PER_CODE); 662 rc |= put_guest_lc(vcpu, pgm_info.per_atmid, 663 (u8 *)__LC_PER_ATMID); 664 rc |= put_guest_lc(vcpu, pgm_info.per_address, 665 (u64 *) __LC_PER_ADDRESS); 666 rc |= put_guest_lc(vcpu, pgm_info.per_access_id, 667 (u8 *) __LC_PER_ACCESS_ID); 668 } 669 670 if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND)) 671 kvm_s390_rewind_psw(vcpu, ilen); 672 673 /* bit 1+2 of the target are the ilc, so we can directly use ilen */ 674 rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC); 675 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea, 676 (u64 *) __LC_LAST_BREAK); 677 rc |= put_guest_lc(vcpu, pgm_info.code, 678 (u16 *)__LC_PGM_INT_CODE); 679 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, 680 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 681 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW, 682 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 683 return rc ? -EFAULT : 0; 684 } 685 686 static int __must_check __deliver_service(struct kvm_vcpu *vcpu) 687 { 688 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 689 struct kvm_s390_ext_info ext; 690 int rc = 0; 691 692 spin_lock(&fi->lock); 693 if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { 694 spin_unlock(&fi->lock); 695 return 0; 696 } 697 ext = fi->srv_signal; 698 memset(&fi->srv_signal, 0, sizeof(ext)); 699 clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); 700 spin_unlock(&fi->lock); 701 702 VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x", 703 ext.ext_params); 704 vcpu->stat.deliver_service_signal++; 705 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, 706 ext.ext_params, 0); 707 708 rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); 709 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); 710 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 711 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 712 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 713 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 714 rc |= put_guest_lc(vcpu, ext.ext_params, 715 (u32 *)__LC_EXT_PARAMS); 716 717 return rc ? 
-EFAULT : 0; 718 } 719 720 static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) 721 { 722 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 723 struct kvm_s390_interrupt_info *inti; 724 int rc = 0; 725 726 spin_lock(&fi->lock); 727 inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT], 728 struct kvm_s390_interrupt_info, 729 list); 730 if (inti) { 731 list_del(&inti->list); 732 fi->counters[FIRQ_CNTR_PFAULT] -= 1; 733 } 734 if (list_empty(&fi->lists[FIRQ_LIST_PFAULT])) 735 clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); 736 spin_unlock(&fi->lock); 737 738 if (inti) { 739 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 740 KVM_S390_INT_PFAULT_DONE, 0, 741 inti->ext.ext_params2); 742 VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx", 743 inti->ext.ext_params2); 744 745 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, 746 (u16 *)__LC_EXT_INT_CODE); 747 rc |= put_guest_lc(vcpu, PFAULT_DONE, 748 (u16 *)__LC_EXT_CPU_ADDR); 749 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 750 &vcpu->arch.sie_block->gpsw, 751 sizeof(psw_t)); 752 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 753 &vcpu->arch.sie_block->gpsw, 754 sizeof(psw_t)); 755 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 756 (u64 *)__LC_EXT_PARAMS2); 757 kfree(inti); 758 } 759 return rc ? -EFAULT : 0; 760 } 761 762 static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu) 763 { 764 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 765 struct kvm_s390_interrupt_info *inti; 766 int rc = 0; 767 768 spin_lock(&fi->lock); 769 inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO], 770 struct kvm_s390_interrupt_info, 771 list); 772 if (inti) { 773 VCPU_EVENT(vcpu, 4, 774 "deliver: virtio parm: 0x%x,parm64: 0x%llx", 775 inti->ext.ext_params, inti->ext.ext_params2); 776 vcpu->stat.deliver_virtio_interrupt++; 777 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 778 inti->type, 779 inti->ext.ext_params, 780 inti->ext.ext_params2); 781 list_del(&inti->list); 782 fi->counters[FIRQ_CNTR_VIRTIO] -= 1; 783 } 784 if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO])) 785 clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); 786 spin_unlock(&fi->lock); 787 788 if (inti) { 789 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, 790 (u16 *)__LC_EXT_INT_CODE); 791 rc |= put_guest_lc(vcpu, VIRTIO_PARAM, 792 (u16 *)__LC_EXT_CPU_ADDR); 793 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 794 &vcpu->arch.sie_block->gpsw, 795 sizeof(psw_t)); 796 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 797 &vcpu->arch.sie_block->gpsw, 798 sizeof(psw_t)); 799 rc |= put_guest_lc(vcpu, inti->ext.ext_params, 800 (u32 *)__LC_EXT_PARAMS); 801 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 802 (u64 *)__LC_EXT_PARAMS2); 803 kfree(inti); 804 } 805 return rc ? 
-EFAULT : 0; 806 } 807 808 static int __must_check __deliver_io(struct kvm_vcpu *vcpu, 809 unsigned long irq_type) 810 { 811 struct list_head *isc_list; 812 struct kvm_s390_float_interrupt *fi; 813 struct kvm_s390_interrupt_info *inti = NULL; 814 int rc = 0; 815 816 fi = &vcpu->kvm->arch.float_int; 817 818 spin_lock(&fi->lock); 819 isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0]; 820 inti = list_first_entry_or_null(isc_list, 821 struct kvm_s390_interrupt_info, 822 list); 823 if (inti) { 824 VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type); 825 vcpu->stat.deliver_io_int++; 826 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 827 inti->type, 828 ((__u32)inti->io.subchannel_id << 16) | 829 inti->io.subchannel_nr, 830 ((__u64)inti->io.io_int_parm << 32) | 831 inti->io.io_int_word); 832 list_del(&inti->list); 833 fi->counters[FIRQ_CNTR_IO] -= 1; 834 } 835 if (list_empty(isc_list)) 836 clear_bit(irq_type, &fi->pending_irqs); 837 spin_unlock(&fi->lock); 838 839 if (inti) { 840 rc = put_guest_lc(vcpu, inti->io.subchannel_id, 841 (u16 *)__LC_SUBCHANNEL_ID); 842 rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, 843 (u16 *)__LC_SUBCHANNEL_NR); 844 rc |= put_guest_lc(vcpu, inti->io.io_int_parm, 845 (u32 *)__LC_IO_INT_PARM); 846 rc |= put_guest_lc(vcpu, inti->io.io_int_word, 847 (u32 *)__LC_IO_INT_WORD); 848 rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, 849 &vcpu->arch.sie_block->gpsw, 850 sizeof(psw_t)); 851 rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, 852 &vcpu->arch.sie_block->gpsw, 853 sizeof(psw_t)); 854 kfree(inti); 855 } 856 857 return rc ? -EFAULT : 0; 858 } 859 860 typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu); 861 862 static const deliver_irq_t deliver_irq_funcs[] = { 863 [IRQ_PEND_MCHK_EX] = __deliver_machine_check, 864 [IRQ_PEND_MCHK_REP] = __deliver_machine_check, 865 [IRQ_PEND_PROG] = __deliver_prog, 866 [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, 867 [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, 868 [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc, 869 [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer, 870 [IRQ_PEND_RESTART] = __deliver_restart, 871 [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, 872 [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, 873 [IRQ_PEND_EXT_SERVICE] = __deliver_service, 874 [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done, 875 [IRQ_PEND_VIRTIO] = __deliver_virtio, 876 }; 877 878 /* Check whether an external call is pending (deliverable or not) */ 879 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) 880 { 881 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 882 883 if (!sclp.has_sigpif) 884 return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); 885 886 return sca_ext_call_pending(vcpu, NULL); 887 } 888 889 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) 890 { 891 if (deliverable_irqs(vcpu)) 892 return 1; 893 894 if (kvm_cpu_has_pending_timer(vcpu)) 895 return 1; 896 897 /* external call pending and deliverable */ 898 if (kvm_s390_ext_call_pending(vcpu) && 899 !psw_extint_disabled(vcpu) && 900 (vcpu->arch.sie_block->gcr[0] & 0x2000ul)) 901 return 1; 902 903 if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu)) 904 return 1; 905 return 0; 906 } 907 908 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 909 { 910 return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu); 911 } 912 913 static u64 __calculate_sltime(struct kvm_vcpu *vcpu) 914 { 915 u64 now, cputm, sltime = 0; 916 917 if (ckc_interrupts_enabled(vcpu)) { 918 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); 919 sltime = 
tod_to_ns(vcpu->arch.sie_block->ckc - now); 920 /* already expired or overflow? */ 921 if (!sltime || vcpu->arch.sie_block->ckc <= now) 922 return 0; 923 if (cpu_timer_interrupts_enabled(vcpu)) { 924 cputm = kvm_s390_get_cpu_timer(vcpu); 925 /* already expired? */ 926 if (cputm >> 63) 927 return 0; 928 return min(sltime, tod_to_ns(cputm)); 929 } 930 } else if (cpu_timer_interrupts_enabled(vcpu)) { 931 sltime = kvm_s390_get_cpu_timer(vcpu); 932 /* already expired? */ 933 if (sltime >> 63) 934 return 0; 935 } 936 return sltime; 937 } 938 939 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) 940 { 941 u64 sltime; 942 943 vcpu->stat.exit_wait_state++; 944 945 /* fast path */ 946 if (kvm_arch_vcpu_runnable(vcpu)) 947 return 0; 948 949 if (psw_interrupts_disabled(vcpu)) { 950 VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); 951 return -EOPNOTSUPP; /* disabled wait */ 952 } 953 954 if (!ckc_interrupts_enabled(vcpu) && 955 !cpu_timer_interrupts_enabled(vcpu)) { 956 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); 957 __set_cpu_idle(vcpu); 958 goto no_timer; 959 } 960 961 sltime = __calculate_sltime(vcpu); 962 if (!sltime) 963 return 0; 964 965 __set_cpu_idle(vcpu); 966 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); 967 VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime); 968 no_timer: 969 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 970 kvm_vcpu_block(vcpu); 971 __unset_cpu_idle(vcpu); 972 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 973 974 hrtimer_cancel(&vcpu->arch.ckc_timer); 975 return 0; 976 } 977 978 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) 979 { 980 /* 981 * We cannot move this into the if, as the CPU might be already 982 * in kvm_vcpu_block without having the waitqueue set (polling) 983 */ 984 vcpu->valid_wakeup = true; 985 if (swait_active(&vcpu->wq)) { 986 /* 987 * The vcpu gave up the cpu voluntarily, mark it as a good 988 * yield-candidate. 989 */ 990 vcpu->preempted = true; 991 swake_up(&vcpu->wq); 992 vcpu->stat.halt_wakeup++; 993 } 994 } 995 996 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) 997 { 998 struct kvm_vcpu *vcpu; 999 u64 sltime; 1000 1001 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); 1002 sltime = __calculate_sltime(vcpu); 1003 1004 /* 1005 * If the monotonic clock runs faster than the tod clock we might be 1006 * woken up too early and have to go back to sleep to avoid deadlocks. 
1007 */ 1008 if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime))) 1009 return HRTIMER_RESTART; 1010 kvm_s390_vcpu_wakeup(vcpu); 1011 return HRTIMER_NORESTART; 1012 } 1013 1014 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) 1015 { 1016 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1017 1018 spin_lock(&li->lock); 1019 li->pending_irqs = 0; 1020 bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS); 1021 memset(&li->irq, 0, sizeof(li->irq)); 1022 spin_unlock(&li->lock); 1023 1024 sca_clear_ext_call(vcpu); 1025 } 1026 1027 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) 1028 { 1029 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1030 deliver_irq_t func; 1031 int rc = 0; 1032 unsigned long irq_type; 1033 unsigned long irqs; 1034 1035 __reset_intercept_indicators(vcpu); 1036 1037 /* pending ckc conditions might have been invalidated */ 1038 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1039 if (ckc_irq_pending(vcpu)) 1040 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1041 1042 /* pending cpu timer conditions might have been invalidated */ 1043 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1044 if (cpu_timer_irq_pending(vcpu)) 1045 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1046 1047 while ((irqs = deliverable_irqs(vcpu)) && !rc) { 1048 /* bits are in the order of interrupt priority */ 1049 irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT); 1050 if (is_ioirq(irq_type)) { 1051 rc = __deliver_io(vcpu, irq_type); 1052 } else { 1053 func = deliver_irq_funcs[irq_type]; 1054 if (!func) { 1055 WARN_ON_ONCE(func == NULL); 1056 clear_bit(irq_type, &li->pending_irqs); 1057 continue; 1058 } 1059 rc = func(vcpu); 1060 } 1061 } 1062 1063 set_intercept_indicators(vcpu); 1064 1065 return rc; 1066 } 1067 1068 static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1069 { 1070 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1071 1072 VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code); 1073 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, 1074 irq->u.pgm.code, 0); 1075 1076 if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) { 1077 /* auto detection if no valid ILC was given */ 1078 irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK; 1079 irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu); 1080 irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID; 1081 } 1082 1083 if (irq->u.pgm.code == PGM_PER) { 1084 li->irq.pgm.code |= PGM_PER; 1085 li->irq.pgm.flags = irq->u.pgm.flags; 1086 /* only modify PER related information */ 1087 li->irq.pgm.per_address = irq->u.pgm.per_address; 1088 li->irq.pgm.per_code = irq->u.pgm.per_code; 1089 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid; 1090 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id; 1091 } else if (!(irq->u.pgm.code & PGM_PER)) { 1092 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) | 1093 irq->u.pgm.code; 1094 li->irq.pgm.flags = irq->u.pgm.flags; 1095 /* only modify non-PER information */ 1096 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code; 1097 li->irq.pgm.mon_code = irq->u.pgm.mon_code; 1098 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code; 1099 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr; 1100 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id; 1101 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id; 1102 } else { 1103 li->irq.pgm = irq->u.pgm; 1104 } 1105 set_bit(IRQ_PEND_PROG, &li->pending_irqs); 1106 return 0; 1107 } 1108 1109 static int __inject_pfault_init(struct kvm_vcpu *vcpu, 
struct kvm_s390_irq *irq) 1110 { 1111 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1112 1113 VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx", 1114 irq->u.ext.ext_params2); 1115 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, 1116 irq->u.ext.ext_params, 1117 irq->u.ext.ext_params2); 1118 1119 li->irq.ext = irq->u.ext; 1120 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); 1121 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1122 return 0; 1123 } 1124 1125 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1126 { 1127 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1128 struct kvm_s390_extcall_info *extcall = &li->irq.extcall; 1129 uint16_t src_id = irq->u.extcall.code; 1130 1131 VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u", 1132 src_id); 1133 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, 1134 src_id, 0); 1135 1136 /* sending vcpu invalid */ 1137 if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) 1138 return -EINVAL; 1139 1140 if (sclp.has_sigpif) 1141 return sca_inject_ext_call(vcpu, src_id); 1142 1143 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) 1144 return -EBUSY; 1145 *extcall = irq->u.extcall; 1146 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1147 return 0; 1148 } 1149 1150 static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1151 { 1152 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1153 struct kvm_s390_prefix_info *prefix = &li->irq.prefix; 1154 1155 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x", 1156 irq->u.prefix.address); 1157 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, 1158 irq->u.prefix.address, 0); 1159 1160 if (!is_vcpu_stopped(vcpu)) 1161 return -EBUSY; 1162 1163 *prefix = irq->u.prefix; 1164 set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); 1165 return 0; 1166 } 1167 1168 #define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS) 1169 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1170 { 1171 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1172 struct kvm_s390_stop_info *stop = &li->irq.stop; 1173 int rc = 0; 1174 1175 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0); 1176 1177 if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS) 1178 return -EINVAL; 1179 1180 if (is_vcpu_stopped(vcpu)) { 1181 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS) 1182 rc = kvm_s390_store_status_unloaded(vcpu, 1183 KVM_S390_STORE_STATUS_NOADDR); 1184 return rc; 1185 } 1186 1187 if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs)) 1188 return -EBUSY; 1189 stop->flags = irq->u.stop.flags; 1190 __set_cpuflag(vcpu, CPUSTAT_STOP_INT); 1191 return 0; 1192 } 1193 1194 static int __inject_sigp_restart(struct kvm_vcpu *vcpu, 1195 struct kvm_s390_irq *irq) 1196 { 1197 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1198 1199 VCPU_EVENT(vcpu, 3, "%s", "inject: restart int"); 1200 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); 1201 1202 set_bit(IRQ_PEND_RESTART, &li->pending_irqs); 1203 return 0; 1204 } 1205 1206 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, 1207 struct kvm_s390_irq *irq) 1208 { 1209 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1210 1211 VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u", 1212 irq->u.emerg.code); 1213 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 1214 irq->u.emerg.code, 0); 1215 1216 /* sending vcpu invalid 
*/ 1217 if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL) 1218 return -EINVAL; 1219 1220 set_bit(irq->u.emerg.code, li->sigp_emerg_pending); 1221 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 1222 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1223 return 0; 1224 } 1225 1226 static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1227 { 1228 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1229 struct kvm_s390_mchk_info *mchk = &li->irq.mchk; 1230 1231 VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx", 1232 irq->u.mchk.mcic); 1233 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, 1234 irq->u.mchk.mcic); 1235 1236 /* 1237 * Because repressible machine checks can be indicated along with 1238 * exigent machine checks (PoP, Chapter 11, Interruption action) 1239 * we need to combine cr14, mcic and external damage code. 1240 * Failing storage address and the logout area should not be or'ed 1241 * together, we just indicate the last occurrence of the corresponding 1242 * machine check 1243 */ 1244 mchk->cr14 |= irq->u.mchk.cr14; 1245 mchk->mcic |= irq->u.mchk.mcic; 1246 mchk->ext_damage_code |= irq->u.mchk.ext_damage_code; 1247 mchk->failing_storage_address = irq->u.mchk.failing_storage_address; 1248 memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout, 1249 sizeof(mchk->fixed_logout)); 1250 if (mchk->mcic & MCHK_EX_MASK) 1251 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); 1252 else if (mchk->mcic & MCHK_REP_MASK) 1253 set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); 1254 return 0; 1255 } 1256 1257 static int __inject_ckc(struct kvm_vcpu *vcpu) 1258 { 1259 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1260 1261 VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external"); 1262 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, 1263 0, 0); 1264 1265 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1266 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1267 return 0; 1268 } 1269 1270 static int __inject_cpu_timer(struct kvm_vcpu *vcpu) 1271 { 1272 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1273 1274 VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external"); 1275 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, 1276 0, 0); 1277 1278 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1279 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1280 return 0; 1281 } 1282 1283 static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm, 1284 int isc, u32 schid) 1285 { 1286 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1287 struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; 1288 struct kvm_s390_interrupt_info *iter; 1289 u16 id = (schid & 0xffff0000U) >> 16; 1290 u16 nr = schid & 0x0000ffffU; 1291 1292 spin_lock(&fi->lock); 1293 list_for_each_entry(iter, isc_list, list) { 1294 if (schid && (id != iter->io.subchannel_id || 1295 nr != iter->io.subchannel_nr)) 1296 continue; 1297 /* found an appropriate entry */ 1298 list_del_init(&iter->list); 1299 fi->counters[FIRQ_CNTR_IO] -= 1; 1300 if (list_empty(isc_list)) 1301 clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); 1302 spin_unlock(&fi->lock); 1303 return iter; 1304 } 1305 spin_unlock(&fi->lock); 1306 return NULL; 1307 } 1308 1309 /* 1310 * Dequeue and return an I/O interrupt matching any of the interruption 1311 * subclasses as designated by the isc mask in cr6 and the schid (if != 0). 
1312 */ 1313 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, 1314 u64 isc_mask, u32 schid) 1315 { 1316 struct kvm_s390_interrupt_info *inti = NULL; 1317 int isc; 1318 1319 for (isc = 0; isc <= MAX_ISC && !inti; isc++) { 1320 if (isc_mask & isc_to_isc_bits(isc)) 1321 inti = get_io_int(kvm, isc, schid); 1322 } 1323 return inti; 1324 } 1325 1326 #define SCCB_MASK 0xFFFFFFF8 1327 #define SCCB_EVENT_PENDING 0x3 1328 1329 static int __inject_service(struct kvm *kvm, 1330 struct kvm_s390_interrupt_info *inti) 1331 { 1332 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1333 1334 spin_lock(&fi->lock); 1335 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; 1336 /* 1337 * Early versions of the QEMU s390 bios will inject several 1338 * service interrupts after another without handling a 1339 * condition code indicating busy. 1340 * We will silently ignore those superfluous sccb values. 1341 * A future version of QEMU will take care of serialization 1342 * of servc requests 1343 */ 1344 if (fi->srv_signal.ext_params & SCCB_MASK) 1345 goto out; 1346 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK; 1347 set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); 1348 out: 1349 spin_unlock(&fi->lock); 1350 kfree(inti); 1351 return 0; 1352 } 1353 1354 static int __inject_virtio(struct kvm *kvm, 1355 struct kvm_s390_interrupt_info *inti) 1356 { 1357 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1358 1359 spin_lock(&fi->lock); 1360 if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) { 1361 spin_unlock(&fi->lock); 1362 return -EBUSY; 1363 } 1364 fi->counters[FIRQ_CNTR_VIRTIO] += 1; 1365 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]); 1366 set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); 1367 spin_unlock(&fi->lock); 1368 return 0; 1369 } 1370 1371 static int __inject_pfault_done(struct kvm *kvm, 1372 struct kvm_s390_interrupt_info *inti) 1373 { 1374 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1375 1376 spin_lock(&fi->lock); 1377 if (fi->counters[FIRQ_CNTR_PFAULT] >= 1378 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) { 1379 spin_unlock(&fi->lock); 1380 return -EBUSY; 1381 } 1382 fi->counters[FIRQ_CNTR_PFAULT] += 1; 1383 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]); 1384 set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); 1385 spin_unlock(&fi->lock); 1386 return 0; 1387 } 1388 1389 #define CR_PENDING_SUBCLASS 28 1390 static int __inject_float_mchk(struct kvm *kvm, 1391 struct kvm_s390_interrupt_info *inti) 1392 { 1393 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1394 1395 spin_lock(&fi->lock); 1396 fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS); 1397 fi->mchk.mcic |= inti->mchk.mcic; 1398 set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs); 1399 spin_unlock(&fi->lock); 1400 kfree(inti); 1401 return 0; 1402 } 1403 1404 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) 1405 { 1406 struct kvm_s390_float_interrupt *fi; 1407 struct list_head *list; 1408 int isc; 1409 1410 fi = &kvm->arch.float_int; 1411 spin_lock(&fi->lock); 1412 if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) { 1413 spin_unlock(&fi->lock); 1414 return -EBUSY; 1415 } 1416 fi->counters[FIRQ_CNTR_IO] += 1; 1417 1418 isc = int_word_to_isc(inti->io.io_int_word); 1419 list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; 1420 list_add_tail(&inti->list, list); 1421 set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); 1422 spin_unlock(&fi->lock); 1423 return 0; 1424 } 1425 1426 /* 1427 * Find a 
destination VCPU for a floating irq and kick it. 1428 */ 1429 static void __floating_irq_kick(struct kvm *kvm, u64 type) 1430 { 1431 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1432 struct kvm_s390_local_interrupt *li; 1433 struct kvm_vcpu *dst_vcpu; 1434 int sigcpu, online_vcpus, nr_tries = 0; 1435 1436 online_vcpus = atomic_read(&kvm->online_vcpus); 1437 if (!online_vcpus) 1438 return; 1439 1440 /* find idle VCPUs first, then round robin */ 1441 sigcpu = find_first_bit(fi->idle_mask, online_vcpus); 1442 if (sigcpu == online_vcpus) { 1443 do { 1444 sigcpu = fi->next_rr_cpu; 1445 fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus; 1446 /* avoid endless loops if all vcpus are stopped */ 1447 if (nr_tries++ >= online_vcpus) 1448 return; 1449 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu))); 1450 } 1451 dst_vcpu = kvm_get_vcpu(kvm, sigcpu); 1452 1453 /* make the VCPU drop out of the SIE, or wake it up if sleeping */ 1454 li = &dst_vcpu->arch.local_int; 1455 spin_lock(&li->lock); 1456 switch (type) { 1457 case KVM_S390_MCHK: 1458 atomic_or(CPUSTAT_STOP_INT, li->cpuflags); 1459 break; 1460 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1461 atomic_or(CPUSTAT_IO_INT, li->cpuflags); 1462 break; 1463 default: 1464 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1465 break; 1466 } 1467 spin_unlock(&li->lock); 1468 kvm_s390_vcpu_wakeup(dst_vcpu); 1469 } 1470 1471 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) 1472 { 1473 u64 type = READ_ONCE(inti->type); 1474 int rc; 1475 1476 switch (type) { 1477 case KVM_S390_MCHK: 1478 rc = __inject_float_mchk(kvm, inti); 1479 break; 1480 case KVM_S390_INT_VIRTIO: 1481 rc = __inject_virtio(kvm, inti); 1482 break; 1483 case KVM_S390_INT_SERVICE: 1484 rc = __inject_service(kvm, inti); 1485 break; 1486 case KVM_S390_INT_PFAULT_DONE: 1487 rc = __inject_pfault_done(kvm, inti); 1488 break; 1489 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1490 rc = __inject_io(kvm, inti); 1491 break; 1492 default: 1493 rc = -EINVAL; 1494 } 1495 if (rc) 1496 return rc; 1497 1498 __floating_irq_kick(kvm, type); 1499 return 0; 1500 } 1501 1502 int kvm_s390_inject_vm(struct kvm *kvm, 1503 struct kvm_s390_interrupt *s390int) 1504 { 1505 struct kvm_s390_interrupt_info *inti; 1506 int rc; 1507 1508 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 1509 if (!inti) 1510 return -ENOMEM; 1511 1512 inti->type = s390int->type; 1513 switch (inti->type) { 1514 case KVM_S390_INT_VIRTIO: 1515 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx", 1516 s390int->parm, s390int->parm64); 1517 inti->ext.ext_params = s390int->parm; 1518 inti->ext.ext_params2 = s390int->parm64; 1519 break; 1520 case KVM_S390_INT_SERVICE: 1521 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm); 1522 inti->ext.ext_params = s390int->parm; 1523 break; 1524 case KVM_S390_INT_PFAULT_DONE: 1525 inti->ext.ext_params2 = s390int->parm64; 1526 break; 1527 case KVM_S390_MCHK: 1528 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx", 1529 s390int->parm64); 1530 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ 1531 inti->mchk.mcic = s390int->parm64; 1532 break; 1533 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1534 if (inti->type & KVM_S390_INT_IO_AI_MASK) 1535 VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)"); 1536 else 1537 VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x", 1538 s390int->type & IOINT_CSSID_MASK, 1539 s390int->type & IOINT_SSID_MASK, 1540 s390int->type & IOINT_SCHID_MASK); 1541 inti->io.subchannel_id = s390int->parm >> 16; 1542 inti->io.subchannel_nr = 
s390int->parm & 0x0000ffffu; 1543 inti->io.io_int_parm = s390int->parm64 >> 32; 1544 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull; 1545 break; 1546 default: 1547 kfree(inti); 1548 return -EINVAL; 1549 } 1550 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64, 1551 2); 1552 1553 rc = __inject_vm(kvm, inti); 1554 if (rc) 1555 kfree(inti); 1556 return rc; 1557 } 1558 1559 int kvm_s390_reinject_io_int(struct kvm *kvm, 1560 struct kvm_s390_interrupt_info *inti) 1561 { 1562 return __inject_vm(kvm, inti); 1563 } 1564 1565 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, 1566 struct kvm_s390_irq *irq) 1567 { 1568 irq->type = s390int->type; 1569 switch (irq->type) { 1570 case KVM_S390_PROGRAM_INT: 1571 if (s390int->parm & 0xffff0000) 1572 return -EINVAL; 1573 irq->u.pgm.code = s390int->parm; 1574 break; 1575 case KVM_S390_SIGP_SET_PREFIX: 1576 irq->u.prefix.address = s390int->parm; 1577 break; 1578 case KVM_S390_SIGP_STOP: 1579 irq->u.stop.flags = s390int->parm; 1580 break; 1581 case KVM_S390_INT_EXTERNAL_CALL: 1582 if (s390int->parm & 0xffff0000) 1583 return -EINVAL; 1584 irq->u.extcall.code = s390int->parm; 1585 break; 1586 case KVM_S390_INT_EMERGENCY: 1587 if (s390int->parm & 0xffff0000) 1588 return -EINVAL; 1589 irq->u.emerg.code = s390int->parm; 1590 break; 1591 case KVM_S390_MCHK: 1592 irq->u.mchk.mcic = s390int->parm64; 1593 break; 1594 } 1595 return 0; 1596 } 1597 1598 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu) 1599 { 1600 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1601 1602 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); 1603 } 1604 1605 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) 1606 { 1607 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1608 1609 spin_lock(&li->lock); 1610 li->irq.stop.flags = 0; 1611 clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); 1612 spin_unlock(&li->lock); 1613 } 1614 1615 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1616 { 1617 int rc; 1618 1619 switch (irq->type) { 1620 case KVM_S390_PROGRAM_INT: 1621 rc = __inject_prog(vcpu, irq); 1622 break; 1623 case KVM_S390_SIGP_SET_PREFIX: 1624 rc = __inject_set_prefix(vcpu, irq); 1625 break; 1626 case KVM_S390_SIGP_STOP: 1627 rc = __inject_sigp_stop(vcpu, irq); 1628 break; 1629 case KVM_S390_RESTART: 1630 rc = __inject_sigp_restart(vcpu, irq); 1631 break; 1632 case KVM_S390_INT_CLOCK_COMP: 1633 rc = __inject_ckc(vcpu); 1634 break; 1635 case KVM_S390_INT_CPU_TIMER: 1636 rc = __inject_cpu_timer(vcpu); 1637 break; 1638 case KVM_S390_INT_EXTERNAL_CALL: 1639 rc = __inject_extcall(vcpu, irq); 1640 break; 1641 case KVM_S390_INT_EMERGENCY: 1642 rc = __inject_sigp_emergency(vcpu, irq); 1643 break; 1644 case KVM_S390_MCHK: 1645 rc = __inject_mchk(vcpu, irq); 1646 break; 1647 case KVM_S390_INT_PFAULT_INIT: 1648 rc = __inject_pfault_init(vcpu, irq); 1649 break; 1650 case KVM_S390_INT_VIRTIO: 1651 case KVM_S390_INT_SERVICE: 1652 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1653 default: 1654 rc = -EINVAL; 1655 } 1656 1657 return rc; 1658 } 1659 1660 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1661 { 1662 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1663 int rc; 1664 1665 spin_lock(&li->lock); 1666 rc = do_inject_vcpu(vcpu, irq); 1667 spin_unlock(&li->lock); 1668 if (!rc) 1669 kvm_s390_vcpu_wakeup(vcpu); 1670 return rc; 1671 } 1672 1673 static inline void clear_irq_list(struct list_head *_list) 1674 { 1675 struct kvm_s390_interrupt_info *inti, *n; 1676 
1677 list_for_each_entry_safe(inti, n, _list, list) { 1678 list_del(&inti->list); 1679 kfree(inti); 1680 } 1681 } 1682 1683 static void inti_to_irq(struct kvm_s390_interrupt_info *inti, 1684 struct kvm_s390_irq *irq) 1685 { 1686 irq->type = inti->type; 1687 switch (inti->type) { 1688 case KVM_S390_INT_PFAULT_INIT: 1689 case KVM_S390_INT_PFAULT_DONE: 1690 case KVM_S390_INT_VIRTIO: 1691 irq->u.ext = inti->ext; 1692 break; 1693 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1694 irq->u.io = inti->io; 1695 break; 1696 } 1697 } 1698 1699 void kvm_s390_clear_float_irqs(struct kvm *kvm) 1700 { 1701 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1702 int i; 1703 1704 spin_lock(&fi->lock); 1705 fi->pending_irqs = 0; 1706 memset(&fi->srv_signal, 0, sizeof(fi->srv_signal)); 1707 memset(&fi->mchk, 0, sizeof(fi->mchk)); 1708 for (i = 0; i < FIRQ_LIST_COUNT; i++) 1709 clear_irq_list(&fi->lists[i]); 1710 for (i = 0; i < FIRQ_MAX_COUNT; i++) 1711 fi->counters[i] = 0; 1712 spin_unlock(&fi->lock); 1713 }; 1714 1715 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) 1716 { 1717 struct kvm_s390_interrupt_info *inti; 1718 struct kvm_s390_float_interrupt *fi; 1719 struct kvm_s390_irq *buf; 1720 struct kvm_s390_irq *irq; 1721 int max_irqs; 1722 int ret = 0; 1723 int n = 0; 1724 int i; 1725 1726 if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) 1727 return -EINVAL; 1728 1729 /* 1730 * We are already using -ENOMEM to signal 1731 * userspace it may retry with a bigger buffer, 1732 * so we need to use something else for this case 1733 */ 1734 buf = vzalloc(len); 1735 if (!buf) 1736 return -ENOBUFS; 1737 1738 max_irqs = len / sizeof(struct kvm_s390_irq); 1739 1740 fi = &kvm->arch.float_int; 1741 spin_lock(&fi->lock); 1742 for (i = 0; i < FIRQ_LIST_COUNT; i++) { 1743 list_for_each_entry(inti, &fi->lists[i], list) { 1744 if (n == max_irqs) { 1745 /* signal userspace to try again */ 1746 ret = -ENOMEM; 1747 goto out; 1748 } 1749 inti_to_irq(inti, &buf[n]); 1750 n++; 1751 } 1752 } 1753 if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) { 1754 if (n == max_irqs) { 1755 /* signal userspace to try again */ 1756 ret = -ENOMEM; 1757 goto out; 1758 } 1759 irq = (struct kvm_s390_irq *) &buf[n]; 1760 irq->type = KVM_S390_INT_SERVICE; 1761 irq->u.ext = fi->srv_signal; 1762 n++; 1763 } 1764 if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { 1765 if (n == max_irqs) { 1766 /* signal userspace to try again */ 1767 ret = -ENOMEM; 1768 goto out; 1769 } 1770 irq = (struct kvm_s390_irq *) &buf[n]; 1771 irq->type = KVM_S390_MCHK; 1772 irq->u.mchk = fi->mchk; 1773 n++; 1774 } 1775 1776 out: 1777 spin_unlock(&fi->lock); 1778 if (!ret && n > 0) { 1779 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) 1780 ret = -EFAULT; 1781 } 1782 vfree(buf); 1783 1784 return ret < 0 ? 
ret : n; 1785 } 1786 1787 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 1788 { 1789 int r; 1790 1791 switch (attr->group) { 1792 case KVM_DEV_FLIC_GET_ALL_IRQS: 1793 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr, 1794 attr->attr); 1795 break; 1796 default: 1797 r = -EINVAL; 1798 } 1799 1800 return r; 1801 } 1802 1803 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti, 1804 u64 addr) 1805 { 1806 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; 1807 void *target = NULL; 1808 void __user *source; 1809 u64 size; 1810 1811 if (get_user(inti->type, (u64 __user *)addr)) 1812 return -EFAULT; 1813 1814 switch (inti->type) { 1815 case KVM_S390_INT_PFAULT_INIT: 1816 case KVM_S390_INT_PFAULT_DONE: 1817 case KVM_S390_INT_VIRTIO: 1818 case KVM_S390_INT_SERVICE: 1819 target = (void *) &inti->ext; 1820 source = &uptr->u.ext; 1821 size = sizeof(inti->ext); 1822 break; 1823 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1824 target = (void *) &inti->io; 1825 source = &uptr->u.io; 1826 size = sizeof(inti->io); 1827 break; 1828 case KVM_S390_MCHK: 1829 target = (void *) &inti->mchk; 1830 source = &uptr->u.mchk; 1831 size = sizeof(inti->mchk); 1832 break; 1833 default: 1834 return -EINVAL; 1835 } 1836 1837 if (copy_from_user(target, source, size)) 1838 return -EFAULT; 1839 1840 return 0; 1841 } 1842 1843 static int enqueue_floating_irq(struct kvm_device *dev, 1844 struct kvm_device_attr *attr) 1845 { 1846 struct kvm_s390_interrupt_info *inti = NULL; 1847 int r = 0; 1848 int len = attr->attr; 1849 1850 if (len % sizeof(struct kvm_s390_irq) != 0) 1851 return -EINVAL; 1852 else if (len > KVM_S390_FLIC_MAX_BUFFER) 1853 return -EINVAL; 1854 1855 while (len >= sizeof(struct kvm_s390_irq)) { 1856 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 1857 if (!inti) 1858 return -ENOMEM; 1859 1860 r = copy_irq_from_user(inti, attr->addr); 1861 if (r) { 1862 kfree(inti); 1863 return r; 1864 } 1865 r = __inject_vm(dev->kvm, inti); 1866 if (r) { 1867 kfree(inti); 1868 return r; 1869 } 1870 len -= sizeof(struct kvm_s390_irq); 1871 attr->addr += sizeof(struct kvm_s390_irq); 1872 } 1873 1874 return r; 1875 } 1876 1877 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id) 1878 { 1879 if (id >= MAX_S390_IO_ADAPTERS) 1880 return NULL; 1881 return kvm->arch.adapters[id]; 1882 } 1883 1884 static int register_io_adapter(struct kvm_device *dev, 1885 struct kvm_device_attr *attr) 1886 { 1887 struct s390_io_adapter *adapter; 1888 struct kvm_s390_io_adapter adapter_info; 1889 1890 if (copy_from_user(&adapter_info, 1891 (void __user *)attr->addr, sizeof(adapter_info))) 1892 return -EFAULT; 1893 1894 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) || 1895 (dev->kvm->arch.adapters[adapter_info.id] != NULL)) 1896 return -EINVAL; 1897 1898 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 1899 if (!adapter) 1900 return -ENOMEM; 1901 1902 INIT_LIST_HEAD(&adapter->maps); 1903 init_rwsem(&adapter->maps_lock); 1904 atomic_set(&adapter->nr_maps, 0); 1905 adapter->id = adapter_info.id; 1906 adapter->isc = adapter_info.isc; 1907 adapter->maskable = adapter_info.maskable; 1908 adapter->masked = false; 1909 adapter->swap = adapter_info.swap; 1910 dev->kvm->arch.adapters[adapter->id] = adapter; 1911 1912 return 0; 1913 } 1914 1915 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked) 1916 { 1917 int ret; 1918 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 1919 1920 if (!adapter || !adapter->maskable) 1921 return 

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
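
/*
 * KVM_DEV_FLIC_CLEAR_IO_IRQ: userspace passes the 4-byte subchannel id of
 * interest (attr->attr must be sizeof(u32), attr->addr points at it) and
 * the pending I/O interrupt for that subchannel, on whichever ISC it is
 * queued, is dequeued and freed.
 */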

static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
	u32 schid;

	if (attr->flags)
		return -EINVAL;
	if (attr->attr != sizeof(schid))
		return -EINVAL;
	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
		return -EFAULT;
	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
	/*
	 * If userspace is conforming to the architecture, we can have at most
	 * one pending I/O interrupt per subchannel, so this is effectively a
	 * clear all.
	 */
	return 0;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_has_attr(struct kvm_device *dev,
			 struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
	case KVM_DEV_FLIC_ENQUEUE:
	case KVM_DEV_FLIC_CLEAR_IRQS:
	case KVM_DEV_FLIC_APF_ENABLE:
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		return 0;
	}
	return -ENXIO;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
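
/*
 * Translate an indicator location into a bit number usable with the generic
 * bitops: (addr % PAGE_SIZE) * 8 is the bit offset of the byte within the
 * mapped page and bit_nr selects the bit within that byte.  For example, a
 * byte offset of 1 with bit_nr 2 yields bit 10; with swap set this becomes
 * 10 ^ 63 = 53, i.e. the bit numbering within the 64-bit word is reversed,
 * presumably to translate between the guest's MSB-first view of the
 * indicator bytes and the LSB-first numbering used by set_bit().
 */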

static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}
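
/*
 * Set the per-adapter indicator bit described by @adapter_int and then the
 * corresponding summary indicator bit.  Both indicator pages must have been
 * mapped via KVM_S390_IO_ADAPTER_MAP beforehand, otherwise -1 is returned.
 * The pages are marked dirty for dirty-page tracking.  Returns 1 if the
 * summary bit was newly set (a floating I/O interrupt should be injected)
 * and 0 if it was already set, i.e. the interrupt is coalesced with one
 * that is still pending.
 */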

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}
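
/*
 * Store the vcpu's pending local interrupts into the userspace buffer as an
 * array of struct kvm_s390_irq.  Pending SIGP emergency signals are expanded
 * into one KVM_S390_INT_EMERGENCY entry per signalling cpu address, and an
 * external call that is pending in the SCA (when sigp interpretation is in
 * use) is reported as KVM_S390_INT_EXTERNAL_CALL.  Returns the number of
 * bytes written, or -ENOBUFS if the buffer is too small.
 */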

int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}