1 /* 2 * handling kvm guest interrupts 3 * 4 * Copyright IBM Corp. 2008, 2015 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License (version 2 only) 8 * as published by the Free Software Foundation. 9 * 10 * Author(s): Carsten Otte <cotte@de.ibm.com> 11 */ 12 13 #include <linux/interrupt.h> 14 #include <linux/kvm_host.h> 15 #include <linux/hrtimer.h> 16 #include <linux/mmu_context.h> 17 #include <linux/signal.h> 18 #include <linux/slab.h> 19 #include <linux/bitmap.h> 20 #include <linux/vmalloc.h> 21 #include <asm/asm-offsets.h> 22 #include <asm/dis.h> 23 #include <asm/uaccess.h> 24 #include <asm/sclp.h> 25 #include <asm/isc.h> 26 #include <asm/gmap.h> 27 #include "kvm-s390.h" 28 #include "gaccess.h" 29 #include "trace-s390.h" 30 31 #define PFAULT_INIT 0x0600 32 #define PFAULT_DONE 0x0680 33 #define VIRTIO_PARAM 0x0d00 34 35 /* handle external calls via sigp interpretation facility */ 36 static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id) 37 { 38 int c, scn; 39 40 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND)) 41 return 0; 42 43 read_lock(&vcpu->kvm->arch.sca_lock); 44 if (vcpu->kvm->arch.use_esca) { 45 struct esca_block *sca = vcpu->kvm->arch.sca; 46 union esca_sigp_ctrl sigp_ctrl = 47 sca->cpu[vcpu->vcpu_id].sigp_ctrl; 48 49 c = sigp_ctrl.c; 50 scn = sigp_ctrl.scn; 51 } else { 52 struct bsca_block *sca = vcpu->kvm->arch.sca; 53 union bsca_sigp_ctrl sigp_ctrl = 54 sca->cpu[vcpu->vcpu_id].sigp_ctrl; 55 56 c = sigp_ctrl.c; 57 scn = sigp_ctrl.scn; 58 } 59 read_unlock(&vcpu->kvm->arch.sca_lock); 60 61 if (src_id) 62 *src_id = scn; 63 64 return c; 65 } 66 67 static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) 68 { 69 int expect, rc; 70 71 read_lock(&vcpu->kvm->arch.sca_lock); 72 if (vcpu->kvm->arch.use_esca) { 73 struct esca_block *sca = vcpu->kvm->arch.sca; 74 union esca_sigp_ctrl *sigp_ctrl = 75 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 76 union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; 77 78 new_val.scn = src_id; 79 new_val.c = 1; 80 old_val.c = 0; 81 82 expect = old_val.value; 83 rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value); 84 } else { 85 struct bsca_block *sca = vcpu->kvm->arch.sca; 86 union bsca_sigp_ctrl *sigp_ctrl = 87 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 88 union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; 89 90 new_val.scn = src_id; 91 new_val.c = 1; 92 old_val.c = 0; 93 94 expect = old_val.value; 95 rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value); 96 } 97 read_unlock(&vcpu->kvm->arch.sca_lock); 98 99 if (rc != expect) { 100 /* another external call is pending */ 101 return -EBUSY; 102 } 103 atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); 104 return 0; 105 } 106 107 static void sca_clear_ext_call(struct kvm_vcpu *vcpu) 108 { 109 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 110 int rc, expect; 111 112 atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags); 113 read_lock(&vcpu->kvm->arch.sca_lock); 114 if (vcpu->kvm->arch.use_esca) { 115 struct esca_block *sca = vcpu->kvm->arch.sca; 116 union esca_sigp_ctrl *sigp_ctrl = 117 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 118 union esca_sigp_ctrl old = *sigp_ctrl; 119 120 expect = old.value; 121 rc = cmpxchg(&sigp_ctrl->value, old.value, 0); 122 } else { 123 struct bsca_block *sca = vcpu->kvm->arch.sca; 124 union bsca_sigp_ctrl *sigp_ctrl = 125 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 126 union bsca_sigp_ctrl old = *sigp_ctrl; 127 128 expect = 
old.value; 129 rc = cmpxchg(&sigp_ctrl->value, old.value, 0); 130 } 131 read_unlock(&vcpu->kvm->arch.sca_lock); 132 WARN_ON(rc != expect); /* cannot clear? */ 133 } 134 135 int psw_extint_disabled(struct kvm_vcpu *vcpu) 136 { 137 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); 138 } 139 140 static int psw_ioint_disabled(struct kvm_vcpu *vcpu) 141 { 142 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); 143 } 144 145 static int psw_mchk_disabled(struct kvm_vcpu *vcpu) 146 { 147 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); 148 } 149 150 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) 151 { 152 return psw_extint_disabled(vcpu) && 153 psw_ioint_disabled(vcpu) && 154 psw_mchk_disabled(vcpu); 155 } 156 157 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) 158 { 159 if (psw_extint_disabled(vcpu) || 160 !(vcpu->arch.sie_block->gcr[0] & 0x800ul)) 161 return 0; 162 if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu)) 163 /* No timer interrupts when single stepping */ 164 return 0; 165 return 1; 166 } 167 168 static int ckc_irq_pending(struct kvm_vcpu *vcpu) 169 { 170 if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm)) 171 return 0; 172 return ckc_interrupts_enabled(vcpu); 173 } 174 175 static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu) 176 { 177 return !psw_extint_disabled(vcpu) && 178 (vcpu->arch.sie_block->gcr[0] & 0x400ul); 179 } 180 181 static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu) 182 { 183 if (!cpu_timer_interrupts_enabled(vcpu)) 184 return 0; 185 return kvm_s390_get_cpu_timer(vcpu) >> 63; 186 } 187 188 static inline int is_ioirq(unsigned long irq_type) 189 { 190 return ((irq_type >= IRQ_PEND_IO_ISC_0) && 191 (irq_type <= IRQ_PEND_IO_ISC_7)); 192 } 193 194 static uint64_t isc_to_isc_bits(int isc) 195 { 196 return (0x80 >> isc) << 24; 197 } 198 199 static inline u8 int_word_to_isc(u32 int_word) 200 { 201 return (int_word & 0x38000000) >> 27; 202 } 203 204 static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) 205 { 206 return vcpu->kvm->arch.float_int.pending_irqs | 207 vcpu->arch.local_int.pending_irqs; 208 } 209 210 static unsigned long disable_iscs(struct kvm_vcpu *vcpu, 211 unsigned long active_mask) 212 { 213 int i; 214 215 for (i = 0; i <= MAX_ISC; i++) 216 if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i))) 217 active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i)); 218 219 return active_mask; 220 } 221 222 static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) 223 { 224 unsigned long active_mask; 225 226 active_mask = pending_irqs(vcpu); 227 if (!active_mask) 228 return 0; 229 230 if (psw_extint_disabled(vcpu)) 231 active_mask &= ~IRQ_PEND_EXT_MASK; 232 if (psw_ioint_disabled(vcpu)) 233 active_mask &= ~IRQ_PEND_IO_MASK; 234 else 235 active_mask = disable_iscs(vcpu, active_mask); 236 if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) 237 __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); 238 if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) 239 __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask); 240 if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) 241 __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); 242 if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) 243 __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); 244 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) 245 __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask); 246 if (psw_mchk_disabled(vcpu)) 247 active_mask &= ~IRQ_PEND_MCHK_MASK; 248 if (!(vcpu->arch.sie_block->gcr[14] & 249 vcpu->kvm->arch.float_int.mchk.cr14)) 250 
__clear_bit(IRQ_PEND_MCHK_REP, &active_mask); 251 252 /* 253 * STOP irqs will never be actively delivered. They are triggered via 254 * intercept requests and cleared when the stop intercept is performed. 255 */ 256 __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask); 257 258 return active_mask; 259 } 260 261 static void __set_cpu_idle(struct kvm_vcpu *vcpu) 262 { 263 atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 264 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 265 } 266 267 static void __unset_cpu_idle(struct kvm_vcpu *vcpu) 268 { 269 atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 270 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 271 } 272 273 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) 274 { 275 atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, 276 &vcpu->arch.sie_block->cpuflags); 277 vcpu->arch.sie_block->lctl = 0x0000; 278 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); 279 280 if (guestdbg_enabled(vcpu)) { 281 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 | 282 LCTL_CR10 | LCTL_CR11); 283 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); 284 } 285 } 286 287 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) 288 { 289 atomic_or(flag, &vcpu->arch.sie_block->cpuflags); 290 } 291 292 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) 293 { 294 if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK)) 295 return; 296 else if (psw_ioint_disabled(vcpu)) 297 __set_cpuflag(vcpu, CPUSTAT_IO_INT); 298 else 299 vcpu->arch.sie_block->lctl |= LCTL_CR6; 300 } 301 302 static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) 303 { 304 if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK)) 305 return; 306 if (psw_extint_disabled(vcpu)) 307 __set_cpuflag(vcpu, CPUSTAT_EXT_INT); 308 else 309 vcpu->arch.sie_block->lctl |= LCTL_CR0; 310 } 311 312 static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu) 313 { 314 if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) 315 return; 316 if (psw_mchk_disabled(vcpu)) 317 vcpu->arch.sie_block->ictl |= ICTL_LPSW; 318 else 319 vcpu->arch.sie_block->lctl |= LCTL_CR14; 320 } 321 322 static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu) 323 { 324 if (kvm_s390_is_stop_irq_pending(vcpu)) 325 __set_cpuflag(vcpu, CPUSTAT_STOP_INT); 326 } 327 328 /* Set interception request for non-deliverable interrupts */ 329 static void set_intercept_indicators(struct kvm_vcpu *vcpu) 330 { 331 set_intercept_indicators_io(vcpu); 332 set_intercept_indicators_ext(vcpu); 333 set_intercept_indicators_mchk(vcpu); 334 set_intercept_indicators_stop(vcpu); 335 } 336 337 static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) 338 { 339 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 340 int rc; 341 342 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, 343 0, 0); 344 345 rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, 346 (u16 *)__LC_EXT_INT_CODE); 347 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); 348 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 349 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 350 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 351 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 352 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 353 return rc ? 
-EFAULT : 0; 354 } 355 356 static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) 357 { 358 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 359 int rc; 360 361 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, 362 0, 0); 363 364 rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP, 365 (u16 __user *)__LC_EXT_INT_CODE); 366 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); 367 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 368 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 369 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 370 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 371 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 372 return rc ? -EFAULT : 0; 373 } 374 375 static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) 376 { 377 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 378 struct kvm_s390_ext_info ext; 379 int rc; 380 381 spin_lock(&li->lock); 382 ext = li->irq.ext; 383 clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); 384 li->irq.ext.ext_params2 = 0; 385 spin_unlock(&li->lock); 386 387 VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx", 388 ext.ext_params2); 389 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 390 KVM_S390_INT_PFAULT_INIT, 391 0, ext.ext_params2); 392 393 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE); 394 rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR); 395 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 396 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 397 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 398 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 399 rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2); 400 return rc ? -EFAULT : 0; 401 } 402 403 static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) 404 { 405 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 406 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 407 struct kvm_s390_mchk_info mchk = {}; 408 unsigned long adtl_status_addr; 409 int deliver = 0; 410 int rc = 0; 411 412 spin_lock(&fi->lock); 413 spin_lock(&li->lock); 414 if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) || 415 test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) { 416 /* 417 * If there was an exigent machine check pending, then any 418 * repressible machine checks that might have been pending 419 * are indicated along with it, so always clear bits for 420 * repressible and exigent interrupts 421 */ 422 mchk = li->irq.mchk; 423 clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); 424 clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); 425 memset(&li->irq.mchk, 0, sizeof(mchk)); 426 deliver = 1; 427 } 428 /* 429 * We indicate floating repressible conditions along with 430 * other pending conditions. Channel Report Pending and Channel 431 * Subsystem damage are the only two and and are indicated by 432 * bits in mcic and masked in cr14. 
433 */ 434 if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { 435 mchk.mcic |= fi->mchk.mcic; 436 mchk.cr14 |= fi->mchk.cr14; 437 memset(&fi->mchk, 0, sizeof(mchk)); 438 deliver = 1; 439 } 440 spin_unlock(&li->lock); 441 spin_unlock(&fi->lock); 442 443 if (deliver) { 444 VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx", 445 mchk.mcic); 446 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 447 KVM_S390_MCHK, 448 mchk.cr14, mchk.mcic); 449 450 rc = kvm_s390_vcpu_store_status(vcpu, 451 KVM_S390_STORE_STATUS_PREFIXED); 452 rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, 453 &adtl_status_addr, 454 sizeof(unsigned long)); 455 rc |= kvm_s390_vcpu_store_adtl_status(vcpu, 456 adtl_status_addr); 457 rc |= put_guest_lc(vcpu, mchk.mcic, 458 (u64 __user *) __LC_MCCK_CODE); 459 rc |= put_guest_lc(vcpu, mchk.failing_storage_address, 460 (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); 461 rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, 462 &mchk.fixed_logout, 463 sizeof(mchk.fixed_logout)); 464 rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, 465 &vcpu->arch.sie_block->gpsw, 466 sizeof(psw_t)); 467 rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, 468 &vcpu->arch.sie_block->gpsw, 469 sizeof(psw_t)); 470 } 471 return rc ? -EFAULT : 0; 472 } 473 474 static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) 475 { 476 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 477 int rc; 478 479 VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart"); 480 vcpu->stat.deliver_restart_signal++; 481 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); 482 483 rc = write_guest_lc(vcpu, 484 offsetof(struct lowcore, restart_old_psw), 485 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 486 rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw), 487 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 488 clear_bit(IRQ_PEND_RESTART, &li->pending_irqs); 489 return rc ? -EFAULT : 0; 490 } 491 492 static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu) 493 { 494 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 495 struct kvm_s390_prefix_info prefix; 496 497 spin_lock(&li->lock); 498 prefix = li->irq.prefix; 499 li->irq.prefix.address = 0; 500 clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); 501 spin_unlock(&li->lock); 502 503 vcpu->stat.deliver_prefix_signal++; 504 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 505 KVM_S390_SIGP_SET_PREFIX, 506 prefix.address, 0); 507 508 kvm_s390_set_prefix(vcpu, prefix.address); 509 return 0; 510 } 511 512 static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu) 513 { 514 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 515 int rc; 516 int cpu_addr; 517 518 spin_lock(&li->lock); 519 cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS); 520 clear_bit(cpu_addr, li->sigp_emerg_pending); 521 if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS)) 522 clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 523 spin_unlock(&li->lock); 524 525 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg"); 526 vcpu->stat.deliver_emergency_signal++; 527 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 528 cpu_addr, 0); 529 530 rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG, 531 (u16 *)__LC_EXT_INT_CODE); 532 rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR); 533 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 534 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 535 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 536 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 537 return rc ? 
-EFAULT : 0; 538 } 539 540 static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu) 541 { 542 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 543 struct kvm_s390_extcall_info extcall; 544 int rc; 545 546 spin_lock(&li->lock); 547 extcall = li->irq.extcall; 548 li->irq.extcall.code = 0; 549 clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); 550 spin_unlock(&li->lock); 551 552 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call"); 553 vcpu->stat.deliver_external_call++; 554 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 555 KVM_S390_INT_EXTERNAL_CALL, 556 extcall.code, 0); 557 558 rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL, 559 (u16 *)__LC_EXT_INT_CODE); 560 rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR); 561 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 562 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 563 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, 564 sizeof(psw_t)); 565 return rc ? -EFAULT : 0; 566 } 567 568 static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) 569 { 570 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 571 struct kvm_s390_pgm_info pgm_info; 572 int rc = 0, nullifying = false; 573 u16 ilen; 574 575 spin_lock(&li->lock); 576 pgm_info = li->irq.pgm; 577 clear_bit(IRQ_PEND_PROG, &li->pending_irqs); 578 memset(&li->irq.pgm, 0, sizeof(pgm_info)); 579 spin_unlock(&li->lock); 580 581 ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK; 582 VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d", 583 pgm_info.code, ilen); 584 vcpu->stat.deliver_program_int++; 585 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, 586 pgm_info.code, 0); 587 588 switch (pgm_info.code & ~PGM_PER) { 589 case PGM_AFX_TRANSLATION: 590 case PGM_ASX_TRANSLATION: 591 case PGM_EX_TRANSLATION: 592 case PGM_LFX_TRANSLATION: 593 case PGM_LSTE_SEQUENCE: 594 case PGM_LSX_TRANSLATION: 595 case PGM_LX_TRANSLATION: 596 case PGM_PRIMARY_AUTHORITY: 597 case PGM_SECONDARY_AUTHORITY: 598 nullifying = true; 599 /* fall through */ 600 case PGM_SPACE_SWITCH: 601 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, 602 (u64 *)__LC_TRANS_EXC_CODE); 603 break; 604 case PGM_ALEN_TRANSLATION: 605 case PGM_ALE_SEQUENCE: 606 case PGM_ASTE_INSTANCE: 607 case PGM_ASTE_SEQUENCE: 608 case PGM_ASTE_VALIDITY: 609 case PGM_EXTENDED_AUTHORITY: 610 rc = put_guest_lc(vcpu, pgm_info.exc_access_id, 611 (u8 *)__LC_EXC_ACCESS_ID); 612 nullifying = true; 613 break; 614 case PGM_ASCE_TYPE: 615 case PGM_PAGE_TRANSLATION: 616 case PGM_REGION_FIRST_TRANS: 617 case PGM_REGION_SECOND_TRANS: 618 case PGM_REGION_THIRD_TRANS: 619 case PGM_SEGMENT_TRANSLATION: 620 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, 621 (u64 *)__LC_TRANS_EXC_CODE); 622 rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, 623 (u8 *)__LC_EXC_ACCESS_ID); 624 rc |= put_guest_lc(vcpu, pgm_info.op_access_id, 625 (u8 *)__LC_OP_ACCESS_ID); 626 nullifying = true; 627 break; 628 case PGM_MONITOR: 629 rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, 630 (u16 *)__LC_MON_CLASS_NR); 631 rc |= put_guest_lc(vcpu, pgm_info.mon_code, 632 (u64 *)__LC_MON_CODE); 633 break; 634 case PGM_VECTOR_PROCESSING: 635 case PGM_DATA: 636 rc = put_guest_lc(vcpu, pgm_info.data_exc_code, 637 (u32 *)__LC_DATA_EXC_CODE); 638 break; 639 case PGM_PROTECTION: 640 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, 641 (u64 *)__LC_TRANS_EXC_CODE); 642 rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, 643 (u8 *)__LC_EXC_ACCESS_ID); 644 break; 645 case PGM_STACK_FULL: 646 case PGM_STACK_EMPTY: 647 case 
PGM_STACK_SPECIFICATION: 648 case PGM_STACK_TYPE: 649 case PGM_STACK_OPERATION: 650 case PGM_TRACE_TABEL: 651 case PGM_CRYPTO_OPERATION: 652 nullifying = true; 653 break; 654 } 655 656 if (pgm_info.code & PGM_PER) { 657 rc |= put_guest_lc(vcpu, pgm_info.per_code, 658 (u8 *) __LC_PER_CODE); 659 rc |= put_guest_lc(vcpu, pgm_info.per_atmid, 660 (u8 *)__LC_PER_ATMID); 661 rc |= put_guest_lc(vcpu, pgm_info.per_address, 662 (u64 *) __LC_PER_ADDRESS); 663 rc |= put_guest_lc(vcpu, pgm_info.per_access_id, 664 (u8 *) __LC_PER_ACCESS_ID); 665 } 666 667 if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND)) 668 kvm_s390_rewind_psw(vcpu, ilen); 669 670 /* bit 1+2 of the target are the ilc, so we can directly use ilen */ 671 rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC); 672 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea, 673 (u64 *) __LC_LAST_BREAK); 674 rc |= put_guest_lc(vcpu, pgm_info.code, 675 (u16 *)__LC_PGM_INT_CODE); 676 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, 677 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 678 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW, 679 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 680 return rc ? -EFAULT : 0; 681 } 682 683 static int __must_check __deliver_service(struct kvm_vcpu *vcpu) 684 { 685 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 686 struct kvm_s390_ext_info ext; 687 int rc = 0; 688 689 spin_lock(&fi->lock); 690 if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { 691 spin_unlock(&fi->lock); 692 return 0; 693 } 694 ext = fi->srv_signal; 695 memset(&fi->srv_signal, 0, sizeof(ext)); 696 clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); 697 spin_unlock(&fi->lock); 698 699 VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x", 700 ext.ext_params); 701 vcpu->stat.deliver_service_signal++; 702 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, 703 ext.ext_params, 0); 704 705 rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); 706 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); 707 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 708 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 709 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 710 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 711 rc |= put_guest_lc(vcpu, ext.ext_params, 712 (u32 *)__LC_EXT_PARAMS); 713 714 return rc ? 
-EFAULT : 0; 715 } 716 717 static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) 718 { 719 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 720 struct kvm_s390_interrupt_info *inti; 721 int rc = 0; 722 723 spin_lock(&fi->lock); 724 inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT], 725 struct kvm_s390_interrupt_info, 726 list); 727 if (inti) { 728 list_del(&inti->list); 729 fi->counters[FIRQ_CNTR_PFAULT] -= 1; 730 } 731 if (list_empty(&fi->lists[FIRQ_LIST_PFAULT])) 732 clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); 733 spin_unlock(&fi->lock); 734 735 if (inti) { 736 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 737 KVM_S390_INT_PFAULT_DONE, 0, 738 inti->ext.ext_params2); 739 VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx", 740 inti->ext.ext_params2); 741 742 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, 743 (u16 *)__LC_EXT_INT_CODE); 744 rc |= put_guest_lc(vcpu, PFAULT_DONE, 745 (u16 *)__LC_EXT_CPU_ADDR); 746 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 747 &vcpu->arch.sie_block->gpsw, 748 sizeof(psw_t)); 749 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 750 &vcpu->arch.sie_block->gpsw, 751 sizeof(psw_t)); 752 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 753 (u64 *)__LC_EXT_PARAMS2); 754 kfree(inti); 755 } 756 return rc ? -EFAULT : 0; 757 } 758 759 static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu) 760 { 761 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 762 struct kvm_s390_interrupt_info *inti; 763 int rc = 0; 764 765 spin_lock(&fi->lock); 766 inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO], 767 struct kvm_s390_interrupt_info, 768 list); 769 if (inti) { 770 VCPU_EVENT(vcpu, 4, 771 "deliver: virtio parm: 0x%x,parm64: 0x%llx", 772 inti->ext.ext_params, inti->ext.ext_params2); 773 vcpu->stat.deliver_virtio_interrupt++; 774 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 775 inti->type, 776 inti->ext.ext_params, 777 inti->ext.ext_params2); 778 list_del(&inti->list); 779 fi->counters[FIRQ_CNTR_VIRTIO] -= 1; 780 } 781 if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO])) 782 clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); 783 spin_unlock(&fi->lock); 784 785 if (inti) { 786 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, 787 (u16 *)__LC_EXT_INT_CODE); 788 rc |= put_guest_lc(vcpu, VIRTIO_PARAM, 789 (u16 *)__LC_EXT_CPU_ADDR); 790 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 791 &vcpu->arch.sie_block->gpsw, 792 sizeof(psw_t)); 793 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 794 &vcpu->arch.sie_block->gpsw, 795 sizeof(psw_t)); 796 rc |= put_guest_lc(vcpu, inti->ext.ext_params, 797 (u32 *)__LC_EXT_PARAMS); 798 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 799 (u64 *)__LC_EXT_PARAMS2); 800 kfree(inti); 801 } 802 return rc ? 
-EFAULT : 0; 803 } 804 805 static int __must_check __deliver_io(struct kvm_vcpu *vcpu, 806 unsigned long irq_type) 807 { 808 struct list_head *isc_list; 809 struct kvm_s390_float_interrupt *fi; 810 struct kvm_s390_interrupt_info *inti = NULL; 811 int rc = 0; 812 813 fi = &vcpu->kvm->arch.float_int; 814 815 spin_lock(&fi->lock); 816 isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0]; 817 inti = list_first_entry_or_null(isc_list, 818 struct kvm_s390_interrupt_info, 819 list); 820 if (inti) { 821 if (inti->type & KVM_S390_INT_IO_AI_MASK) 822 VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)"); 823 else 824 VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x", 825 inti->io.subchannel_id >> 8, 826 inti->io.subchannel_id >> 1 & 0x3, 827 inti->io.subchannel_nr); 828 829 vcpu->stat.deliver_io_int++; 830 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 831 inti->type, 832 ((__u32)inti->io.subchannel_id << 16) | 833 inti->io.subchannel_nr, 834 ((__u64)inti->io.io_int_parm << 32) | 835 inti->io.io_int_word); 836 list_del(&inti->list); 837 fi->counters[FIRQ_CNTR_IO] -= 1; 838 } 839 if (list_empty(isc_list)) 840 clear_bit(irq_type, &fi->pending_irqs); 841 spin_unlock(&fi->lock); 842 843 if (inti) { 844 rc = put_guest_lc(vcpu, inti->io.subchannel_id, 845 (u16 *)__LC_SUBCHANNEL_ID); 846 rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, 847 (u16 *)__LC_SUBCHANNEL_NR); 848 rc |= put_guest_lc(vcpu, inti->io.io_int_parm, 849 (u32 *)__LC_IO_INT_PARM); 850 rc |= put_guest_lc(vcpu, inti->io.io_int_word, 851 (u32 *)__LC_IO_INT_WORD); 852 rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, 853 &vcpu->arch.sie_block->gpsw, 854 sizeof(psw_t)); 855 rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, 856 &vcpu->arch.sie_block->gpsw, 857 sizeof(psw_t)); 858 kfree(inti); 859 } 860 861 return rc ? -EFAULT : 0; 862 } 863 864 typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu); 865 866 static const deliver_irq_t deliver_irq_funcs[] = { 867 [IRQ_PEND_MCHK_EX] = __deliver_machine_check, 868 [IRQ_PEND_MCHK_REP] = __deliver_machine_check, 869 [IRQ_PEND_PROG] = __deliver_prog, 870 [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, 871 [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, 872 [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc, 873 [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer, 874 [IRQ_PEND_RESTART] = __deliver_restart, 875 [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, 876 [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, 877 [IRQ_PEND_EXT_SERVICE] = __deliver_service, 878 [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done, 879 [IRQ_PEND_VIRTIO] = __deliver_virtio, 880 }; 881 882 /* Check whether an external call is pending (deliverable or not) */ 883 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) 884 { 885 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 886 887 if (!sclp.has_sigpif) 888 return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); 889 890 return sca_ext_call_pending(vcpu, NULL); 891 } 892 893 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) 894 { 895 if (deliverable_irqs(vcpu)) 896 return 1; 897 898 if (kvm_cpu_has_pending_timer(vcpu)) 899 return 1; 900 901 /* external call pending and deliverable */ 902 if (kvm_s390_ext_call_pending(vcpu) && 903 !psw_extint_disabled(vcpu) && 904 (vcpu->arch.sie_block->gcr[0] & 0x2000ul)) 905 return 1; 906 907 if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu)) 908 return 1; 909 return 0; 910 } 911 912 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 913 { 914 return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu); 915 } 916 917 static u64 
__calculate_sltime(struct kvm_vcpu *vcpu) 918 { 919 u64 now, cputm, sltime = 0; 920 921 if (ckc_interrupts_enabled(vcpu)) { 922 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); 923 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); 924 /* already expired or overflow? */ 925 if (!sltime || vcpu->arch.sie_block->ckc <= now) 926 return 0; 927 if (cpu_timer_interrupts_enabled(vcpu)) { 928 cputm = kvm_s390_get_cpu_timer(vcpu); 929 /* already expired? */ 930 if (cputm >> 63) 931 return 0; 932 return min(sltime, tod_to_ns(cputm)); 933 } 934 } else if (cpu_timer_interrupts_enabled(vcpu)) { 935 sltime = kvm_s390_get_cpu_timer(vcpu); 936 /* already expired? */ 937 if (sltime >> 63) 938 return 0; 939 } 940 return sltime; 941 } 942 943 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) 944 { 945 u64 sltime; 946 947 vcpu->stat.exit_wait_state++; 948 949 /* fast path */ 950 if (kvm_arch_vcpu_runnable(vcpu)) 951 return 0; 952 953 if (psw_interrupts_disabled(vcpu)) { 954 VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); 955 return -EOPNOTSUPP; /* disabled wait */ 956 } 957 958 if (!ckc_interrupts_enabled(vcpu) && 959 !cpu_timer_interrupts_enabled(vcpu)) { 960 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); 961 __set_cpu_idle(vcpu); 962 goto no_timer; 963 } 964 965 sltime = __calculate_sltime(vcpu); 966 if (!sltime) 967 return 0; 968 969 __set_cpu_idle(vcpu); 970 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); 971 VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime); 972 no_timer: 973 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 974 kvm_vcpu_block(vcpu); 975 __unset_cpu_idle(vcpu); 976 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 977 978 hrtimer_cancel(&vcpu->arch.ckc_timer); 979 return 0; 980 } 981 982 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) 983 { 984 /* 985 * We cannot move this into the if, as the CPU might be already 986 * in kvm_vcpu_block without having the waitqueue set (polling) 987 */ 988 vcpu->valid_wakeup = true; 989 if (swait_active(&vcpu->wq)) { 990 /* 991 * The vcpu gave up the cpu voluntarily, mark it as a good 992 * yield-candidate. 993 */ 994 vcpu->preempted = true; 995 swake_up(&vcpu->wq); 996 vcpu->stat.halt_wakeup++; 997 } 998 /* 999 * The VCPU might not be sleeping but is executing the VSIE. Let's 1000 * kick it, so it leaves the SIE to process the request. 1001 */ 1002 kvm_s390_vsie_kick(vcpu); 1003 } 1004 1005 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) 1006 { 1007 struct kvm_vcpu *vcpu; 1008 u64 sltime; 1009 1010 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); 1011 sltime = __calculate_sltime(vcpu); 1012 1013 /* 1014 * If the monotonic clock runs faster than the tod clock we might be 1015 * woken up too early and have to go back to sleep to avoid deadlocks. 
1016 */ 1017 if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime))) 1018 return HRTIMER_RESTART; 1019 kvm_s390_vcpu_wakeup(vcpu); 1020 return HRTIMER_NORESTART; 1021 } 1022 1023 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) 1024 { 1025 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1026 1027 spin_lock(&li->lock); 1028 li->pending_irqs = 0; 1029 bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS); 1030 memset(&li->irq, 0, sizeof(li->irq)); 1031 spin_unlock(&li->lock); 1032 1033 sca_clear_ext_call(vcpu); 1034 } 1035 1036 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) 1037 { 1038 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1039 deliver_irq_t func; 1040 int rc = 0; 1041 unsigned long irq_type; 1042 unsigned long irqs; 1043 1044 __reset_intercept_indicators(vcpu); 1045 1046 /* pending ckc conditions might have been invalidated */ 1047 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1048 if (ckc_irq_pending(vcpu)) 1049 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1050 1051 /* pending cpu timer conditions might have been invalidated */ 1052 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1053 if (cpu_timer_irq_pending(vcpu)) 1054 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1055 1056 while ((irqs = deliverable_irqs(vcpu)) && !rc) { 1057 /* bits are in the order of interrupt priority */ 1058 irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT); 1059 if (is_ioirq(irq_type)) { 1060 rc = __deliver_io(vcpu, irq_type); 1061 } else { 1062 func = deliver_irq_funcs[irq_type]; 1063 if (!func) { 1064 WARN_ON_ONCE(func == NULL); 1065 clear_bit(irq_type, &li->pending_irqs); 1066 continue; 1067 } 1068 rc = func(vcpu); 1069 } 1070 } 1071 1072 set_intercept_indicators(vcpu); 1073 1074 return rc; 1075 } 1076 1077 static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1078 { 1079 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1080 1081 VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code); 1082 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, 1083 irq->u.pgm.code, 0); 1084 1085 if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) { 1086 /* auto detection if no valid ILC was given */ 1087 irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK; 1088 irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu); 1089 irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID; 1090 } 1091 1092 if (irq->u.pgm.code == PGM_PER) { 1093 li->irq.pgm.code |= PGM_PER; 1094 li->irq.pgm.flags = irq->u.pgm.flags; 1095 /* only modify PER related information */ 1096 li->irq.pgm.per_address = irq->u.pgm.per_address; 1097 li->irq.pgm.per_code = irq->u.pgm.per_code; 1098 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid; 1099 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id; 1100 } else if (!(irq->u.pgm.code & PGM_PER)) { 1101 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) | 1102 irq->u.pgm.code; 1103 li->irq.pgm.flags = irq->u.pgm.flags; 1104 /* only modify non-PER information */ 1105 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code; 1106 li->irq.pgm.mon_code = irq->u.pgm.mon_code; 1107 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code; 1108 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr; 1109 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id; 1110 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id; 1111 } else { 1112 li->irq.pgm = irq->u.pgm; 1113 } 1114 set_bit(IRQ_PEND_PROG, &li->pending_irqs); 1115 return 0; 1116 } 1117 1118 static int __inject_pfault_init(struct kvm_vcpu *vcpu, 
struct kvm_s390_irq *irq) 1119 { 1120 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1121 1122 VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx", 1123 irq->u.ext.ext_params2); 1124 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, 1125 irq->u.ext.ext_params, 1126 irq->u.ext.ext_params2); 1127 1128 li->irq.ext = irq->u.ext; 1129 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); 1130 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1131 return 0; 1132 } 1133 1134 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1135 { 1136 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1137 struct kvm_s390_extcall_info *extcall = &li->irq.extcall; 1138 uint16_t src_id = irq->u.extcall.code; 1139 1140 VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u", 1141 src_id); 1142 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, 1143 src_id, 0); 1144 1145 /* sending vcpu invalid */ 1146 if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) 1147 return -EINVAL; 1148 1149 if (sclp.has_sigpif) 1150 return sca_inject_ext_call(vcpu, src_id); 1151 1152 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) 1153 return -EBUSY; 1154 *extcall = irq->u.extcall; 1155 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1156 return 0; 1157 } 1158 1159 static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1160 { 1161 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1162 struct kvm_s390_prefix_info *prefix = &li->irq.prefix; 1163 1164 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x", 1165 irq->u.prefix.address); 1166 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, 1167 irq->u.prefix.address, 0); 1168 1169 if (!is_vcpu_stopped(vcpu)) 1170 return -EBUSY; 1171 1172 *prefix = irq->u.prefix; 1173 set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); 1174 return 0; 1175 } 1176 1177 #define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS) 1178 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1179 { 1180 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1181 struct kvm_s390_stop_info *stop = &li->irq.stop; 1182 int rc = 0; 1183 1184 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0); 1185 1186 if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS) 1187 return -EINVAL; 1188 1189 if (is_vcpu_stopped(vcpu)) { 1190 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS) 1191 rc = kvm_s390_store_status_unloaded(vcpu, 1192 KVM_S390_STORE_STATUS_NOADDR); 1193 return rc; 1194 } 1195 1196 if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs)) 1197 return -EBUSY; 1198 stop->flags = irq->u.stop.flags; 1199 __set_cpuflag(vcpu, CPUSTAT_STOP_INT); 1200 return 0; 1201 } 1202 1203 static int __inject_sigp_restart(struct kvm_vcpu *vcpu, 1204 struct kvm_s390_irq *irq) 1205 { 1206 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1207 1208 VCPU_EVENT(vcpu, 3, "%s", "inject: restart int"); 1209 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); 1210 1211 set_bit(IRQ_PEND_RESTART, &li->pending_irqs); 1212 return 0; 1213 } 1214 1215 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, 1216 struct kvm_s390_irq *irq) 1217 { 1218 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1219 1220 VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u", 1221 irq->u.emerg.code); 1222 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 1223 irq->u.emerg.code, 0); 1224 1225 /* sending vcpu invalid 
*/ 1226 if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL) 1227 return -EINVAL; 1228 1229 set_bit(irq->u.emerg.code, li->sigp_emerg_pending); 1230 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 1231 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1232 return 0; 1233 } 1234 1235 static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1236 { 1237 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1238 struct kvm_s390_mchk_info *mchk = &li->irq.mchk; 1239 1240 VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx", 1241 irq->u.mchk.mcic); 1242 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, 1243 irq->u.mchk.mcic); 1244 1245 /* 1246 * Because repressible machine checks can be indicated along with 1247 * exigent machine checks (PoP, Chapter 11, Interruption action) 1248 * we need to combine cr14, mcic and external damage code. 1249 * Failing storage address and the logout area should not be or'ed 1250 * together, we just indicate the last occurrence of the corresponding 1251 * machine check 1252 */ 1253 mchk->cr14 |= irq->u.mchk.cr14; 1254 mchk->mcic |= irq->u.mchk.mcic; 1255 mchk->ext_damage_code |= irq->u.mchk.ext_damage_code; 1256 mchk->failing_storage_address = irq->u.mchk.failing_storage_address; 1257 memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout, 1258 sizeof(mchk->fixed_logout)); 1259 if (mchk->mcic & MCHK_EX_MASK) 1260 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); 1261 else if (mchk->mcic & MCHK_REP_MASK) 1262 set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); 1263 return 0; 1264 } 1265 1266 static int __inject_ckc(struct kvm_vcpu *vcpu) 1267 { 1268 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1269 1270 VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external"); 1271 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, 1272 0, 0); 1273 1274 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1275 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1276 return 0; 1277 } 1278 1279 static int __inject_cpu_timer(struct kvm_vcpu *vcpu) 1280 { 1281 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1282 1283 VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external"); 1284 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, 1285 0, 0); 1286 1287 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1288 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1289 return 0; 1290 } 1291 1292 static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm, 1293 int isc, u32 schid) 1294 { 1295 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1296 struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; 1297 struct kvm_s390_interrupt_info *iter; 1298 u16 id = (schid & 0xffff0000U) >> 16; 1299 u16 nr = schid & 0x0000ffffU; 1300 1301 spin_lock(&fi->lock); 1302 list_for_each_entry(iter, isc_list, list) { 1303 if (schid && (id != iter->io.subchannel_id || 1304 nr != iter->io.subchannel_nr)) 1305 continue; 1306 /* found an appropriate entry */ 1307 list_del_init(&iter->list); 1308 fi->counters[FIRQ_CNTR_IO] -= 1; 1309 if (list_empty(isc_list)) 1310 clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); 1311 spin_unlock(&fi->lock); 1312 return iter; 1313 } 1314 spin_unlock(&fi->lock); 1315 return NULL; 1316 } 1317 1318 /* 1319 * Dequeue and return an I/O interrupt matching any of the interruption 1320 * subclasses as designated by the isc mask in cr6 and the schid (if != 0). 
1321 */ 1322 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, 1323 u64 isc_mask, u32 schid) 1324 { 1325 struct kvm_s390_interrupt_info *inti = NULL; 1326 int isc; 1327 1328 for (isc = 0; isc <= MAX_ISC && !inti; isc++) { 1329 if (isc_mask & isc_to_isc_bits(isc)) 1330 inti = get_io_int(kvm, isc, schid); 1331 } 1332 return inti; 1333 } 1334 1335 #define SCCB_MASK 0xFFFFFFF8 1336 #define SCCB_EVENT_PENDING 0x3 1337 1338 static int __inject_service(struct kvm *kvm, 1339 struct kvm_s390_interrupt_info *inti) 1340 { 1341 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1342 1343 spin_lock(&fi->lock); 1344 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; 1345 /* 1346 * Early versions of the QEMU s390 bios will inject several 1347 * service interrupts after another without handling a 1348 * condition code indicating busy. 1349 * We will silently ignore those superfluous sccb values. 1350 * A future version of QEMU will take care of serialization 1351 * of servc requests 1352 */ 1353 if (fi->srv_signal.ext_params & SCCB_MASK) 1354 goto out; 1355 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK; 1356 set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); 1357 out: 1358 spin_unlock(&fi->lock); 1359 kfree(inti); 1360 return 0; 1361 } 1362 1363 static int __inject_virtio(struct kvm *kvm, 1364 struct kvm_s390_interrupt_info *inti) 1365 { 1366 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1367 1368 spin_lock(&fi->lock); 1369 if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) { 1370 spin_unlock(&fi->lock); 1371 return -EBUSY; 1372 } 1373 fi->counters[FIRQ_CNTR_VIRTIO] += 1; 1374 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]); 1375 set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); 1376 spin_unlock(&fi->lock); 1377 return 0; 1378 } 1379 1380 static int __inject_pfault_done(struct kvm *kvm, 1381 struct kvm_s390_interrupt_info *inti) 1382 { 1383 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1384 1385 spin_lock(&fi->lock); 1386 if (fi->counters[FIRQ_CNTR_PFAULT] >= 1387 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) { 1388 spin_unlock(&fi->lock); 1389 return -EBUSY; 1390 } 1391 fi->counters[FIRQ_CNTR_PFAULT] += 1; 1392 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]); 1393 set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); 1394 spin_unlock(&fi->lock); 1395 return 0; 1396 } 1397 1398 #define CR_PENDING_SUBCLASS 28 1399 static int __inject_float_mchk(struct kvm *kvm, 1400 struct kvm_s390_interrupt_info *inti) 1401 { 1402 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1403 1404 spin_lock(&fi->lock); 1405 fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS); 1406 fi->mchk.mcic |= inti->mchk.mcic; 1407 set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs); 1408 spin_unlock(&fi->lock); 1409 kfree(inti); 1410 return 0; 1411 } 1412 1413 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) 1414 { 1415 struct kvm_s390_float_interrupt *fi; 1416 struct list_head *list; 1417 int isc; 1418 1419 fi = &kvm->arch.float_int; 1420 spin_lock(&fi->lock); 1421 if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) { 1422 spin_unlock(&fi->lock); 1423 return -EBUSY; 1424 } 1425 fi->counters[FIRQ_CNTR_IO] += 1; 1426 1427 if (inti->type & KVM_S390_INT_IO_AI_MASK) 1428 VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)"); 1429 else 1430 VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x", 1431 inti->io.subchannel_id >> 8, 1432 inti->io.subchannel_id >> 1 & 0x3, 1433 inti->io.subchannel_nr); 1434 isc 
= int_word_to_isc(inti->io.io_int_word); 1435 list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; 1436 list_add_tail(&inti->list, list); 1437 set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); 1438 spin_unlock(&fi->lock); 1439 return 0; 1440 } 1441 1442 /* 1443 * Find a destination VCPU for a floating irq and kick it. 1444 */ 1445 static void __floating_irq_kick(struct kvm *kvm, u64 type) 1446 { 1447 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1448 struct kvm_s390_local_interrupt *li; 1449 struct kvm_vcpu *dst_vcpu; 1450 int sigcpu, online_vcpus, nr_tries = 0; 1451 1452 online_vcpus = atomic_read(&kvm->online_vcpus); 1453 if (!online_vcpus) 1454 return; 1455 1456 /* find idle VCPUs first, then round robin */ 1457 sigcpu = find_first_bit(fi->idle_mask, online_vcpus); 1458 if (sigcpu == online_vcpus) { 1459 do { 1460 sigcpu = fi->next_rr_cpu; 1461 fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus; 1462 /* avoid endless loops if all vcpus are stopped */ 1463 if (nr_tries++ >= online_vcpus) 1464 return; 1465 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu))); 1466 } 1467 dst_vcpu = kvm_get_vcpu(kvm, sigcpu); 1468 1469 /* make the VCPU drop out of the SIE, or wake it up if sleeping */ 1470 li = &dst_vcpu->arch.local_int; 1471 spin_lock(&li->lock); 1472 switch (type) { 1473 case KVM_S390_MCHK: 1474 atomic_or(CPUSTAT_STOP_INT, li->cpuflags); 1475 break; 1476 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1477 atomic_or(CPUSTAT_IO_INT, li->cpuflags); 1478 break; 1479 default: 1480 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1481 break; 1482 } 1483 spin_unlock(&li->lock); 1484 kvm_s390_vcpu_wakeup(dst_vcpu); 1485 } 1486 1487 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) 1488 { 1489 u64 type = READ_ONCE(inti->type); 1490 int rc; 1491 1492 switch (type) { 1493 case KVM_S390_MCHK: 1494 rc = __inject_float_mchk(kvm, inti); 1495 break; 1496 case KVM_S390_INT_VIRTIO: 1497 rc = __inject_virtio(kvm, inti); 1498 break; 1499 case KVM_S390_INT_SERVICE: 1500 rc = __inject_service(kvm, inti); 1501 break; 1502 case KVM_S390_INT_PFAULT_DONE: 1503 rc = __inject_pfault_done(kvm, inti); 1504 break; 1505 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1506 rc = __inject_io(kvm, inti); 1507 break; 1508 default: 1509 rc = -EINVAL; 1510 } 1511 if (rc) 1512 return rc; 1513 1514 __floating_irq_kick(kvm, type); 1515 return 0; 1516 } 1517 1518 int kvm_s390_inject_vm(struct kvm *kvm, 1519 struct kvm_s390_interrupt *s390int) 1520 { 1521 struct kvm_s390_interrupt_info *inti; 1522 int rc; 1523 1524 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 1525 if (!inti) 1526 return -ENOMEM; 1527 1528 inti->type = s390int->type; 1529 switch (inti->type) { 1530 case KVM_S390_INT_VIRTIO: 1531 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx", 1532 s390int->parm, s390int->parm64); 1533 inti->ext.ext_params = s390int->parm; 1534 inti->ext.ext_params2 = s390int->parm64; 1535 break; 1536 case KVM_S390_INT_SERVICE: 1537 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm); 1538 inti->ext.ext_params = s390int->parm; 1539 break; 1540 case KVM_S390_INT_PFAULT_DONE: 1541 inti->ext.ext_params2 = s390int->parm64; 1542 break; 1543 case KVM_S390_MCHK: 1544 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx", 1545 s390int->parm64); 1546 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ 1547 inti->mchk.mcic = s390int->parm64; 1548 break; 1549 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1550 inti->io.subchannel_id = s390int->parm >> 16; 1551 inti->io.subchannel_nr = s390int->parm & 
0x0000ffffu; 1552 inti->io.io_int_parm = s390int->parm64 >> 32; 1553 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull; 1554 break; 1555 default: 1556 kfree(inti); 1557 return -EINVAL; 1558 } 1559 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64, 1560 2); 1561 1562 rc = __inject_vm(kvm, inti); 1563 if (rc) 1564 kfree(inti); 1565 return rc; 1566 } 1567 1568 int kvm_s390_reinject_io_int(struct kvm *kvm, 1569 struct kvm_s390_interrupt_info *inti) 1570 { 1571 return __inject_vm(kvm, inti); 1572 } 1573 1574 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, 1575 struct kvm_s390_irq *irq) 1576 { 1577 irq->type = s390int->type; 1578 switch (irq->type) { 1579 case KVM_S390_PROGRAM_INT: 1580 if (s390int->parm & 0xffff0000) 1581 return -EINVAL; 1582 irq->u.pgm.code = s390int->parm; 1583 break; 1584 case KVM_S390_SIGP_SET_PREFIX: 1585 irq->u.prefix.address = s390int->parm; 1586 break; 1587 case KVM_S390_SIGP_STOP: 1588 irq->u.stop.flags = s390int->parm; 1589 break; 1590 case KVM_S390_INT_EXTERNAL_CALL: 1591 if (s390int->parm & 0xffff0000) 1592 return -EINVAL; 1593 irq->u.extcall.code = s390int->parm; 1594 break; 1595 case KVM_S390_INT_EMERGENCY: 1596 if (s390int->parm & 0xffff0000) 1597 return -EINVAL; 1598 irq->u.emerg.code = s390int->parm; 1599 break; 1600 case KVM_S390_MCHK: 1601 irq->u.mchk.mcic = s390int->parm64; 1602 break; 1603 } 1604 return 0; 1605 } 1606 1607 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu) 1608 { 1609 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1610 1611 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); 1612 } 1613 1614 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) 1615 { 1616 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1617 1618 spin_lock(&li->lock); 1619 li->irq.stop.flags = 0; 1620 clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); 1621 spin_unlock(&li->lock); 1622 } 1623 1624 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1625 { 1626 int rc; 1627 1628 switch (irq->type) { 1629 case KVM_S390_PROGRAM_INT: 1630 rc = __inject_prog(vcpu, irq); 1631 break; 1632 case KVM_S390_SIGP_SET_PREFIX: 1633 rc = __inject_set_prefix(vcpu, irq); 1634 break; 1635 case KVM_S390_SIGP_STOP: 1636 rc = __inject_sigp_stop(vcpu, irq); 1637 break; 1638 case KVM_S390_RESTART: 1639 rc = __inject_sigp_restart(vcpu, irq); 1640 break; 1641 case KVM_S390_INT_CLOCK_COMP: 1642 rc = __inject_ckc(vcpu); 1643 break; 1644 case KVM_S390_INT_CPU_TIMER: 1645 rc = __inject_cpu_timer(vcpu); 1646 break; 1647 case KVM_S390_INT_EXTERNAL_CALL: 1648 rc = __inject_extcall(vcpu, irq); 1649 break; 1650 case KVM_S390_INT_EMERGENCY: 1651 rc = __inject_sigp_emergency(vcpu, irq); 1652 break; 1653 case KVM_S390_MCHK: 1654 rc = __inject_mchk(vcpu, irq); 1655 break; 1656 case KVM_S390_INT_PFAULT_INIT: 1657 rc = __inject_pfault_init(vcpu, irq); 1658 break; 1659 case KVM_S390_INT_VIRTIO: 1660 case KVM_S390_INT_SERVICE: 1661 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1662 default: 1663 rc = -EINVAL; 1664 } 1665 1666 return rc; 1667 } 1668 1669 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1670 { 1671 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1672 int rc; 1673 1674 spin_lock(&li->lock); 1675 rc = do_inject_vcpu(vcpu, irq); 1676 spin_unlock(&li->lock); 1677 if (!rc) 1678 kvm_s390_vcpu_wakeup(vcpu); 1679 return rc; 1680 } 1681 1682 static inline void clear_irq_list(struct list_head *_list) 1683 { 1684 struct kvm_s390_interrupt_info *inti, *n; 1685 1686 
list_for_each_entry_safe(inti, n, _list, list) { 1687 list_del(&inti->list); 1688 kfree(inti); 1689 } 1690 } 1691 1692 static void inti_to_irq(struct kvm_s390_interrupt_info *inti, 1693 struct kvm_s390_irq *irq) 1694 { 1695 irq->type = inti->type; 1696 switch (inti->type) { 1697 case KVM_S390_INT_PFAULT_INIT: 1698 case KVM_S390_INT_PFAULT_DONE: 1699 case KVM_S390_INT_VIRTIO: 1700 irq->u.ext = inti->ext; 1701 break; 1702 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1703 irq->u.io = inti->io; 1704 break; 1705 } 1706 } 1707 1708 void kvm_s390_clear_float_irqs(struct kvm *kvm) 1709 { 1710 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1711 int i; 1712 1713 spin_lock(&fi->lock); 1714 fi->pending_irqs = 0; 1715 memset(&fi->srv_signal, 0, sizeof(fi->srv_signal)); 1716 memset(&fi->mchk, 0, sizeof(fi->mchk)); 1717 for (i = 0; i < FIRQ_LIST_COUNT; i++) 1718 clear_irq_list(&fi->lists[i]); 1719 for (i = 0; i < FIRQ_MAX_COUNT; i++) 1720 fi->counters[i] = 0; 1721 spin_unlock(&fi->lock); 1722 }; 1723 1724 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) 1725 { 1726 struct kvm_s390_interrupt_info *inti; 1727 struct kvm_s390_float_interrupt *fi; 1728 struct kvm_s390_irq *buf; 1729 struct kvm_s390_irq *irq; 1730 int max_irqs; 1731 int ret = 0; 1732 int n = 0; 1733 int i; 1734 1735 if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) 1736 return -EINVAL; 1737 1738 /* 1739 * We are already using -ENOMEM to signal 1740 * userspace it may retry with a bigger buffer, 1741 * so we need to use something else for this case 1742 */ 1743 buf = vzalloc(len); 1744 if (!buf) 1745 return -ENOBUFS; 1746 1747 max_irqs = len / sizeof(struct kvm_s390_irq); 1748 1749 fi = &kvm->arch.float_int; 1750 spin_lock(&fi->lock); 1751 for (i = 0; i < FIRQ_LIST_COUNT; i++) { 1752 list_for_each_entry(inti, &fi->lists[i], list) { 1753 if (n == max_irqs) { 1754 /* signal userspace to try again */ 1755 ret = -ENOMEM; 1756 goto out; 1757 } 1758 inti_to_irq(inti, &buf[n]); 1759 n++; 1760 } 1761 } 1762 if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) { 1763 if (n == max_irqs) { 1764 /* signal userspace to try again */ 1765 ret = -ENOMEM; 1766 goto out; 1767 } 1768 irq = (struct kvm_s390_irq *) &buf[n]; 1769 irq->type = KVM_S390_INT_SERVICE; 1770 irq->u.ext = fi->srv_signal; 1771 n++; 1772 } 1773 if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { 1774 if (n == max_irqs) { 1775 /* signal userspace to try again */ 1776 ret = -ENOMEM; 1777 goto out; 1778 } 1779 irq = (struct kvm_s390_irq *) &buf[n]; 1780 irq->type = KVM_S390_MCHK; 1781 irq->u.mchk = fi->mchk; 1782 n++; 1783 } 1784 1785 out: 1786 spin_unlock(&fi->lock); 1787 if (!ret && n > 0) { 1788 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) 1789 ret = -EFAULT; 1790 } 1791 vfree(buf); 1792 1793 return ret < 0 ? 
ret : n; 1794 } 1795 1796 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 1797 { 1798 int r; 1799 1800 switch (attr->group) { 1801 case KVM_DEV_FLIC_GET_ALL_IRQS: 1802 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr, 1803 attr->attr); 1804 break; 1805 default: 1806 r = -EINVAL; 1807 } 1808 1809 return r; 1810 } 1811 1812 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti, 1813 u64 addr) 1814 { 1815 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; 1816 void *target = NULL; 1817 void __user *source; 1818 u64 size; 1819 1820 if (get_user(inti->type, (u64 __user *)addr)) 1821 return -EFAULT; 1822 1823 switch (inti->type) { 1824 case KVM_S390_INT_PFAULT_INIT: 1825 case KVM_S390_INT_PFAULT_DONE: 1826 case KVM_S390_INT_VIRTIO: 1827 case KVM_S390_INT_SERVICE: 1828 target = (void *) &inti->ext; 1829 source = &uptr->u.ext; 1830 size = sizeof(inti->ext); 1831 break; 1832 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1833 target = (void *) &inti->io; 1834 source = &uptr->u.io; 1835 size = sizeof(inti->io); 1836 break; 1837 case KVM_S390_MCHK: 1838 target = (void *) &inti->mchk; 1839 source = &uptr->u.mchk; 1840 size = sizeof(inti->mchk); 1841 break; 1842 default: 1843 return -EINVAL; 1844 } 1845 1846 if (copy_from_user(target, source, size)) 1847 return -EFAULT; 1848 1849 return 0; 1850 } 1851 1852 static int enqueue_floating_irq(struct kvm_device *dev, 1853 struct kvm_device_attr *attr) 1854 { 1855 struct kvm_s390_interrupt_info *inti = NULL; 1856 int r = 0; 1857 int len = attr->attr; 1858 1859 if (len % sizeof(struct kvm_s390_irq) != 0) 1860 return -EINVAL; 1861 else if (len > KVM_S390_FLIC_MAX_BUFFER) 1862 return -EINVAL; 1863 1864 while (len >= sizeof(struct kvm_s390_irq)) { 1865 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 1866 if (!inti) 1867 return -ENOMEM; 1868 1869 r = copy_irq_from_user(inti, attr->addr); 1870 if (r) { 1871 kfree(inti); 1872 return r; 1873 } 1874 r = __inject_vm(dev->kvm, inti); 1875 if (r) { 1876 kfree(inti); 1877 return r; 1878 } 1879 len -= sizeof(struct kvm_s390_irq); 1880 attr->addr += sizeof(struct kvm_s390_irq); 1881 } 1882 1883 return r; 1884 } 1885 1886 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id) 1887 { 1888 if (id >= MAX_S390_IO_ADAPTERS) 1889 return NULL; 1890 return kvm->arch.adapters[id]; 1891 } 1892 1893 static int register_io_adapter(struct kvm_device *dev, 1894 struct kvm_device_attr *attr) 1895 { 1896 struct s390_io_adapter *adapter; 1897 struct kvm_s390_io_adapter adapter_info; 1898 1899 if (copy_from_user(&adapter_info, 1900 (void __user *)attr->addr, sizeof(adapter_info))) 1901 return -EFAULT; 1902 1903 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) || 1904 (dev->kvm->arch.adapters[adapter_info.id] != NULL)) 1905 return -EINVAL; 1906 1907 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 1908 if (!adapter) 1909 return -ENOMEM; 1910 1911 INIT_LIST_HEAD(&adapter->maps); 1912 init_rwsem(&adapter->maps_lock); 1913 atomic_set(&adapter->nr_maps, 0); 1914 adapter->id = adapter_info.id; 1915 adapter->isc = adapter_info.isc; 1916 adapter->maskable = adapter_info.maskable; 1917 adapter->masked = false; 1918 adapter->swap = adapter_info.swap; 1919 dev->kvm->arch.adapters[adapter->id] = adapter; 1920 1921 return 0; 1922 } 1923 1924 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked) 1925 { 1926 int ret; 1927 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 1928 1929 if (!adapter || !adapter->maskable) 1930 return 
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
	u32 schid;

	if (attr->flags)
		return -EINVAL;
	if (attr->attr != sizeof(schid))
		return -EINVAL;
	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
		return -EFAULT;
	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
	/*
	 * If userspace is conforming to the architecture, we can have at most
	 * one pending I/O interrupt per subchannel, so this is effectively a
	 * clear all.
	 */
	return 0;
}

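/*
 * KVM_SET_DEVICE_ATTR handler for the FLIC device; dispatches on the
 * attribute group.
 */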
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues, so we don't need to worry
		 * about late-coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_has_attr(struct kvm_device *dev,
			 struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
	case KVM_DEV_FLIC_ENQUEUE:
	case KVM_DEV_FLIC_CLEAR_IRQS:
	case KVM_DEV_FLIC_APF_ENABLE:
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		return 0;
	}
	return -ENXIO;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

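/*
 * Look up a previously registered mapping by its guest address. Callers are
 * expected to hold adapter->maps_lock; set_adapter_int() takes it for
 * reading around adapter_indicators_set().
 */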
static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending.
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

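/*
 * Copy the vcpu's pending local interrupts into the user buffer in
 * struct kvm_s390_irq format. The SIGP emergency bitmap is expanded into
 * one entry per signalling CPU, and a pending SIGP external call is read
 * from the SCA. Returns the number of bytes written, or -ENOBUFS if the
 * buffer is too small.
 */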
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}