1 /* 2 * interrupt.c - handling kvm guest interrupts 3 * 4 * Copyright IBM Corp. 2008 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License (version 2 only) 8 * as published by the Free Software Foundation. 9 * 10 * Author(s): Carsten Otte <cotte@de.ibm.com> 11 */ 12 13 #include <linux/interrupt.h> 14 #include <linux/kvm_host.h> 15 #include <linux/hrtimer.h> 16 #include <linux/signal.h> 17 #include <linux/slab.h> 18 #include <asm/asm-offsets.h> 19 #include <asm/uaccess.h> 20 #include "kvm-s390.h" 21 #include "gaccess.h" 22 23 static int psw_extint_disabled(struct kvm_vcpu *vcpu) 24 { 25 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); 26 } 27 28 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) 29 { 30 if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || 31 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) || 32 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT)) 33 return 0; 34 return 1; 35 } 36 37 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, 38 struct kvm_s390_interrupt_info *inti) 39 { 40 switch (inti->type) { 41 case KVM_S390_INT_EXTERNAL_CALL: 42 if (psw_extint_disabled(vcpu)) 43 return 0; 44 if (vcpu->arch.sie_block->gcr[0] & 0x2000ul) 45 return 1; 46 case KVM_S390_INT_EMERGENCY: 47 if (psw_extint_disabled(vcpu)) 48 return 0; 49 if (vcpu->arch.sie_block->gcr[0] & 0x4000ul) 50 return 1; 51 return 0; 52 case KVM_S390_INT_SERVICE: 53 if (psw_extint_disabled(vcpu)) 54 return 0; 55 if (vcpu->arch.sie_block->gcr[0] & 0x200ul) 56 return 1; 57 return 0; 58 case KVM_S390_INT_VIRTIO: 59 if (psw_extint_disabled(vcpu)) 60 return 0; 61 if (vcpu->arch.sie_block->gcr[0] & 0x200ul) 62 return 1; 63 return 0; 64 case KVM_S390_PROGRAM_INT: 65 case KVM_S390_SIGP_STOP: 66 case KVM_S390_SIGP_SET_PREFIX: 67 case KVM_S390_RESTART: 68 return 1; 69 default: 70 BUG(); 71 } 72 return 0; 73 } 74 75 static void __set_cpu_idle(struct kvm_vcpu *vcpu) 76 { 77 BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1); 78 atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 79 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 80 } 81 82 static void __unset_cpu_idle(struct kvm_vcpu *vcpu) 83 { 84 BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1); 85 atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 86 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 87 } 88 89 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) 90 { 91 atomic_clear_mask(CPUSTAT_ECALL_PEND | 92 CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, 93 &vcpu->arch.sie_block->cpuflags); 94 vcpu->arch.sie_block->lctl = 0x0000; 95 } 96 97 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) 98 { 99 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); 100 } 101 102 static void __set_intercept_indicator(struct kvm_vcpu *vcpu, 103 struct kvm_s390_interrupt_info *inti) 104 { 105 switch (inti->type) { 106 case KVM_S390_INT_EXTERNAL_CALL: 107 case KVM_S390_INT_EMERGENCY: 108 case KVM_S390_INT_SERVICE: 109 case KVM_S390_INT_VIRTIO: 110 if (psw_extint_disabled(vcpu)) 111 __set_cpuflag(vcpu, CPUSTAT_EXT_INT); 112 else 113 vcpu->arch.sie_block->lctl |= LCTL_CR0; 114 break; 115 case KVM_S390_SIGP_STOP: 116 __set_cpuflag(vcpu, CPUSTAT_STOP_INT); 117 break; 118 default: 119 BUG(); 120 } 121 } 122 123 static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, 124 struct kvm_s390_interrupt_info *inti) 125 { 126 const unsigned short table[] = { 2, 4, 4, 6 }; 127 int rc, exception = 0; 128 129 switch (inti->type) { 130 case KVM_S390_INT_EMERGENCY: 131 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); 132 vcpu->stat.deliver_emergency_signal++; 133 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201); 134 if (rc == -EFAULT) 135 exception = 1; 136 137 rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code); 138 if (rc == -EFAULT) 139 exception = 1; 140 141 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, 142 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 143 if (rc == -EFAULT) 144 exception = 1; 145 146 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, 147 __LC_EXT_NEW_PSW, sizeof(psw_t)); 148 if (rc == -EFAULT) 149 exception = 1; 150 break; 151 152 case KVM_S390_INT_EXTERNAL_CALL: 153 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); 154 vcpu->stat.deliver_external_call++; 155 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202); 156 if (rc == -EFAULT) 157 exception = 1; 158 159 rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->extcall.code); 160 if (rc == -EFAULT) 161 exception = 1; 162 163 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, 164 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 165 if (rc == -EFAULT) 166 exception = 1; 167 168 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, 169 __LC_EXT_NEW_PSW, sizeof(psw_t)); 170 if (rc == -EFAULT) 171 exception = 1; 172 break; 173 174 case KVM_S390_INT_SERVICE: 175 VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", 176 inti->ext.ext_params); 177 vcpu->stat.deliver_service_signal++; 178 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401); 179 if (rc == -EFAULT) 180 exception = 1; 181 182 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, 183 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 184 if (rc == -EFAULT) 185 exception = 1; 186 187 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, 188 __LC_EXT_NEW_PSW, sizeof(psw_t)); 189 if (rc == -EFAULT) 190 exception = 1; 191 192 rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params); 193 if (rc == -EFAULT) 194 exception = 1; 195 break; 196 197 case KVM_S390_INT_VIRTIO: 198 VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", 199 inti->ext.ext_params, inti->ext.ext_params2); 200 vcpu->stat.deliver_virtio_interrupt++; 201 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603); 202 if (rc == -EFAULT) 203 exception = 1; 204 205 rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00); 206 if (rc == -EFAULT) 207 exception = 1; 208 209 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, 210 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 211 if (rc == -EFAULT) 212 exception = 1; 213 214 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, 215 __LC_EXT_NEW_PSW, sizeof(psw_t)); 216 if (rc == -EFAULT) 217 exception = 1; 218 219 rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params); 220 if (rc == -EFAULT) 221 exception = 1; 222 223 rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2, 224 inti->ext.ext_params2); 225 if (rc == -EFAULT) 226 exception = 1; 227 break; 228 229 case KVM_S390_SIGP_STOP: 230 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); 231 vcpu->stat.deliver_stop_signal++; 232 __set_intercept_indicator(vcpu, inti); 233 break; 234 235 case KVM_S390_SIGP_SET_PREFIX: 236 VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", 237 inti->prefix.address); 238 vcpu->stat.deliver_prefix_signal++; 239 vcpu->arch.sie_block->prefix = inti->prefix.address; 240 vcpu->arch.sie_block->ihcpu = 0xffff; 241 break; 242 243 case KVM_S390_RESTART: 244 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); 245 vcpu->stat.deliver_restart_signal++; 246 rc = copy_to_guest(vcpu, offsetof(struct _lowcore, 247 restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 248 if (rc == -EFAULT) 249 exception = 1; 250 251 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, 252 offsetof(struct _lowcore, restart_psw), sizeof(psw_t)); 253 if (rc == -EFAULT) 254 exception = 1; 255 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 256 break; 257 258 case KVM_S390_PROGRAM_INT: 259 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", 260 inti->pgm.code, 261 table[vcpu->arch.sie_block->ipa >> 14]); 262 vcpu->stat.deliver_program_int++; 263 rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code); 264 if (rc == -EFAULT) 265 exception = 1; 266 267 rc = put_guest_u16(vcpu, __LC_PGM_ILC, 268 table[vcpu->arch.sie_block->ipa >> 14]); 269 if (rc == -EFAULT) 270 exception = 1; 271 272 rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW, 273 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 274 if (rc == -EFAULT) 275 exception = 1; 276 277 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, 278 __LC_PGM_NEW_PSW, sizeof(psw_t)); 279 if (rc == -EFAULT) 280 exception = 1; 281 break; 282 283 default: 284 BUG(); 285 } 286 if (exception) { 287 printk("kvm: The guest lowcore is not mapped during interrupt " 288 "delivery, killing userspace\n"); 289 do_exit(SIGKILL); 290 } 291 } 292 293 static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu) 294 { 295 int rc, exception = 0; 296 297 if (psw_extint_disabled(vcpu)) 298 return 0; 299 if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) 300 return 0; 301 rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004); 302 if (rc == -EFAULT) 303 exception = 1; 304 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, 305 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 306 if (rc == -EFAULT) 307 exception = 1; 308 rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw, 309 __LC_EXT_NEW_PSW, sizeof(psw_t)); 310 if (rc == -EFAULT) 311 exception = 1; 312 if (exception) { 313 printk("kvm: The guest lowcore is not mapped during interrupt " 314 "delivery, killing userspace\n"); 315 do_exit(SIGKILL); 316 } 317 return 1; 318 } 319 320 static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) 321 { 322 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 323 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; 324 struct kvm_s390_interrupt_info *inti; 325 int rc = 0; 326 327 if (atomic_read(&li->active)) { 328 spin_lock_bh(&li->lock); 329 list_for_each_entry(inti, &li->list, list) 330 if (__interrupt_is_deliverable(vcpu, inti)) { 331 rc = 1; 332 break; 333 } 334 spin_unlock_bh(&li->lock); 335 } 336 337 if ((!rc) && atomic_read(&fi->active)) { 338 spin_lock(&fi->lock); 339 list_for_each_entry(inti, &fi->list, list) 340 if (__interrupt_is_deliverable(vcpu, inti)) { 341 rc = 1; 342 break; 343 } 344 spin_unlock(&fi->lock); 345 } 346 347 if ((!rc) && (vcpu->arch.sie_block->ckc < 348 get_clock() + vcpu->arch.sie_block->epoch)) { 349 if ((!psw_extint_disabled(vcpu)) && 350 (vcpu->arch.sie_block->gcr[0] & 0x800ul)) 351 rc = 1; 352 } 353 354 return rc; 355 } 356 357 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 358 { 359 return 0; 360 } 361 362 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) 363 { 364 u64 now, sltime; 365 DECLARE_WAITQUEUE(wait, current); 366 367 vcpu->stat.exit_wait_state++; 368 if (kvm_cpu_has_interrupt(vcpu)) 369 return 0; 370 371 __set_cpu_idle(vcpu); 372 spin_lock_bh(&vcpu->arch.local_int.lock); 373 vcpu->arch.local_int.timer_due = 0; 374 spin_unlock_bh(&vcpu->arch.local_int.lock); 375 376 if (psw_interrupts_disabled(vcpu)) { 377 VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); 378 __unset_cpu_idle(vcpu); 379 return -EOPNOTSUPP; /* disabled wait */ 380 } 381 382 if (psw_extint_disabled(vcpu) || 383 (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) { 384 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); 385 goto no_timer; 386 } 387 388 now = get_clock() + vcpu->arch.sie_block->epoch; 389 if (vcpu->arch.sie_block->ckc < now) { 390 __unset_cpu_idle(vcpu); 391 return 0; 392 } 393 394 sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9; 395 396 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); 397 VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); 398 no_timer: 399 spin_lock(&vcpu->arch.local_int.float_int->lock); 400 spin_lock_bh(&vcpu->arch.local_int.lock); 401 add_wait_queue(&vcpu->arch.local_int.wq, &wait); 402 while (list_empty(&vcpu->arch.local_int.list) && 403 list_empty(&vcpu->arch.local_int.float_int->list) && 404 (!vcpu->arch.local_int.timer_due) && 405 !signal_pending(current)) { 406 set_current_state(TASK_INTERRUPTIBLE); 407 spin_unlock_bh(&vcpu->arch.local_int.lock); 408 spin_unlock(&vcpu->arch.local_int.float_int->lock); 409 vcpu_put(vcpu); 410 schedule(); 411 vcpu_load(vcpu); 412 spin_lock(&vcpu->arch.local_int.float_int->lock); 413 spin_lock_bh(&vcpu->arch.local_int.lock); 414 } 415 __unset_cpu_idle(vcpu); 416 __set_current_state(TASK_RUNNING); 417 remove_wait_queue(&vcpu->arch.local_int.wq, &wait); 418 spin_unlock_bh(&vcpu->arch.local_int.lock); 419 spin_unlock(&vcpu->arch.local_int.float_int->lock); 420 hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); 421 return 0; 422 } 423 424 void kvm_s390_tasklet(unsigned long parm) 425 { 426 struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm; 427 428 spin_lock(&vcpu->arch.local_int.lock); 429 vcpu->arch.local_int.timer_due = 1; 430 if (waitqueue_active(&vcpu->arch.local_int.wq)) 431 wake_up_interruptible(&vcpu->arch.local_int.wq); 432 spin_unlock(&vcpu->arch.local_int.lock); 433 } 434 435 /* 436 * low level hrtimer wake routine. Because this runs in hardirq context 437 * we schedule a tasklet to do the real work. 438 */ 439 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) 440 { 441 struct kvm_vcpu *vcpu; 442 443 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); 444 tasklet_schedule(&vcpu->arch.tasklet); 445 446 return HRTIMER_NORESTART; 447 } 448 449 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) 450 { 451 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 452 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; 453 struct kvm_s390_interrupt_info *n, *inti = NULL; 454 int deliver; 455 456 __reset_intercept_indicators(vcpu); 457 if (atomic_read(&li->active)) { 458 do { 459 deliver = 0; 460 spin_lock_bh(&li->lock); 461 list_for_each_entry_safe(inti, n, &li->list, list) { 462 if (__interrupt_is_deliverable(vcpu, inti)) { 463 list_del(&inti->list); 464 deliver = 1; 465 break; 466 } 467 __set_intercept_indicator(vcpu, inti); 468 } 469 if (list_empty(&li->list)) 470 atomic_set(&li->active, 0); 471 spin_unlock_bh(&li->lock); 472 if (deliver) { 473 __do_deliver_interrupt(vcpu, inti); 474 kfree(inti); 475 } 476 } while (deliver); 477 } 478 479 if ((vcpu->arch.sie_block->ckc < 480 get_clock() + vcpu->arch.sie_block->epoch)) 481 __try_deliver_ckc_interrupt(vcpu); 482 483 if (atomic_read(&fi->active)) { 484 do { 485 deliver = 0; 486 spin_lock(&fi->lock); 487 list_for_each_entry_safe(inti, n, &fi->list, list) { 488 if (__interrupt_is_deliverable(vcpu, inti)) { 489 list_del(&inti->list); 490 deliver = 1; 491 break; 492 } 493 __set_intercept_indicator(vcpu, inti); 494 } 495 if (list_empty(&fi->list)) 496 atomic_set(&fi->active, 0); 497 spin_unlock(&fi->lock); 498 if (deliver) { 499 __do_deliver_interrupt(vcpu, inti); 500 kfree(inti); 501 } 502 } while (deliver); 503 } 504 } 505 506 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) 507 { 508 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 509 struct kvm_s390_interrupt_info *inti; 510 511 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 512 if (!inti) 513 return -ENOMEM; 514 515 inti->type = KVM_S390_PROGRAM_INT; 516 inti->pgm.code = code; 517 518 VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); 519 spin_lock_bh(&li->lock); 520 list_add(&inti->list, &li->list); 521 atomic_set(&li->active, 1); 522 BUG_ON(waitqueue_active(&li->wq)); 523 spin_unlock_bh(&li->lock); 524 return 0; 525 } 526 527 int kvm_s390_inject_vm(struct kvm *kvm, 528 struct kvm_s390_interrupt *s390int) 529 { 530 struct kvm_s390_local_interrupt *li; 531 struct kvm_s390_float_interrupt *fi; 532 struct kvm_s390_interrupt_info *inti; 533 int sigcpu; 534 535 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 536 if (!inti) 537 return -ENOMEM; 538 539 switch (s390int->type) { 540 case KVM_S390_INT_VIRTIO: 541 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx", 542 s390int->parm, s390int->parm64); 543 inti->type = s390int->type; 544 inti->ext.ext_params = s390int->parm; 545 inti->ext.ext_params2 = s390int->parm64; 546 break; 547 case KVM_S390_INT_SERVICE: 548 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm); 549 inti->type = s390int->type; 550 inti->ext.ext_params = s390int->parm; 551 break; 552 case KVM_S390_PROGRAM_INT: 553 case KVM_S390_SIGP_STOP: 554 case KVM_S390_INT_EXTERNAL_CALL: 555 case KVM_S390_INT_EMERGENCY: 556 default: 557 kfree(inti); 558 return -EINVAL; 559 } 560 561 mutex_lock(&kvm->lock); 562 fi = &kvm->arch.float_int; 563 spin_lock(&fi->lock); 564 list_add_tail(&inti->list, &fi->list); 565 atomic_set(&fi->active, 1); 566 sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); 567 if (sigcpu == KVM_MAX_VCPUS) { 568 do { 569 sigcpu = fi->next_rr_cpu++; 570 if (sigcpu == KVM_MAX_VCPUS) 571 sigcpu = fi->next_rr_cpu = 0; 572 } while (fi->local_int[sigcpu] == NULL); 573 } 574 li = fi->local_int[sigcpu]; 575 spin_lock_bh(&li->lock); 576 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 577 if (waitqueue_active(&li->wq)) 578 wake_up_interruptible(&li->wq); 579 spin_unlock_bh(&li->lock); 580 spin_unlock(&fi->lock); 581 mutex_unlock(&kvm->lock); 582 return 0; 583 } 584 585 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, 586 struct kvm_s390_interrupt *s390int) 587 { 588 struct kvm_s390_local_interrupt *li; 589 struct kvm_s390_interrupt_info *inti; 590 591 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 592 if (!inti) 593 return -ENOMEM; 594 595 switch (s390int->type) { 596 case KVM_S390_PROGRAM_INT: 597 if (s390int->parm & 0xffff0000) { 598 kfree(inti); 599 return -EINVAL; 600 } 601 inti->type = s390int->type; 602 inti->pgm.code = s390int->parm; 603 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", 604 s390int->parm); 605 break; 606 case KVM_S390_SIGP_SET_PREFIX: 607 inti->prefix.address = s390int->parm; 608 inti->type = s390int->type; 609 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", 610 s390int->parm); 611 break; 612 case KVM_S390_SIGP_STOP: 613 case KVM_S390_RESTART: 614 case KVM_S390_INT_EXTERNAL_CALL: 615 case KVM_S390_INT_EMERGENCY: 616 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); 617 inti->type = s390int->type; 618 break; 619 case KVM_S390_INT_VIRTIO: 620 case KVM_S390_INT_SERVICE: 621 default: 622 kfree(inti); 623 return -EINVAL; 624 } 625 626 mutex_lock(&vcpu->kvm->lock); 627 li = &vcpu->arch.local_int; 628 spin_lock_bh(&li->lock); 629 if (inti->type == KVM_S390_PROGRAM_INT) 630 list_add(&inti->list, &li->list); 631 else 632 list_add_tail(&inti->list, &li->list); 633 atomic_set(&li->active, 1); 634 if (inti->type == KVM_S390_SIGP_STOP) 635 li->action_bits |= ACTION_STOP_ON_STOP; 636 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 637 if (waitqueue_active(&li->wq)) 638 wake_up_interruptible(&vcpu->arch.local_int.wq); 639 spin_unlock_bh(&li->lock); 640 mutex_unlock(&vcpu->kvm->lock); 641 return 0; 642 } 643