interrupt.c (b3c17f10fa2cfc29cf35e4821275e046e725213e) interrupt.c (1e133ab296f3ff8d9e58a5e758291ed39ba72ad7)
1/*
2 * handling kvm guest interrupts
3 *
4 * Copyright IBM Corp. 2008, 2015
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.

--- 9 unchanged lines hidden (view full) ---

18#include <linux/slab.h>
19#include <linux/bitmap.h>
20#include <linux/vmalloc.h>
21#include <asm/asm-offsets.h>
22#include <asm/dis.h>
23#include <asm/uaccess.h>
24#include <asm/sclp.h>
25#include <asm/isc.h>
1/*
2 * handling kvm guest interrupts
3 *
4 * Copyright IBM Corp. 2008, 2015
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.

--- 9 unchanged lines hidden (view full) ---

18#include <linux/slab.h>
19#include <linux/bitmap.h>
20#include <linux/vmalloc.h>
21#include <asm/asm-offsets.h>
22#include <asm/dis.h>
23#include <asm/uaccess.h>
24#include <asm/sclp.h>
25#include <asm/isc.h>
26#include <asm/gmap.h>
26#include "kvm-s390.h"
27#include "gaccess.h"
28#include "trace-s390.h"
29
30#define IOINT_SCHID_MASK 0x0000ffff
31#define IOINT_SSID_MASK 0x00030000
32#define IOINT_CSSID_MASK 0x03fc0000
33#define PFAULT_INIT 0x0600

--- 143 unchanged lines hidden (view full) ---

177static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
178{
179 return !psw_extint_disabled(vcpu) &&
180 (vcpu->arch.sie_block->gcr[0] & 0x400ul);
181}
182
183static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
184{
27#include "kvm-s390.h"
28#include "gaccess.h"
29#include "trace-s390.h"
30
31#define IOINT_SCHID_MASK 0x0000ffff
32#define IOINT_SSID_MASK 0x00030000
33#define IOINT_CSSID_MASK 0x03fc0000
34#define PFAULT_INIT 0x0600

--- 143 unchanged lines hidden (view full) ---

178static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
179{
180 return !psw_extint_disabled(vcpu) &&
181 (vcpu->arch.sie_block->gcr[0] & 0x400ul);
182}
183
/*
 * Check whether a CPU-timer external interrupt is pending for the guest.
 *
 * The span contained two interleaved implementations; this keeps the one
 * that reads the timer through kvm_s390_get_cpu_timer() -- consistent with
 * __calculate_sltime() in this file -- rather than reading
 * vcpu->arch.sie_block->cputm directly, which bypasses the accessor and
 * may not reflect the currently stepped timer value.
 *
 * The CPU timer is a down-counter; bit 63 set (value gone negative)
 * indicates it has expired, so the shift yields 0 or 1.
 */
static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}
189
190static inline int is_ioirq(unsigned long irq_type)
191{
192 return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
193 (irq_type <= IRQ_PEND_IO_ISC_7));
194}
195

--- 135 unchanged lines hidden (view full) ---

/*
 * Refresh all interception indicators for this VCPU by delegating to
 * one helper per pending-interrupt class (I/O, external, machine check,
 * stop). Called so the SIE control block reflects the current set of
 * pending interrupt classes.
 */
331static void set_intercept_indicators(struct kvm_vcpu *vcpu)
332{
333 set_intercept_indicators_io(vcpu);
334 set_intercept_indicators_ext(vcpu);
335 set_intercept_indicators_mchk(vcpu);
336 set_intercept_indicators_stop(vcpu);
337}
338
188}
189
190static inline int is_ioirq(unsigned long irq_type)
191{
192 return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
193 (irq_type <= IRQ_PEND_IO_ISC_7));
194}
195

--- 135 unchanged lines hidden (view full) ---

331static void set_intercept_indicators(struct kvm_vcpu *vcpu)
332{
333 set_intercept_indicators_io(vcpu);
334 set_intercept_indicators_ext(vcpu);
335 set_intercept_indicators_mchk(vcpu);
336 set_intercept_indicators_stop(vcpu);
337}
338
/*
 * Derive the instruction-length code for the last intercepted guest
 * instruction, based on the SIE interception code.
 *
 * For instruction-related intercepts the first halfword of the
 * instruction is available in sie_block->ipa, so the length can be
 * computed from the first opcode byte (ipa >> 8) via insn_length().
 * For program-interruption intercepts the hardware-provided ILC in
 * sie_block->pgmilc is used. Returns 0 when no instruction length is
 * available for the current interception code.
 */
339static u16 get_ilc(struct kvm_vcpu *vcpu)
340{
341 switch (vcpu->arch.sie_block->icptcode) {
342 case ICPT_INST:
343 case ICPT_INSTPROGI:
344 case ICPT_OPEREXC:
345 case ICPT_PARTEXEC:
346 case ICPT_IOINST:
347 /* last instruction only stored for these icptcodes */
348 return insn_length(vcpu->arch.sie_block->ipa >> 8);
349 case ICPT_PROGI:
350 return vcpu->arch.sie_block->pgmilc;
351 default:
352 return 0;
353 }
354}
355
339static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
340{
341 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
342 int rc;
343
344 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
345 0, 0);
346

--- 220 unchanged lines hidden (view full) ---

567 return rc ? -EFAULT : 0;
568}
569
570static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
571{
572 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
573 struct kvm_s390_pgm_info pgm_info;
574 int rc = 0, nullifying = false;
356static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
357{
358 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
359 int rc;
360
361 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
362 0, 0);
363

--- 220 unchanged lines hidden (view full) ---

584 return rc ? -EFAULT : 0;
585}
586
587static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
588{
589 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
590 struct kvm_s390_pgm_info pgm_info;
591 int rc = 0, nullifying = false;
575 u16 ilen;
592 u16 ilc = get_ilc(vcpu);
576
577 spin_lock(&li->lock);
578 pgm_info = li->irq.pgm;
579 clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
580 memset(&li->irq.pgm, 0, sizeof(pgm_info));
581 spin_unlock(&li->lock);
582
593
594 spin_lock(&li->lock);
595 pgm_info = li->irq.pgm;
596 clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
597 memset(&li->irq.pgm, 0, sizeof(pgm_info));
598 spin_unlock(&li->lock);
599
583 ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
584 VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
585 pgm_info.code, ilen);
600 VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
601 pgm_info.code, ilc);
586 vcpu->stat.deliver_program_int++;
587 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
588 pgm_info.code, 0);
589
590 switch (pgm_info.code & ~PGM_PER) {
591 case PGM_AFX_TRANSLATION:
592 case PGM_ASX_TRANSLATION:
593 case PGM_EX_TRANSLATION:

--- 67 unchanged lines hidden (view full) ---

661 rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
662 (u8 *)__LC_PER_ATMID);
663 rc |= put_guest_lc(vcpu, pgm_info.per_address,
664 (u64 *) __LC_PER_ADDRESS);
665 rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
666 (u8 *) __LC_PER_ACCESS_ID);
667 }
668
602 vcpu->stat.deliver_program_int++;
603 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
604 pgm_info.code, 0);
605
606 switch (pgm_info.code & ~PGM_PER) {
607 case PGM_AFX_TRANSLATION:
608 case PGM_ASX_TRANSLATION:
609 case PGM_EX_TRANSLATION:

--- 67 unchanged lines hidden (view full) ---

677 rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
678 (u8 *)__LC_PER_ATMID);
679 rc |= put_guest_lc(vcpu, pgm_info.per_address,
680 (u64 *) __LC_PER_ADDRESS);
681 rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
682 (u8 *) __LC_PER_ACCESS_ID);
683 }
684
669 if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
670 kvm_s390_rewind_psw(vcpu, ilen);
685 if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
686 kvm_s390_rewind_psw(vcpu, ilc);
671
687
672 /* bit 1+2 of the target are the ilc, so we can directly use ilen */
673 rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
688 rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
674 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
675 (u64 *) __LC_LAST_BREAK);
676 rc |= put_guest_lc(vcpu, pgm_info.code,
677 (u16 *)__LC_PGM_INT_CODE);
678 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
679 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
680 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
681 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));

--- 222 unchanged lines hidden (view full) ---

904 return 0;
905}
906
/*
 * Generic-KVM hook: report whether a timer interrupt (clock comparator
 * or CPU timer) is pending and deliverable for this VCPU.
 */
907int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
908{
909 return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
910}
911
689 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
690 (u64 *) __LC_LAST_BREAK);
691 rc |= put_guest_lc(vcpu, pgm_info.code,
692 (u16 *)__LC_PGM_INT_CODE);
693 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
694 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
695 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
696 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));

--- 222 unchanged lines hidden (view full) ---

919 return 0;
920}
921
922int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
923{
924 return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
925}
926
/*
 * Compute how long (in ns) this VCPU may sleep before a timer interrupt
 * (clock comparator and/or CPU timer) becomes pending.
 *
 * Returns 0 when a timer is already expired (or the ckc difference
 * overflowed), i.e. the caller must not sleep. When both timer sources
 * are enabled, the sooner of the two deadlines is returned. The CPU
 * timer is a down-counter: bit 63 set means it already expired, and its
 * remaining value is converted from TOD units to ns via tod_to_ns().
 */
912static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
913{
914 u64 now, cputm, sltime = 0;
915
916 if (ckc_interrupts_enabled(vcpu)) {
917 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
918 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
919 /* already expired or overflow? */
920 if (!sltime || vcpu->arch.sie_block->ckc <= now)
921 return 0;
922 if (cpu_timer_interrupts_enabled(vcpu)) {
923 cputm = kvm_s390_get_cpu_timer(vcpu);
924 /* already expired? */
925 if (cputm >> 63)
926 return 0;
927 return min(sltime, tod_to_ns(cputm));
928 }
929 } else if (cpu_timer_interrupts_enabled(vcpu)) {
930 sltime = kvm_s390_get_cpu_timer(vcpu);
931 /* already expired? */
932 if (sltime >> 63)
933 return 0;
934 }
935 return sltime;
936}
937
938int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
939{
927int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
928{
940 u64 sltime;
929 u64 now, sltime;
941
942 vcpu->stat.exit_wait_state++;
943
944 /* fast path */
945 if (kvm_arch_vcpu_runnable(vcpu))
946 return 0;
947
948 if (psw_interrupts_disabled(vcpu)) {
949 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
950 return -EOPNOTSUPP; /* disabled wait */
951 }
952
930
931 vcpu->stat.exit_wait_state++;
932
933 /* fast path */
934 if (kvm_arch_vcpu_runnable(vcpu))
935 return 0;
936
937 if (psw_interrupts_disabled(vcpu)) {
938 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
939 return -EOPNOTSUPP; /* disabled wait */
940 }
941
953 if (!ckc_interrupts_enabled(vcpu) &&
954 !cpu_timer_interrupts_enabled(vcpu)) {
942 if (!ckc_interrupts_enabled(vcpu)) {
955 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
956 __set_cpu_idle(vcpu);
957 goto no_timer;
958 }
959
943 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
944 __set_cpu_idle(vcpu);
945 goto no_timer;
946 }
947
960 sltime = __calculate_sltime(vcpu);
961 if (!sltime)
948 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
949 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
950
951 /* underflow */
952 if (vcpu->arch.sie_block->ckc < now)
962 return 0;
963
964 __set_cpu_idle(vcpu);
965 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
953 return 0;
954
955 __set_cpu_idle(vcpu);
956 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
966 VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
957 VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
967no_timer:
968 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
969 kvm_vcpu_block(vcpu);
970 __unset_cpu_idle(vcpu);
971 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
972
973 hrtimer_cancel(&vcpu->arch.ckc_timer);
974 return 0;

--- 10 unchanged lines hidden (view full) ---

985 wake_up_interruptible(&vcpu->wq);
986 vcpu->stat.halt_wakeup++;
987 }
988}
989
990enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
991{
992 struct kvm_vcpu *vcpu;
958no_timer:
959 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
960 kvm_vcpu_block(vcpu);
961 __unset_cpu_idle(vcpu);
962 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
963
964 hrtimer_cancel(&vcpu->arch.ckc_timer);
965 return 0;

--- 10 unchanged lines hidden (view full) ---

976 wake_up_interruptible(&vcpu->wq);
977 vcpu->stat.halt_wakeup++;
978 }
979}
980
981enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
982{
983 struct kvm_vcpu *vcpu;
993 u64 sltime;
984 u64 now, sltime;
994
995 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
985
986 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
996 sltime = __calculate_sltime(vcpu);
987 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
988 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
997
998 /*
999 * If the monotonic clock runs faster than the tod clock we might be
1000 * woken up too early and have to go back to sleep to avoid deadlocks.
1001 */
989
990 /*
991 * If the monotonic clock runs faster than the tod clock we might be
992 * woken up too early and have to go back to sleep to avoid deadlocks.
993 */
1002 if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
994 if (vcpu->arch.sie_block->ckc > now &&
995 hrtimer_forward_now(timer, ns_to_ktime(sltime)))
1003 return HRTIMER_RESTART;
1004 kvm_s390_vcpu_wakeup(vcpu);
1005 return HRTIMER_NORESTART;
1006}
1007
1008void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
1009{
1010 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

--- 51 unchanged lines hidden (view full) ---

1062static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1063{
1064 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1065
1066 VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1067 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1068 irq->u.pgm.code, 0);
1069
996 return HRTIMER_RESTART;
997 kvm_s390_vcpu_wakeup(vcpu);
998 return HRTIMER_NORESTART;
999}
1000
1001void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
1002{
1003 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

--- 51 unchanged lines hidden (view full) ---

1055static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1056{
1057 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1058
1059 VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1060 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1061 irq->u.pgm.code, 0);
1062
1070 if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
1071 /* auto detection if no valid ILC was given */
1072 irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
1073 irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
1074 irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
1075 }
1076
1077 if (irq->u.pgm.code == PGM_PER) {
1078 li->irq.pgm.code |= PGM_PER;
1063 if (irq->u.pgm.code == PGM_PER) {
1064 li->irq.pgm.code |= PGM_PER;
1079 li->irq.pgm.flags = irq->u.pgm.flags;
1080 /* only modify PER related information */
1081 li->irq.pgm.per_address = irq->u.pgm.per_address;
1082 li->irq.pgm.per_code = irq->u.pgm.per_code;
1083 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1084 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1085 } else if (!(irq->u.pgm.code & PGM_PER)) {
1086 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1087 irq->u.pgm.code;
1065 /* only modify PER related information */
1066 li->irq.pgm.per_address = irq->u.pgm.per_address;
1067 li->irq.pgm.per_code = irq->u.pgm.per_code;
1068 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1069 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1070 } else if (!(irq->u.pgm.code & PGM_PER)) {
1071 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1072 irq->u.pgm.code;
1088 li->irq.pgm.flags = irq->u.pgm.flags;
1089 /* only modify non-PER information */
1090 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1091 li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1092 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1093 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1094 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1095 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1096 } else {

--- 1260 unchanged lines hidden ---
1073 /* only modify non-PER information */
1074 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1075 li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1076 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1077 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1078 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1079 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1080 } else {

--- 1260 unchanged lines hidden ---