1d809aa23SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
28f2abe6aSChristian Borntraeger /*
3a53c8fabSHeiko Carstens * in-kernel handling for sie intercepts
48f2abe6aSChristian Borntraeger *
549710db0SJanosch Frank * Copyright IBM Corp. 2008, 2020
68f2abe6aSChristian Borntraeger *
78f2abe6aSChristian Borntraeger * Author(s): Carsten Otte <cotte@de.ibm.com>
88f2abe6aSChristian Borntraeger * Christian Borntraeger <borntraeger@de.ibm.com>
98f2abe6aSChristian Borntraeger */
108f2abe6aSChristian Borntraeger
118f2abe6aSChristian Borntraeger #include <linux/kvm_host.h>
128f2abe6aSChristian Borntraeger #include <linux/errno.h>
138f2abe6aSChristian Borntraeger #include <linux/pagemap.h>
148f2abe6aSChristian Borntraeger
15a86dcc24SMichael Mueller #include <asm/asm-offsets.h>
16f14d82e0SThomas Huth #include <asm/irq.h>
17b7c92f1aSQingFeng Hao #include <asm/sysinfo.h>
1853227810SClaudio Imbrenda #include <asm/uv.h>
198f2abe6aSChristian Borntraeger
208f2abe6aSChristian Borntraeger #include "kvm-s390.h"
21ba5c1e9bSCarsten Otte #include "gaccess.h"
225786fffaSCornelia Huck #include "trace.h"
23ade38c31SCornelia Huck #include "trace-s390.h"
24ba5c1e9bSCarsten Otte
kvm_s390_get_ilen(struct kvm_vcpu * vcpu)250e8bc06aSDavid Hildenbrand u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
2604b41acdSThomas Huth {
2704b41acdSThomas Huth struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
280e8bc06aSDavid Hildenbrand u8 ilen = 0;
2904b41acdSThomas Huth
300e8bc06aSDavid Hildenbrand switch (vcpu->arch.sie_block->icptcode) {
310e8bc06aSDavid Hildenbrand case ICPT_INST:
320e8bc06aSDavid Hildenbrand case ICPT_INSTPROGI:
330e8bc06aSDavid Hildenbrand case ICPT_OPEREXC:
340e8bc06aSDavid Hildenbrand case ICPT_PARTEXEC:
350e8bc06aSDavid Hildenbrand case ICPT_IOINST:
360e8bc06aSDavid Hildenbrand /* instruction only stored for these icptcodes */
370e8bc06aSDavid Hildenbrand ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
3804b41acdSThomas Huth /* Use the length of the EXECUTE instruction if necessary */
3904b41acdSThomas Huth if (sie_block->icptstatus & 1) {
400e8bc06aSDavid Hildenbrand ilen = (sie_block->icptstatus >> 4) & 0x6;
410e8bc06aSDavid Hildenbrand if (!ilen)
420e8bc06aSDavid Hildenbrand ilen = 4;
4304b41acdSThomas Huth }
440e8bc06aSDavid Hildenbrand break;
450e8bc06aSDavid Hildenbrand case ICPT_PROGI:
460e8bc06aSDavid Hildenbrand /* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
470e8bc06aSDavid Hildenbrand ilen = vcpu->arch.sie_block->pgmilc & 0x6;
480e8bc06aSDavid Hildenbrand break;
490e8bc06aSDavid Hildenbrand }
500e8bc06aSDavid Hildenbrand return ilen;
5104b41acdSThomas Huth }
5204b41acdSThomas Huth
/*
 * handle_stop - handle a stop-request intercept
 * @vcpu: the vCPU that intercepted
 *
 * Returns 0 when the stop is delayed or no stop irq is pending anymore
 * (guest execution simply continues), a negative error code when storing
 * the status fails, or -EOPNOTSUPP to drop to userspace after the vcpu
 * has been stopped.
 */
static int handle_stop(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	uint8_t flags, stop_pending;

	vcpu->stat.exit_stop_request++;

	/* delay the stop if any non-stop irq is pending */
	if (kvm_s390_vcpu_has_irq(vcpu, 1))
		return 0;

	/* avoid races with the injection/SIGP STOP code */
	spin_lock(&li->lock);
	flags = li->irq.stop.flags;
	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
	spin_unlock(&li->lock);

	trace_kvm_s390_stop_request(stop_pending, flags);
	/* the stop request may have been cancelled in the meantime */
	if (!stop_pending)
		return 0;

	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc)
			return rc;
	}

	/*
	 * no need to check the return value of vcpu_stop as it can only have
	 * an error for protvirt, but protvirt means user cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	return -EOPNOTSUPP;
}
9032f5ff63SDavid Hildenbrand
handle_validity(struct kvm_vcpu * vcpu)918f2abe6aSChristian Borntraeger static int handle_validity(struct kvm_vcpu *vcpu)
928f2abe6aSChristian Borntraeger {
938f2abe6aSChristian Borntraeger int viwhy = vcpu->arch.sie_block->ipb >> 16;
943edbcff9SCarsten Otte
958f2abe6aSChristian Borntraeger vcpu->stat.exit_validity++;
965786fffaSCornelia Huck trace_kvm_s390_intercept_validity(vcpu, viwhy);
97a5efb6b6SChristian Borntraeger KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
98a5efb6b6SChristian Borntraeger current->pid, vcpu->kvm);
99a5efb6b6SChristian Borntraeger
100a5efb6b6SChristian Borntraeger /* do not warn on invalid runtime instrumentation mode */
101a5efb6b6SChristian Borntraeger WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
102a5efb6b6SChristian Borntraeger viwhy);
103a5efb6b6SChristian Borntraeger return -EINVAL;
1048f2abe6aSChristian Borntraeger }
1058f2abe6aSChristian Borntraeger
handle_instruction(struct kvm_vcpu * vcpu)106ba5c1e9bSCarsten Otte static int handle_instruction(struct kvm_vcpu *vcpu)
107ba5c1e9bSCarsten Otte {
108ba5c1e9bSCarsten Otte vcpu->stat.exit_instruction++;
1095786fffaSCornelia Huck trace_kvm_s390_intercept_instruction(vcpu,
1105786fffaSCornelia Huck vcpu->arch.sie_block->ipa,
1115786fffaSCornelia Huck vcpu->arch.sie_block->ipb);
112cb7485daSChristian Borntraeger
113cb7485daSChristian Borntraeger switch (vcpu->arch.sie_block->ipa >> 8) {
114cb7485daSChristian Borntraeger case 0x01:
115cb7485daSChristian Borntraeger return kvm_s390_handle_01(vcpu);
116cb7485daSChristian Borntraeger case 0x82:
117cb7485daSChristian Borntraeger return kvm_s390_handle_lpsw(vcpu);
118cb7485daSChristian Borntraeger case 0x83:
119cb7485daSChristian Borntraeger return kvm_s390_handle_diag(vcpu);
120cb7485daSChristian Borntraeger case 0xaa:
121cb7485daSChristian Borntraeger return kvm_s390_handle_aa(vcpu);
122cb7485daSChristian Borntraeger case 0xae:
123cb7485daSChristian Borntraeger return kvm_s390_handle_sigp(vcpu);
124cb7485daSChristian Borntraeger case 0xb2:
125cb7485daSChristian Borntraeger return kvm_s390_handle_b2(vcpu);
126cb7485daSChristian Borntraeger case 0xb6:
127cb7485daSChristian Borntraeger return kvm_s390_handle_stctl(vcpu);
128cb7485daSChristian Borntraeger case 0xb7:
129cb7485daSChristian Borntraeger return kvm_s390_handle_lctl(vcpu);
130cb7485daSChristian Borntraeger case 0xb9:
131cb7485daSChristian Borntraeger return kvm_s390_handle_b9(vcpu);
132cb7485daSChristian Borntraeger case 0xe3:
133cb7485daSChristian Borntraeger return kvm_s390_handle_e3(vcpu);
134cb7485daSChristian Borntraeger case 0xe5:
135cb7485daSChristian Borntraeger return kvm_s390_handle_e5(vcpu);
136cb7485daSChristian Borntraeger case 0xeb:
137cb7485daSChristian Borntraeger return kvm_s390_handle_eb(vcpu);
138cb7485daSChristian Borntraeger default:
139b8e660b8SHeiko Carstens return -EOPNOTSUPP;
140ba5c1e9bSCarsten Otte }
141cb7485daSChristian Borntraeger }
142ba5c1e9bSCarsten Otte
/*
 * inject_prog_on_prog_intercept - re-inject an intercepted program
 * interruption into the guest.
 *
 * Depending on the program-interruption code (iprcc), the hardware stores
 * additional parameters in different SIE control block fields; copy the
 * ones belonging to this code into the pgm irq before injecting it.
 */
static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = vcpu->arch.sie_block->iprcc,
		/* the PSW has already been rewound */
		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
	};

	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
	/* these codes come with a translation-exception code */
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		break;
	/* these codes come with an exception access id */
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	/* these codes come with translation-exception code plus access ids */
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		pgm_info.op_access_id = vcpu->arch.sie_block->oai;
		break;
	case PGM_MONITOR:
		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	case PGM_PROTECTION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	default:
		break;
	}

	/* a concurrent PER event adds the PER parameters on top */
	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
		pgm_info.per_code = vcpu->arch.sie_block->perc;
		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
	}
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
206439716a5SDavid Hildenbrand
207e325fe69SMichael Mueller /*
208e325fe69SMichael Mueller * restore ITDB to program-interruption TDB in guest lowcore
209e325fe69SMichael Mueller * and set TX abort indication if required
210e325fe69SMichael Mueller */
handle_itdb(struct kvm_vcpu * vcpu)211e325fe69SMichael Mueller static int handle_itdb(struct kvm_vcpu *vcpu)
212e325fe69SMichael Mueller {
213e325fe69SMichael Mueller struct kvm_s390_itdb *itdb;
214e325fe69SMichael Mueller int rc;
215e325fe69SMichael Mueller
216e325fe69SMichael Mueller if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
217e325fe69SMichael Mueller return 0;
218e325fe69SMichael Mueller if (current->thread.per_flags & PER_FLAG_NO_TE)
219e325fe69SMichael Mueller return 0;
220fe0ef003SNico Boehr itdb = phys_to_virt(vcpu->arch.sie_block->itdba);
221e325fe69SMichael Mueller rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
222e325fe69SMichael Mueller if (rc)
223e325fe69SMichael Mueller return rc;
224e325fe69SMichael Mueller memset(itdb, 0, sizeof(*itdb));
225e325fe69SMichael Mueller
226e325fe69SMichael Mueller return 0;
227e325fe69SMichael Mueller }
228e325fe69SMichael Mueller
22927291e21SDavid Hildenbrand #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
23027291e21SDavid Hildenbrand
should_handle_per_event(const struct kvm_vcpu * vcpu)23174a439efSIlya Leoshkevich static bool should_handle_per_event(const struct kvm_vcpu *vcpu)
23274a439efSIlya Leoshkevich {
23374a439efSIlya Leoshkevich if (!guestdbg_enabled(vcpu) || !per_event(vcpu))
23474a439efSIlya Leoshkevich return false;
23574a439efSIlya Leoshkevich if (guestdbg_sstep_enabled(vcpu) &&
23674a439efSIlya Leoshkevich vcpu->arch.sie_block->iprcc != PGM_PER) {
23774a439efSIlya Leoshkevich /*
23874a439efSIlya Leoshkevich * __vcpu_run() will exit after delivering the concurrently
23974a439efSIlya Leoshkevich * indicated condition.
24074a439efSIlya Leoshkevich */
24174a439efSIlya Leoshkevich return false;
24274a439efSIlya Leoshkevich }
24374a439efSIlya Leoshkevich return true;
24474a439efSIlya Leoshkevich }
24574a439efSIlya Leoshkevich
/*
 * handle_prog - handle a program-interruption intercept
 *
 * Order matters here: PER handling may filter the interruption out
 * completely, the specification-exception loop guard must run before
 * re-injection, and the transaction diagnostic block has to be saved to
 * the guest lowcore before the pgm irq is delivered.
 */
static int handle_prog(struct kvm_vcpu *vcpu)
{
	psw_t psw;
	int rc;

	vcpu->stat.exit_program_interruption++;

	/*
	 * Intercept 8 indicates a loop of specification exceptions
	 * for protected guests.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EOPNOTSUPP;

	if (should_handle_per_event(vcpu)) {
		rc = kvm_s390_handle_per_event(vcpu);
		if (rc)
			return rc;
		/* the interrupt might have been filtered out completely */
		if (vcpu->arch.sie_block->iprcc == 0)
			return 0;
	}

	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
		if (rc)
			return rc;
		/* Avoid endless loops of specification exceptions */
		if (!is_valid_psw(&psw))
			return -EOPNOTSUPP;
	}
	/* save the transaction diagnostic block before injecting */
	rc = handle_itdb(vcpu);
	if (rc)
		return rc;

	return inject_prog_on_prog_intercept(vcpu);
}
284ba5c1e9bSCarsten Otte
2859a558ee3SThomas Huth /**
286f14d82e0SThomas Huth * handle_external_interrupt - used for external interruption interceptions
28725b5476aSJanosch Frank * @vcpu: virtual cpu
288f14d82e0SThomas Huth *
28921f27df8SNico Boehr * This interception occurs if:
29021f27df8SNico Boehr * - the CPUSTAT_EXT_INT bit was already set when the external interrupt
29121f27df8SNico Boehr * occurred. In this case, the interrupt needs to be injected manually to
29221f27df8SNico Boehr * preserve interrupt priority.
29321f27df8SNico Boehr * - the external new PSW has external interrupts enabled, which will cause an
29421f27df8SNico Boehr * interruption loop. We drop to userspace in this case.
29521f27df8SNico Boehr *
29621f27df8SNico Boehr * The latter case can be detected by inspecting the external mask bit in the
29721f27df8SNico Boehr * external new psw.
29821f27df8SNico Boehr *
29921f27df8SNico Boehr * Under PV, only the latter case can occur, since interrupt priorities are
30021f27df8SNico Boehr * handled in the ultravisor.
301f14d82e0SThomas Huth */
handle_external_interrupt(struct kvm_vcpu * vcpu)302f14d82e0SThomas Huth static int handle_external_interrupt(struct kvm_vcpu *vcpu)
303f14d82e0SThomas Huth {
304f14d82e0SThomas Huth u16 eic = vcpu->arch.sie_block->eic;
305383d0b05SJens Freimann struct kvm_s390_irq irq;
306f14d82e0SThomas Huth psw_t newpsw;
307f14d82e0SThomas Huth int rc;
308f14d82e0SThomas Huth
309f14d82e0SThomas Huth vcpu->stat.exit_external_interrupt++;
310f14d82e0SThomas Huth
31121f27df8SNico Boehr if (kvm_s390_pv_cpu_is_protected(vcpu)) {
31221f27df8SNico Boehr newpsw = vcpu->arch.sie_block->gpsw;
31321f27df8SNico Boehr } else {
314f14d82e0SThomas Huth rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
315f14d82e0SThomas Huth if (rc)
316f14d82e0SThomas Huth return rc;
31721f27df8SNico Boehr }
31821f27df8SNico Boehr
31921f27df8SNico Boehr /*
32021f27df8SNico Boehr * Clock comparator or timer interrupt with external interrupt enabled
32121f27df8SNico Boehr * will cause interrupt loop. Drop to userspace.
32221f27df8SNico Boehr */
323f14d82e0SThomas Huth if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
324f14d82e0SThomas Huth (newpsw.mask & PSW_MASK_EXT))
325f14d82e0SThomas Huth return -EOPNOTSUPP;
326f14d82e0SThomas Huth
327f14d82e0SThomas Huth switch (eic) {
328f14d82e0SThomas Huth case EXT_IRQ_CLK_COMP:
329f14d82e0SThomas Huth irq.type = KVM_S390_INT_CLOCK_COMP;
330f14d82e0SThomas Huth break;
331f14d82e0SThomas Huth case EXT_IRQ_CPU_TIMER:
332f14d82e0SThomas Huth irq.type = KVM_S390_INT_CPU_TIMER;
333f14d82e0SThomas Huth break;
334f14d82e0SThomas Huth case EXT_IRQ_EXTERNAL_CALL:
335f14d82e0SThomas Huth irq.type = KVM_S390_INT_EXTERNAL_CALL;
336383d0b05SJens Freimann irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
337ea5f4969SDavid Hildenbrand rc = kvm_s390_inject_vcpu(vcpu, &irq);
338ea5f4969SDavid Hildenbrand /* ignore if another external call is already pending */
339ea5f4969SDavid Hildenbrand if (rc == -EBUSY)
340ea5f4969SDavid Hildenbrand return 0;
341ea5f4969SDavid Hildenbrand return rc;
342f14d82e0SThomas Huth default:
343f14d82e0SThomas Huth return -EOPNOTSUPP;
344f14d82e0SThomas Huth }
345f14d82e0SThomas Huth
346f14d82e0SThomas Huth return kvm_s390_inject_vcpu(vcpu, &irq);
347f14d82e0SThomas Huth }
348f14d82e0SThomas Huth
349f14d82e0SThomas Huth /**
35025b5476aSJanosch Frank * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
35125b5476aSJanosch Frank * @vcpu: virtual cpu
3529a558ee3SThomas Huth *
3539a558ee3SThomas Huth * This interception can only happen for guests with DAT disabled and
3549a558ee3SThomas Huth * addresses that are currently not mapped in the host. Thus we try to
3559a558ee3SThomas Huth * set up the mappings for the corresponding user pages here (or throw
3569a558ee3SThomas Huth * addressing exceptions in case of illegal guest addresses).
3579a558ee3SThomas Huth */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
	unsigned long srcaddr, dstaddr;
	int reg1, reg2, rc;

	/* MVPG is an RRE instruction: reg1 = destination, reg2 = source */
	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* Ensure that the source is paged-in, no actual access -> no key checking */
	rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg2],
					      reg2, &srcaddr, GACC_FETCH, 0);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
	if (rc != 0)
		return rc;

	/* Ensure that the destination is paged-in, no actual access -> no key checking */
	rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg1],
					      reg1, &dstaddr, GACC_STORE, 0);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
	if (rc != 0)
		return rc;

	/* both pages are now mapped; let the guest re-execute MVPG */
	kvm_s390_retry_instr(vcpu);

	return 0;
}
3879a558ee3SThomas Huth
handle_partial_execution(struct kvm_vcpu * vcpu)3889a558ee3SThomas Huth static int handle_partial_execution(struct kvm_vcpu *vcpu)
3899a558ee3SThomas Huth {
3909ec6de19SAlexander Yarygin vcpu->stat.exit_pei++;
3919ec6de19SAlexander Yarygin
3929a558ee3SThomas Huth if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
3939a558ee3SThomas Huth return handle_mvpg_pei(vcpu);
3944953919fSDavid Hildenbrand if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
3954953919fSDavid Hildenbrand return kvm_s390_handle_sigp_pei(vcpu);
3969a558ee3SThomas Huth
3979a558ee3SThomas Huth return -EOPNOTSUPP;
3989a558ee3SThomas Huth }
3999a558ee3SThomas Huth
400b7c92f1aSQingFeng Hao /*
401b7c92f1aSQingFeng Hao * Handle the sthyi instruction that provides the guest with system
402b7c92f1aSQingFeng Hao * information, like current CPU resources available at each level of
403b7c92f1aSQingFeng Hao * the machine.
404b7c92f1aSQingFeng Hao */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
	/* cc/rc follow the architected STHYI result pair; r is KVM's rc */
	int reg1, reg2, cc = 0, r = 0;
	u64 code, addr, rc = 0;
	struct sthyi_sctns *sctns = NULL;

	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];
	addr = vcpu->run->s.regs.gprs[reg2];

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

	/* operands must be distinct, even-numbered registers */
	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* unsupported function code: report cc 3 / rc 4 to the guest */
	if (code & 0xffff) {
		cc = 3;
		rc = 4;
		goto out;
	}

	/* for non-PV guests the response buffer must be page aligned */
	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sctns)
		return -ENOMEM;

	cc = sthyi_fill(sctns, &rc);
	/* a negative cc is an internal error, not an architected cc */
	if (cc < 0) {
		free_page((unsigned long)sctns);
		return cc;
	}
out:
	if (!cc) {
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			/* PV guests receive the result via the SIDA */
			memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
		} else {
			r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
			if (r) {
				free_page((unsigned long)sctns);
				return kvm_s390_inject_prog_cond(vcpu, r);
			}
		}
	}

	free_page((unsigned long)sctns);
	/* return code goes into the register following the address register */
	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
	kvm_s390_set_psw_cc(vcpu, cc);
	return r;
}
461b7c92f1aSQingFeng Hao
handle_operexc(struct kvm_vcpu * vcpu)462a011eeb2SJanosch Frank static int handle_operexc(struct kvm_vcpu *vcpu)
463a011eeb2SJanosch Frank {
464fb7dc1d4SChristian Borntraeger psw_t oldpsw, newpsw;
465fb7dc1d4SChristian Borntraeger int rc;
466fb7dc1d4SChristian Borntraeger
467a011eeb2SJanosch Frank vcpu->stat.exit_operation_exception++;
468a011eeb2SJanosch Frank trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
469a011eeb2SJanosch Frank vcpu->arch.sie_block->ipb);
470a011eeb2SJanosch Frank
471c0a6bfdcSChristian Borntraeger if (vcpu->arch.sie_block->ipa == 0xb256)
47295ca2cb5SJanosch Frank return handle_sthyi(vcpu);
47395ca2cb5SJanosch Frank
4746502a34cSDavid Hildenbrand if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
4756502a34cSDavid Hildenbrand return -EOPNOTSUPP;
476fb7dc1d4SChristian Borntraeger rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
477fb7dc1d4SChristian Borntraeger if (rc)
478fb7dc1d4SChristian Borntraeger return rc;
479fb7dc1d4SChristian Borntraeger /*
480fb7dc1d4SChristian Borntraeger * Avoid endless loops of operation exceptions, if the pgm new
481fb7dc1d4SChristian Borntraeger * PSW will cause a new operation exception.
482fb7dc1d4SChristian Borntraeger * The heuristic checks if the pgm new psw is within 6 bytes before
483fb7dc1d4SChristian Borntraeger * the faulting psw address (with same DAT, AS settings) and the
484fb7dc1d4SChristian Borntraeger * new psw is not a wait psw and the fault was not triggered by
485fb7dc1d4SChristian Borntraeger * problem state.
486fb7dc1d4SChristian Borntraeger */
487fb7dc1d4SChristian Borntraeger oldpsw = vcpu->arch.sie_block->gpsw;
488fb7dc1d4SChristian Borntraeger if (oldpsw.addr - newpsw.addr <= 6 &&
489fb7dc1d4SChristian Borntraeger !(newpsw.mask & PSW_MASK_WAIT) &&
490fb7dc1d4SChristian Borntraeger !(oldpsw.mask & PSW_MASK_PSTATE) &&
491fb7dc1d4SChristian Borntraeger (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
492fb7dc1d4SChristian Borntraeger (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
493fb7dc1d4SChristian Borntraeger return -EOPNOTSUPP;
4946502a34cSDavid Hildenbrand
495a011eeb2SJanosch Frank return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
496a011eeb2SJanosch Frank }
497a011eeb2SJanosch Frank
handle_pv_spx(struct kvm_vcpu * vcpu)498d274995eSJanosch Frank static int handle_pv_spx(struct kvm_vcpu *vcpu)
499d274995eSJanosch Frank {
500b99f4512SNico Boehr u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block);
501d274995eSJanosch Frank
502d274995eSJanosch Frank kvm_s390_set_prefix(vcpu, pref);
503d274995eSJanosch Frank trace_kvm_s390_handle_prefix(vcpu, 1, pref);
504d274995eSJanosch Frank return 0;
505d274995eSJanosch Frank }
506d274995eSJanosch Frank
/*
 * handle_pv_sclp - make delivery of an SCCB answering interrupt pending
 * for a protected guest after a SERVICE CALL notification intercept.
 */
static int handle_pv_sclp(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	/*
	 * 2 cases:
	 * a: an sccb answering interrupt was already pending or in flight.
	 *    As the sccb value is not known we can simply set some value to
	 *    trigger delivery of a saved SCCB. UV will then use its saved
	 *    copy of the SCCB value.
	 * b: an error SCCB interrupt needs to be injected so we also inject
	 *    a fake SCCB address. Firmware will use the proper one.
	 * This makes sure, that both errors and real sccb returns will only
	 * be delivered after a notification intercept (instruction has
	 * finished) but not after others.
	 */
	fi->srv_signal.ext_params |= 0x43000;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
5300890ddeaSChristian Borntraeger
/*
 * handle_pv_uvc - handle an ultravisor-call notification intercept.
 *
 * Only UVC_CMD_REMOVE_SHARED_ACCESS is expected here; it is translated
 * into an unpin-shared-page operation via gmap_make_secure().
 */
static int handle_pv_uvc(struct kvm_vcpu *vcpu)
{
	/* the guest's UV call control block is passed via the SIDA */
	struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
	struct uv_cb_cts uvcb = {
		.header.cmd	= UVC_CMD_UNPIN_PAGE_SHARED,
		.header.len	= sizeof(uvcb),
		.guest_handle	= kvm_s390_pv_get_handle(vcpu->kvm),
		.gaddr		= guest_uvcb->paddr,
	};
	int rc;

	if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
		WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
			  guest_uvcb->header.cmd);
		return 0;
	}
	rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
	/*
	 * If the unpin did not succeed, the guest will exit again for the UVC
	 * and we will retry the unpin.
	 */
	if (rc == -EINVAL)
		return 0;
	/*
	 * If we got -EAGAIN here, we simply return it. It will eventually
	 * get propagated all the way to userspace, which should then try
	 * again.
	 */
	return rc;
}
56153227810SClaudio Imbrenda
handle_pv_notification(struct kvm_vcpu * vcpu)562da24a0ccSJanosch Frank static int handle_pv_notification(struct kvm_vcpu *vcpu)
563da24a0ccSJanosch Frank {
564c3f0e5fdSNico Boehr int ret;
565c3f0e5fdSNico Boehr
566d274995eSJanosch Frank if (vcpu->arch.sie_block->ipa == 0xb210)
567d274995eSJanosch Frank return handle_pv_spx(vcpu);
5680890ddeaSChristian Borntraeger if (vcpu->arch.sie_block->ipa == 0xb220)
5690890ddeaSChristian Borntraeger return handle_pv_sclp(vcpu);
57053227810SClaudio Imbrenda if (vcpu->arch.sie_block->ipa == 0xb9a4)
57153227810SClaudio Imbrenda return handle_pv_uvc(vcpu);
572c3f0e5fdSNico Boehr if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
573c3f0e5fdSNico Boehr /*
574c3f0e5fdSNico Boehr * Besides external call, other SIGP orders also cause a
575c3f0e5fdSNico Boehr * 108 (pv notify) intercept. In contrast to external call,
576c3f0e5fdSNico Boehr * these orders need to be emulated and hence the appropriate
577c3f0e5fdSNico Boehr * place to handle them is in handle_instruction().
578c3f0e5fdSNico Boehr * So first try kvm_s390_handle_sigp_pei() and if that isn't
579c3f0e5fdSNico Boehr * successful, go on with handle_instruction().
580c3f0e5fdSNico Boehr */
581c3f0e5fdSNico Boehr ret = kvm_s390_handle_sigp_pei(vcpu);
582c3f0e5fdSNico Boehr if (!ret)
583c3f0e5fdSNico Boehr return ret;
584c3f0e5fdSNico Boehr }
5850890ddeaSChristian Borntraeger
586da24a0ccSJanosch Frank return handle_instruction(vcpu);
587da24a0ccSJanosch Frank }
588da24a0ccSJanosch Frank
should_handle_per_ifetch(const struct kvm_vcpu * vcpu,int rc)589ba853a4eSIlya Leoshkevich static bool should_handle_per_ifetch(const struct kvm_vcpu *vcpu, int rc)
590ba853a4eSIlya Leoshkevich {
591ba853a4eSIlya Leoshkevich /* Process PER, also if the instruction is processed in user space. */
592ba853a4eSIlya Leoshkevich if (!(vcpu->arch.sie_block->icptstatus & 0x02))
593ba853a4eSIlya Leoshkevich return false;
594ba853a4eSIlya Leoshkevich if (rc != 0 && rc != -EOPNOTSUPP)
595ba853a4eSIlya Leoshkevich return false;
596ba853a4eSIlya Leoshkevich if (guestdbg_sstep_enabled(vcpu) && vcpu->arch.local_int.pending_irqs)
597ba853a4eSIlya Leoshkevich /* __vcpu_run() will exit after delivering the interrupt. */
598ba853a4eSIlya Leoshkevich return false;
599ba853a4eSIlya Leoshkevich return true;
600ba853a4eSIlya Leoshkevich }
601ba853a4eSIlya Leoshkevich
/*
 * Top-level SIE intercept dispatcher: route the exit reason recorded in
 * icptcode to the matching in-kernel handler.
 *
 * Arms that "return" directly bypass the PER instruction-fetch check at
 * the bottom; arms that set rc and "break" fall through to it.
 *
 * Returns 0 on full in-kernel handling, -EOPNOTSUPP to hand the exit to
 * user space, or a negative error code from the individual handler.
 */
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	int rc, per_rc = 0;

	/* user-controlled VMs are handled entirely in user space */
	if (kvm_is_ucontrol(vcpu->kvm))
		return -EOPNOTSUPP;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_EXTREQ:
		/* only accounted here; the pre-run code acts on the request */
		vcpu->stat.exit_external_request++;
		return 0;
	case ICPT_IOREQ:
		vcpu->stat.exit_io_request++;
		return 0;
	case ICPT_INST:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PROGI:
		return handle_prog(vcpu);
	case ICPT_EXTINT:
		return handle_external_interrupt(vcpu);
	case ICPT_WAIT:
		return kvm_s390_handle_wait(vcpu);
	case ICPT_VALIDITY:
		return handle_validity(vcpu);
	case ICPT_STOP:
		return handle_stop(vcpu);
	case ICPT_OPEREXC:
		rc = handle_operexc(vcpu);
		break;
	case ICPT_PARTEXEC:
		rc = handle_partial_execution(vcpu);
		break;
	case ICPT_KSS:
		/* Instruction will be redriven, skip the PER check. */
		return kvm_s390_skey_check_enable(vcpu);
	case ICPT_MCHKREQ:
	case ICPT_INT_ENABLE:
		/*
		 * PSW bit 13 or a CR (0, 6, 14) changed and we might
		 * now be able to deliver interrupts. The pre-run code
		 * will take care of this.
		 */
		rc = 0;
		break;
	case ICPT_PV_INSTR:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PV_NOTIFY:
		rc = handle_pv_notification(vcpu);
		break;
	case ICPT_PV_PREF:
		/*
		 * Re-import the two prefix pages of the protected guest.
		 * Return values are intentionally ignored here; presumably a
		 * failed conversion re-triggers the same intercept — TODO
		 * confirm against the ultravisor/gmap semantics.
		 */
		rc = 0;
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu));
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* PER takes priority: report its rc in preference to the handler's */
	if (should_handle_per_ifetch(vcpu, rc))
		per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
	return per_rc ? per_rc : rc;
}
668