xref: /linux/arch/s390/kvm/interrupt.c (revision 51d90a15fedf8366cb96ef68d0ea2d0bf15417d2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * handling kvm guest interrupts
4  *
5  * Copyright IBM Corp. 2008, 2020
6  *
7  *    Author(s): Carsten Otte <cotte@de.ibm.com>
8  */
9 
10 #define pr_fmt(fmt) "kvm-s390: " fmt
11 
12 #include <linux/cpufeature.h>
13 #include <linux/interrupt.h>
14 #include <linux/kvm_host.h>
15 #include <linux/hrtimer.h>
16 #include <linux/export.h>
17 #include <linux/mmu_context.h>
18 #include <linux/nospec.h>
19 #include <linux/signal.h>
20 #include <linux/slab.h>
21 #include <linux/bitmap.h>
22 #include <linux/vmalloc.h>
23 #include <asm/access-regs.h>
24 #include <asm/asm-offsets.h>
25 #include <asm/dis.h>
26 #include <linux/uaccess.h>
27 #include <asm/sclp.h>
28 #include <asm/isc.h>
29 #include <asm/gmap.h>
30 #include <asm/nmi.h>
31 #include <asm/airq.h>
32 #include <asm/tpi.h>
33 #include "kvm-s390.h"
34 #include "gaccess.h"
35 #include "trace-s390.h"
36 #include "pci.h"
37 
38 #define PFAULT_INIT 0x0600
39 #define PFAULT_DONE 0x0680
40 #define VIRTIO_PARAM 0x0d00
41 
42 static struct kvm_s390_gib *gib;
43 
44 /* handle external calls via sigp interpretation facility */
sca_ext_call_pending(struct kvm_vcpu * vcpu,int * src_id)45 static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
46 {
47 	struct esca_block *sca = vcpu->kvm->arch.sca;
48 	union esca_sigp_ctrl sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
49 
50 	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
51 		return 0;
52 
53 	BUG_ON(!kvm_s390_use_sca_entries());
54 
55 	if (src_id)
56 		*src_id = sigp_ctrl.scn;
57 
58 	return sigp_ctrl.c;
59 }
60 
sca_inject_ext_call(struct kvm_vcpu * vcpu,int src_id)61 static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
62 {
63 	struct esca_block *sca = vcpu->kvm->arch.sca;
64 	union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
65 	union esca_sigp_ctrl old_val, new_val = {.scn = src_id, .c = 1};
66 	int expect, rc;
67 
68 	BUG_ON(!kvm_s390_use_sca_entries());
69 
70 	old_val = READ_ONCE(*sigp_ctrl);
71 	old_val.c = 0;
72 
73 	expect = old_val.value;
74 	rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
75 
76 	if (rc != expect) {
77 		/* another external call is pending */
78 		return -EBUSY;
79 	}
80 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
81 	return 0;
82 }
83 
sca_clear_ext_call(struct kvm_vcpu * vcpu)84 static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
85 {
86 	struct esca_block *sca = vcpu->kvm->arch.sca;
87 	union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
88 
89 	if (!kvm_s390_use_sca_entries())
90 		return;
91 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
92 
93 	WRITE_ONCE(sigp_ctrl->value, 0);
94 }
95 
psw_extint_disabled(struct kvm_vcpu * vcpu)96 int psw_extint_disabled(struct kvm_vcpu *vcpu)
97 {
98 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
99 }
100 
psw_ioint_disabled(struct kvm_vcpu * vcpu)101 static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
102 {
103 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
104 }
105 
psw_mchk_disabled(struct kvm_vcpu * vcpu)106 static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
107 {
108 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
109 }
110 
psw_interrupts_disabled(struct kvm_vcpu * vcpu)111 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
112 {
113 	return psw_extint_disabled(vcpu) &&
114 	       psw_ioint_disabled(vcpu) &&
115 	       psw_mchk_disabled(vcpu);
116 }
117 
ckc_interrupts_enabled(struct kvm_vcpu * vcpu)118 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
119 {
120 	if (psw_extint_disabled(vcpu) ||
121 	    !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
122 		return 0;
123 	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
124 		/* No timer interrupts when single stepping */
125 		return 0;
126 	return 1;
127 }
128 
ckc_irq_pending(struct kvm_vcpu * vcpu)129 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
130 {
131 	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
132 	const u64 ckc = vcpu->arch.sie_block->ckc;
133 
134 	if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
135 		if ((s64)ckc >= (s64)now)
136 			return 0;
137 	} else if (ckc >= now) {
138 		return 0;
139 	}
140 	return ckc_interrupts_enabled(vcpu);
141 }
142 
cpu_timer_interrupts_enabled(struct kvm_vcpu * vcpu)143 static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
144 {
145 	return !psw_extint_disabled(vcpu) &&
146 	       (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
147 }
148 
cpu_timer_irq_pending(struct kvm_vcpu * vcpu)149 static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
150 {
151 	if (!cpu_timer_interrupts_enabled(vcpu))
152 		return 0;
153 	return kvm_s390_get_cpu_timer(vcpu) >> 63;
154 }
155 
isc_to_isc_bits(int isc)156 static uint64_t isc_to_isc_bits(int isc)
157 {
158 	return (0x80 >> isc) << 24;
159 }
160 
isc_to_int_word(u8 isc)161 static inline u32 isc_to_int_word(u8 isc)
162 {
163 	return ((u32)isc << 27) | 0x80000000;
164 }
165 
int_word_to_isc(u32 int_word)166 static inline u8 int_word_to_isc(u32 int_word)
167 {
168 	return (int_word & 0x38000000) >> 27;
169 }
170 
171 /*
172  * To use atomic bitmap functions, we have to provide a bitmap address
173  * that is u64 aligned. However, the ipm might be u32 aligned.
174  * Therefore, we logically start the bitmap at the very beginning of the
175  * struct and fixup the bit number.
176  */
177 #define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
178 
179 /**
180  * gisa_set_iam - change the GISA interruption alert mask
181  *
182  * @gisa: gisa to operate on
183  * @iam: new IAM value to use
184  *
185  * Change the IAM atomically with the next alert address and the IPM
186  * of the GISA if the GISA is not part of the GIB alert list. All three
187  * fields are located in the first long word of the GISA.
188  *
189  * Returns: 0 on success
190  *          -EBUSY in case the gisa is part of the alert list
191  */
gisa_set_iam(struct kvm_s390_gisa * gisa,u8 iam)192 static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
193 {
194 	u64 word, _word;
195 
196 	word = READ_ONCE(gisa->u64.word[0]);
197 	do {
198 		if ((u64)gisa != word >> 32)
199 			return -EBUSY;
200 		_word = (word & ~0xffUL) | iam;
201 	} while (!try_cmpxchg(&gisa->u64.word[0], &word, _word));
202 
203 	return 0;
204 }
205 
206 /**
207  * gisa_clear_ipm - clear the GISA interruption pending mask
208  *
209  * @gisa: gisa to operate on
210  *
211  * Clear the IPM atomically with the next alert address and the IAM
212  * of the GISA unconditionally. All three fields are located in the
213  * first long word of the GISA.
214  */
gisa_clear_ipm(struct kvm_s390_gisa * gisa)215 static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
216 {
217 	u64 word, _word;
218 
219 	word = READ_ONCE(gisa->u64.word[0]);
220 	do {
221 		_word = word & ~(0xffUL << 24);
222 	} while (!try_cmpxchg(&gisa->u64.word[0], &word, _word));
223 }
224 
225 /**
226  * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
227  *
228  * @gi: gisa interrupt struct to work on
229  *
230  * Atomically restores the interruption alert mask if none of the
231  * relevant ISCs are pending and return the IPM.
232  *
233  * Returns: the relevant pending ISCs
234  */
gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt * gi)235 static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
236 {
237 	u8 pending_mask, alert_mask;
238 	u64 word, _word;
239 
240 	word = READ_ONCE(gi->origin->u64.word[0]);
241 	do {
242 		alert_mask = READ_ONCE(gi->alert.mask);
243 		pending_mask = (u8)(word >> 24) & alert_mask;
244 		if (pending_mask)
245 			return pending_mask;
246 		_word = (word & ~0xffUL) | alert_mask;
247 	} while (!try_cmpxchg(&gi->origin->u64.word[0], &word, _word));
248 
249 	return 0;
250 }
251 
gisa_set_ipm_gisc(struct kvm_s390_gisa * gisa,u32 gisc)252 static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
253 {
254 	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
255 }
256 
gisa_get_ipm(struct kvm_s390_gisa * gisa)257 static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
258 {
259 	return READ_ONCE(gisa->ipm);
260 }
261 
gisa_tac_ipm_gisc(struct kvm_s390_gisa * gisa,u32 gisc)262 static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
263 {
264 	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
265 }
266 
pending_irqs_no_gisa(struct kvm_vcpu * vcpu)267 static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
268 {
269 	unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs |
270 				vcpu->arch.local_int.pending_irqs;
271 
272 	pending &= ~vcpu->kvm->arch.float_int.masked_irqs;
273 	return pending;
274 }
275 
pending_irqs(struct kvm_vcpu * vcpu)276 static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
277 {
278 	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
279 	unsigned long pending_mask;
280 
281 	pending_mask = pending_irqs_no_gisa(vcpu);
282 	if (gi->origin)
283 		pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
284 	return pending_mask;
285 }
286 
isc_to_irq_type(unsigned long isc)287 static inline int isc_to_irq_type(unsigned long isc)
288 {
289 	return IRQ_PEND_IO_ISC_0 - isc;
290 }
291 
irq_type_to_isc(unsigned long irq_type)292 static inline int irq_type_to_isc(unsigned long irq_type)
293 {
294 	return IRQ_PEND_IO_ISC_0 - irq_type;
295 }
296 
disable_iscs(struct kvm_vcpu * vcpu,unsigned long active_mask)297 static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
298 				   unsigned long active_mask)
299 {
300 	int i;
301 
302 	for (i = 0; i <= MAX_ISC; i++)
303 		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
304 			active_mask &= ~(1UL << (isc_to_irq_type(i)));
305 
306 	return active_mask;
307 }
308 
deliverable_irqs(struct kvm_vcpu * vcpu)309 static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
310 {
311 	unsigned long active_mask;
312 
313 	active_mask = pending_irqs(vcpu);
314 	if (!active_mask)
315 		return 0;
316 
317 	if (psw_extint_disabled(vcpu))
318 		active_mask &= ~IRQ_PEND_EXT_MASK;
319 	if (psw_ioint_disabled(vcpu))
320 		active_mask &= ~IRQ_PEND_IO_MASK;
321 	else
322 		active_mask = disable_iscs(vcpu, active_mask);
323 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
324 		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
325 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
326 		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
327 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
328 		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
329 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
330 		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
331 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) {
332 		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
333 		__clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask);
334 	}
335 	if (psw_mchk_disabled(vcpu))
336 		active_mask &= ~IRQ_PEND_MCHK_MASK;
337 	/* PV guest cpus can have a single interruption injected at a time. */
338 	if (kvm_s390_pv_cpu_get_handle(vcpu) &&
339 	    vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
340 		active_mask &= ~(IRQ_PEND_EXT_II_MASK |
341 				 IRQ_PEND_IO_MASK |
342 				 IRQ_PEND_MCHK_MASK);
343 	/*
344 	 * Check both floating and local interrupt's cr14 because
345 	 * bit IRQ_PEND_MCHK_REP could be set in both cases.
346 	 */
347 	if (!(vcpu->arch.sie_block->gcr[14] &
348 	   (vcpu->kvm->arch.float_int.mchk.cr14 |
349 	   vcpu->arch.local_int.irq.mchk.cr14)))
350 		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
351 
352 	/*
353 	 * STOP irqs will never be actively delivered. They are triggered via
354 	 * intercept requests and cleared when the stop intercept is performed.
355 	 */
356 	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);
357 
358 	return active_mask;
359 }
360 
__set_cpu_idle(struct kvm_vcpu * vcpu)361 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
362 {
363 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
364 	set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
365 }
366 
__unset_cpu_idle(struct kvm_vcpu * vcpu)367 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
368 {
369 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
370 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
371 }
372 
__reset_intercept_indicators(struct kvm_vcpu * vcpu)373 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
374 {
375 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
376 				      CPUSTAT_STOP_INT);
377 	vcpu->arch.sie_block->lctl = 0x0000;
378 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
379 
380 	if (guestdbg_enabled(vcpu)) {
381 		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
382 					       LCTL_CR10 | LCTL_CR11);
383 		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
384 	}
385 }
386 
set_intercept_indicators_io(struct kvm_vcpu * vcpu)387 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
388 {
389 	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
390 		return;
391 	if (psw_ioint_disabled(vcpu))
392 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
393 	else
394 		vcpu->arch.sie_block->lctl |= LCTL_CR6;
395 }
396 
set_intercept_indicators_ext(struct kvm_vcpu * vcpu)397 static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
398 {
399 	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
400 		return;
401 	if (psw_extint_disabled(vcpu))
402 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
403 	else
404 		vcpu->arch.sie_block->lctl |= LCTL_CR0;
405 }
406 
set_intercept_indicators_mchk(struct kvm_vcpu * vcpu)407 static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
408 {
409 	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
410 		return;
411 	if (psw_mchk_disabled(vcpu))
412 		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
413 	else
414 		vcpu->arch.sie_block->lctl |= LCTL_CR14;
415 }
416 
set_intercept_indicators_stop(struct kvm_vcpu * vcpu)417 static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
418 {
419 	if (kvm_s390_is_stop_irq_pending(vcpu))
420 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
421 }
422 
423 /* Set interception request for non-deliverable interrupts */
set_intercept_indicators(struct kvm_vcpu * vcpu)424 static void set_intercept_indicators(struct kvm_vcpu *vcpu)
425 {
426 	set_intercept_indicators_io(vcpu);
427 	set_intercept_indicators_ext(vcpu);
428 	set_intercept_indicators_mchk(vcpu);
429 	set_intercept_indicators_stop(vcpu);
430 }
431 
__deliver_cpu_timer(struct kvm_vcpu * vcpu)432 static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
433 {
434 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
435 	int rc = 0;
436 
437 	vcpu->stat.deliver_cputm++;
438 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
439 					 0, 0);
440 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
441 		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
442 		vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER;
443 	} else {
444 		rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
445 				   (u16 *)__LC_EXT_INT_CODE);
446 		rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
447 		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
448 				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
449 		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
450 				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
451 	}
452 	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
453 	return rc ? -EFAULT : 0;
454 }
455 
__deliver_ckc(struct kvm_vcpu * vcpu)456 static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
457 {
458 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
459 	int rc = 0;
460 
461 	vcpu->stat.deliver_ckc++;
462 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
463 					 0, 0);
464 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
465 		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
466 		vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP;
467 	} else {
468 		rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
469 				   (u16 __user *)__LC_EXT_INT_CODE);
470 		rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
471 		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
472 				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
473 		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
474 				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
475 	}
476 	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
477 	return rc ? -EFAULT : 0;
478 }
479 
__deliver_pfault_init(struct kvm_vcpu * vcpu)480 static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
481 {
482 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
483 	struct kvm_s390_ext_info ext;
484 	int rc;
485 
486 	spin_lock(&li->lock);
487 	ext = li->irq.ext;
488 	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
489 	li->irq.ext.ext_params2 = 0;
490 	spin_unlock(&li->lock);
491 
492 	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
493 		   ext.ext_params2);
494 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
495 					 KVM_S390_INT_PFAULT_INIT,
496 					 0, ext.ext_params2);
497 
498 	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
499 	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
500 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
501 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
502 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
503 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
504 	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
505 	return rc ? -EFAULT : 0;
506 }
507 
__write_machine_check(struct kvm_vcpu * vcpu,struct kvm_s390_mchk_info * mchk)508 static int __write_machine_check(struct kvm_vcpu *vcpu,
509 				 struct kvm_s390_mchk_info *mchk)
510 {
511 	unsigned long ext_sa_addr;
512 	unsigned long lc;
513 	freg_t fprs[NUM_FPRS];
514 	union mci mci;
515 	int rc;
516 
517 	/*
518 	 * All other possible payload for a machine check (e.g. the register
519 	 * contents in the save area) will be handled by the ultravisor, as
520 	 * the hypervisor does not not have the needed information for
521 	 * protected guests.
522 	 */
523 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
524 		vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK;
525 		vcpu->arch.sie_block->mcic = mchk->mcic;
526 		vcpu->arch.sie_block->faddr = mchk->failing_storage_address;
527 		vcpu->arch.sie_block->edc = mchk->ext_damage_code;
528 		return 0;
529 	}
530 
531 	mci.val = mchk->mcic;
532 	/* take care of lazy register loading */
533 	kvm_s390_fpu_store(vcpu->run);
534 	save_access_regs(vcpu->run->s.regs.acrs);
535 	if (cpu_has_gs() && vcpu->arch.gs_enabled)
536 		save_gs_cb(current->thread.gs_cb);
537 
538 	/* Extended save area */
539 	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
540 			   sizeof(unsigned long));
541 	/* Only bits 0 through 63-LC are used for address formation */
542 	lc = ext_sa_addr & MCESA_LC_MASK;
543 	if (test_kvm_facility(vcpu->kvm, 133)) {
544 		switch (lc) {
545 		case 0:
546 		case 10:
547 			ext_sa_addr &= ~0x3ffUL;
548 			break;
549 		case 11:
550 			ext_sa_addr &= ~0x7ffUL;
551 			break;
552 		case 12:
553 			ext_sa_addr &= ~0xfffUL;
554 			break;
555 		default:
556 			ext_sa_addr = 0;
557 			break;
558 		}
559 	} else {
560 		ext_sa_addr &= ~0x3ffUL;
561 	}
562 
563 	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
564 		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
565 				    512))
566 			mci.vr = 0;
567 	} else {
568 		mci.vr = 0;
569 	}
570 	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
571 	    && (lc == 11 || lc == 12)) {
572 		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
573 				    &vcpu->run->s.regs.gscb, 32))
574 			mci.gs = 0;
575 	} else {
576 		mci.gs = 0;
577 	}
578 
579 	/* General interruption information */
580 	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
581 	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
582 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
583 	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
584 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
585 	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
586 
587 	/* Register-save areas */
588 	if (cpu_has_vx()) {
589 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
590 		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
591 	} else {
592 		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
593 				     vcpu->run->s.regs.fprs, 128);
594 	}
595 	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
596 			     vcpu->run->s.regs.gprs, 128);
597 	rc |= put_guest_lc(vcpu, vcpu->run->s.regs.fpc,
598 			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
599 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
600 			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
601 	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
602 			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
603 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
604 			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
605 	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
606 			     &vcpu->run->s.regs.acrs, 64);
607 	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
608 			     &vcpu->arch.sie_block->gcr, 128);
609 
610 	/* Extended interruption information */
611 	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
612 			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
613 	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
614 			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
615 	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
616 			     sizeof(mchk->fixed_logout));
617 	return rc ? -EFAULT : 0;
618 }
619 
__deliver_machine_check(struct kvm_vcpu * vcpu)620 static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
621 {
622 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
623 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
624 	struct kvm_s390_mchk_info mchk = {};
625 	int deliver = 0;
626 	int rc = 0;
627 
628 	spin_lock(&fi->lock);
629 	spin_lock(&li->lock);
630 	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
631 	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
632 		/*
633 		 * If there was an exigent machine check pending, then any
634 		 * repressible machine checks that might have been pending
635 		 * are indicated along with it, so always clear bits for
636 		 * repressible and exigent interrupts
637 		 */
638 		mchk = li->irq.mchk;
639 		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
640 		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
641 		memset(&li->irq.mchk, 0, sizeof(mchk));
642 		deliver = 1;
643 	}
644 	/*
645 	 * We indicate floating repressible conditions along with
646 	 * other pending conditions. Channel Report Pending and Channel
647 	 * Subsystem damage are the only two and are indicated by
648 	 * bits in mcic and masked in cr14.
649 	 */
650 	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
651 		mchk.mcic |= fi->mchk.mcic;
652 		mchk.cr14 |= fi->mchk.cr14;
653 		memset(&fi->mchk, 0, sizeof(mchk));
654 		deliver = 1;
655 	}
656 	spin_unlock(&li->lock);
657 	spin_unlock(&fi->lock);
658 
659 	if (deliver) {
660 		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
661 			   mchk.mcic);
662 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
663 						 KVM_S390_MCHK,
664 						 mchk.cr14, mchk.mcic);
665 		vcpu->stat.deliver_machine_check++;
666 		rc = __write_machine_check(vcpu, &mchk);
667 	}
668 	return rc;
669 }
670 
__deliver_restart(struct kvm_vcpu * vcpu)671 static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
672 {
673 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
674 	int rc = 0;
675 
676 	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
677 	vcpu->stat.deliver_restart_signal++;
678 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
679 
680 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
681 		vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART;
682 	} else {
683 		rc  = write_guest_lc(vcpu,
684 				     offsetof(struct lowcore, restart_old_psw),
685 				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
686 		rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
687 				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
688 	}
689 	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
690 	return rc ? -EFAULT : 0;
691 }
692 
__deliver_set_prefix(struct kvm_vcpu * vcpu)693 static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
694 {
695 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
696 	struct kvm_s390_prefix_info prefix;
697 
698 	spin_lock(&li->lock);
699 	prefix = li->irq.prefix;
700 	li->irq.prefix.address = 0;
701 	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
702 	spin_unlock(&li->lock);
703 
704 	vcpu->stat.deliver_prefix_signal++;
705 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
706 					 KVM_S390_SIGP_SET_PREFIX,
707 					 prefix.address, 0);
708 
709 	kvm_s390_set_prefix(vcpu, prefix.address);
710 	return 0;
711 }
712 
__deliver_emergency_signal(struct kvm_vcpu * vcpu)713 static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
714 {
715 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
716 	int rc;
717 	int cpu_addr;
718 
719 	spin_lock(&li->lock);
720 	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
721 	clear_bit(cpu_addr, li->sigp_emerg_pending);
722 	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
723 		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
724 	spin_unlock(&li->lock);
725 
726 	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
727 	vcpu->stat.deliver_emergency_signal++;
728 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
729 					 cpu_addr, 0);
730 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
731 		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
732 		vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG;
733 		vcpu->arch.sie_block->extcpuaddr = cpu_addr;
734 		return 0;
735 	}
736 
737 	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
738 			   (u16 *)__LC_EXT_INT_CODE);
739 	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
740 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
741 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
742 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
743 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
744 	return rc ? -EFAULT : 0;
745 }
746 
__deliver_external_call(struct kvm_vcpu * vcpu)747 static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
748 {
749 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
750 	struct kvm_s390_extcall_info extcall;
751 	int rc;
752 
753 	spin_lock(&li->lock);
754 	extcall = li->irq.extcall;
755 	li->irq.extcall.code = 0;
756 	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
757 	spin_unlock(&li->lock);
758 
759 	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
760 	vcpu->stat.deliver_external_call++;
761 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
762 					 KVM_S390_INT_EXTERNAL_CALL,
763 					 extcall.code, 0);
764 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
765 		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
766 		vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL;
767 		vcpu->arch.sie_block->extcpuaddr = extcall.code;
768 		return 0;
769 	}
770 
771 	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
772 			   (u16 *)__LC_EXT_INT_CODE);
773 	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
774 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
775 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
776 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
777 			    sizeof(psw_t));
778 	return rc ? -EFAULT : 0;
779 }
780 
__deliver_prog_pv(struct kvm_vcpu * vcpu,u16 code)781 static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code)
782 {
783 	switch (code) {
784 	case PGM_SPECIFICATION:
785 		vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION;
786 		break;
787 	case PGM_OPERAND:
788 		vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND;
789 		break;
790 	default:
791 		return -EINVAL;
792 	}
793 	return 0;
794 }
795 
__deliver_prog(struct kvm_vcpu * vcpu)796 static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
797 {
798 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
799 	struct kvm_s390_pgm_info pgm_info;
800 	int rc = 0, nullifying = false;
801 	u16 ilen;
802 
803 	spin_lock(&li->lock);
804 	pgm_info = li->irq.pgm;
805 	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
806 	memset(&li->irq.pgm, 0, sizeof(pgm_info));
807 	spin_unlock(&li->lock);
808 
809 	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
810 	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
811 		   pgm_info.code, ilen);
812 	vcpu->stat.deliver_program++;
813 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
814 					 pgm_info.code, 0);
815 
816 	/* PER is handled by the ultravisor */
817 	if (kvm_s390_pv_cpu_is_protected(vcpu))
818 		return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER);
819 
820 	switch (pgm_info.code & ~PGM_PER) {
821 	case PGM_AFX_TRANSLATION:
822 	case PGM_ASX_TRANSLATION:
823 	case PGM_EX_TRANSLATION:
824 	case PGM_LFX_TRANSLATION:
825 	case PGM_LSTE_SEQUENCE:
826 	case PGM_LSX_TRANSLATION:
827 	case PGM_LX_TRANSLATION:
828 	case PGM_PRIMARY_AUTHORITY:
829 	case PGM_SECONDARY_AUTHORITY:
830 		nullifying = true;
831 		fallthrough;
832 	case PGM_SPACE_SWITCH:
833 		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
834 				  (u64 *)__LC_TRANS_EXC_CODE);
835 		break;
836 	case PGM_ALEN_TRANSLATION:
837 	case PGM_ALE_SEQUENCE:
838 	case PGM_ASTE_INSTANCE:
839 	case PGM_ASTE_SEQUENCE:
840 	case PGM_ASTE_VALIDITY:
841 	case PGM_EXTENDED_AUTHORITY:
842 		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
843 				  (u8 *)__LC_EXC_ACCESS_ID);
844 		nullifying = true;
845 		break;
846 	case PGM_ASCE_TYPE:
847 	case PGM_PAGE_TRANSLATION:
848 	case PGM_REGION_FIRST_TRANS:
849 	case PGM_REGION_SECOND_TRANS:
850 	case PGM_REGION_THIRD_TRANS:
851 	case PGM_SEGMENT_TRANSLATION:
852 		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
853 				  (u64 *)__LC_TRANS_EXC_CODE);
854 		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
855 				   (u8 *)__LC_EXC_ACCESS_ID);
856 		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
857 				   (u8 *)__LC_OP_ACCESS_ID);
858 		nullifying = true;
859 		break;
860 	case PGM_MONITOR:
861 		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
862 				  (u16 *)__LC_MON_CLASS_NR);
863 		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
864 				   (u64 *)__LC_MON_CODE);
865 		break;
866 	case PGM_VECTOR_PROCESSING:
867 	case PGM_DATA:
868 		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
869 				  (u32 *)__LC_DATA_EXC_CODE);
870 		break;
871 	case PGM_PROTECTION:
872 		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
873 				  (u64 *)__LC_TRANS_EXC_CODE);
874 		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
875 				   (u8 *)__LC_EXC_ACCESS_ID);
876 		break;
877 	case PGM_STACK_FULL:
878 	case PGM_STACK_EMPTY:
879 	case PGM_STACK_SPECIFICATION:
880 	case PGM_STACK_TYPE:
881 	case PGM_STACK_OPERATION:
882 	case PGM_TRACE_TABEL:
883 	case PGM_CRYPTO_OPERATION:
884 		nullifying = true;
885 		break;
886 	}
887 
888 	if (pgm_info.code & PGM_PER) {
889 		rc |= put_guest_lc(vcpu, pgm_info.per_code,
890 				   (u8 *) __LC_PER_CODE);
891 		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
892 				   (u8 *)__LC_PER_ATMID);
893 		rc |= put_guest_lc(vcpu, pgm_info.per_address,
894 				   (u64 *) __LC_PER_ADDRESS);
895 		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
896 				   (u8 *) __LC_PER_ACCESS_ID);
897 	}
898 
899 	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
900 		kvm_s390_rewind_psw(vcpu, ilen);
901 
902 	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
903 	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
904 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
905 				 (u64 *) __LC_PGM_LAST_BREAK);
906 	rc |= put_guest_lc(vcpu, pgm_info.code, (u16 *)__LC_PGM_CODE);
907 	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
908 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
909 	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
910 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
911 	return rc ? -EFAULT : 0;
912 }
913 
914 #define SCCB_MASK 0xFFFFFFF8
915 #define SCCB_EVENT_PENDING 0x3
916 
write_sclp(struct kvm_vcpu * vcpu,u32 parm)917 static int write_sclp(struct kvm_vcpu *vcpu, u32 parm)
918 {
919 	int rc;
920 
921 	if (kvm_s390_pv_cpu_get_handle(vcpu)) {
922 		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
923 		vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG;
924 		vcpu->arch.sie_block->eiparams = parm;
925 		return 0;
926 	}
927 
928 	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
929 	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
930 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
931 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
932 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
933 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
934 	rc |= put_guest_lc(vcpu, parm,
935 			   (u32 *)__LC_EXT_PARAMS);
936 
937 	return rc ? -EFAULT : 0;
938 }
939 
__deliver_service(struct kvm_vcpu * vcpu)940 static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
941 {
942 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
943 	struct kvm_s390_ext_info ext;
944 
945 	spin_lock(&fi->lock);
946 	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) ||
947 	    !(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
948 		spin_unlock(&fi->lock);
949 		return 0;
950 	}
951 	ext = fi->srv_signal;
952 	memset(&fi->srv_signal, 0, sizeof(ext));
953 	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
954 	clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
955 	if (kvm_s390_pv_cpu_is_protected(vcpu))
956 		set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
957 	spin_unlock(&fi->lock);
958 
959 	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
960 		   ext.ext_params);
961 	vcpu->stat.deliver_service_signal++;
962 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
963 					 ext.ext_params, 0);
964 
965 	return write_sclp(vcpu, ext.ext_params);
966 }
967 
__deliver_service_ev(struct kvm_vcpu * vcpu)968 static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
969 {
970 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
971 	struct kvm_s390_ext_info ext;
972 
973 	spin_lock(&fi->lock);
974 	if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) {
975 		spin_unlock(&fi->lock);
976 		return 0;
977 	}
978 	ext = fi->srv_signal;
979 	/* only clear the event bits */
980 	fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
981 	clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
982 	spin_unlock(&fi->lock);
983 
984 	VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event");
985 	vcpu->stat.deliver_service_signal++;
986 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
987 					 ext.ext_params, 0);
988 
989 	return write_sclp(vcpu, ext.ext_params & SCCB_EVENT_PENDING);
990 }
991 
__deliver_pfault_done(struct kvm_vcpu * vcpu)992 static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
993 {
994 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
995 	struct kvm_s390_interrupt_info *inti;
996 	int rc = 0;
997 
998 	spin_lock(&fi->lock);
999 	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
1000 					struct kvm_s390_interrupt_info,
1001 					list);
1002 	if (inti) {
1003 		list_del(&inti->list);
1004 		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
1005 	}
1006 	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
1007 		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1008 	spin_unlock(&fi->lock);
1009 
1010 	if (inti) {
1011 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
1012 						 KVM_S390_INT_PFAULT_DONE, 0,
1013 						 inti->ext.ext_params2);
1014 		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
1015 			   inti->ext.ext_params2);
1016 
1017 		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
1018 				(u16 *)__LC_EXT_INT_CODE);
1019 		rc |= put_guest_lc(vcpu, PFAULT_DONE,
1020 				(u16 *)__LC_EXT_CPU_ADDR);
1021 		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
1022 				&vcpu->arch.sie_block->gpsw,
1023 				sizeof(psw_t));
1024 		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
1025 				&vcpu->arch.sie_block->gpsw,
1026 				sizeof(psw_t));
1027 		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
1028 				(u64 *)__LC_EXT_PARAMS2);
1029 		kfree(inti);
1030 	}
1031 	return rc ? -EFAULT : 0;
1032 }
1033 
__deliver_virtio(struct kvm_vcpu * vcpu)1034 static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
1035 {
1036 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
1037 	struct kvm_s390_interrupt_info *inti;
1038 	int rc = 0;
1039 
1040 	spin_lock(&fi->lock);
1041 	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
1042 					struct kvm_s390_interrupt_info,
1043 					list);
1044 	if (inti) {
1045 		VCPU_EVENT(vcpu, 4,
1046 			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
1047 			   inti->ext.ext_params, inti->ext.ext_params2);
1048 		vcpu->stat.deliver_virtio++;
1049 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
1050 				inti->type,
1051 				inti->ext.ext_params,
1052 				inti->ext.ext_params2);
1053 		list_del(&inti->list);
1054 		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
1055 	}
1056 	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
1057 		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1058 	spin_unlock(&fi->lock);
1059 
1060 	if (inti) {
1061 		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
1062 				(u16 *)__LC_EXT_INT_CODE);
1063 		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
1064 				(u16 *)__LC_EXT_CPU_ADDR);
1065 		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
1066 				&vcpu->arch.sie_block->gpsw,
1067 				sizeof(psw_t));
1068 		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
1069 				&vcpu->arch.sie_block->gpsw,
1070 				sizeof(psw_t));
1071 		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
1072 				(u32 *)__LC_EXT_PARAMS);
1073 		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
1074 				(u64 *)__LC_EXT_PARAMS2);
1075 		kfree(inti);
1076 	}
1077 	return rc ? -EFAULT : 0;
1078 }
1079 
__do_deliver_io(struct kvm_vcpu * vcpu,struct kvm_s390_io_info * io)1080 static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
1081 {
1082 	int rc;
1083 
1084 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
1085 		vcpu->arch.sie_block->iictl = IICTL_CODE_IO;
1086 		vcpu->arch.sie_block->subchannel_id = io->subchannel_id;
1087 		vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr;
1088 		vcpu->arch.sie_block->io_int_parm = io->io_int_parm;
1089 		vcpu->arch.sie_block->io_int_word = io->io_int_word;
1090 		return 0;
1091 	}
1092 
1093 	rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
1094 	rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
1095 	rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
1096 	rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
1097 	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
1098 			     &vcpu->arch.sie_block->gpsw,
1099 			     sizeof(psw_t));
1100 	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
1101 			    &vcpu->arch.sie_block->gpsw,
1102 			    sizeof(psw_t));
1103 	return rc ? -EFAULT : 0;
1104 }
1105 
__deliver_io(struct kvm_vcpu * vcpu,unsigned long irq_type)1106 static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
1107 				     unsigned long irq_type)
1108 {
1109 	struct list_head *isc_list;
1110 	struct kvm_s390_float_interrupt *fi;
1111 	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
1112 	struct kvm_s390_interrupt_info *inti = NULL;
1113 	struct kvm_s390_io_info io;
1114 	u32 isc;
1115 	int rc = 0;
1116 
1117 	fi = &vcpu->kvm->arch.float_int;
1118 
1119 	spin_lock(&fi->lock);
1120 	isc = irq_type_to_isc(irq_type);
1121 	isc_list = &fi->lists[isc];
1122 	inti = list_first_entry_or_null(isc_list,
1123 					struct kvm_s390_interrupt_info,
1124 					list);
1125 	if (inti) {
1126 		if (inti->type & KVM_S390_INT_IO_AI_MASK)
1127 			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
1128 		else
1129 			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
1130 			inti->io.subchannel_id >> 8,
1131 			inti->io.subchannel_id >> 1 & 0x3,
1132 			inti->io.subchannel_nr);
1133 
1134 		vcpu->stat.deliver_io++;
1135 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
1136 				inti->type,
1137 				((__u32)inti->io.subchannel_id << 16) |
1138 				inti->io.subchannel_nr,
1139 				((__u64)inti->io.io_int_parm << 32) |
1140 				inti->io.io_int_word);
1141 		list_del(&inti->list);
1142 		fi->counters[FIRQ_CNTR_IO] -= 1;
1143 	}
1144 	if (list_empty(isc_list))
1145 		clear_bit(irq_type, &fi->pending_irqs);
1146 	spin_unlock(&fi->lock);
1147 
1148 	if (inti) {
1149 		rc = __do_deliver_io(vcpu, &(inti->io));
1150 		kfree(inti);
1151 		goto out;
1152 	}
1153 
1154 	if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
1155 		/*
1156 		 * in case an adapter interrupt was not delivered
1157 		 * in SIE context KVM will handle the delivery
1158 		 */
1159 		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
1160 		memset(&io, 0, sizeof(io));
1161 		io.io_int_word = isc_to_int_word(isc);
1162 		vcpu->stat.deliver_io++;
1163 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
1164 			KVM_S390_INT_IO(1, 0, 0, 0),
1165 			((__u32)io.subchannel_id << 16) |
1166 			io.subchannel_nr,
1167 			((__u64)io.io_int_parm << 32) |
1168 			io.io_int_word);
1169 		rc = __do_deliver_io(vcpu, &io);
1170 	}
1171 out:
1172 	return rc;
1173 }
1174 
1175 /* Check whether an external call is pending (deliverable or not) */
kvm_s390_ext_call_pending(struct kvm_vcpu * vcpu)1176 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
1177 {
1178 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1179 
1180 	if (!kvm_s390_use_sca_entries())
1181 		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
1182 
1183 	return sca_ext_call_pending(vcpu, NULL);
1184 }
1185 
kvm_s390_vcpu_has_irq(struct kvm_vcpu * vcpu,int exclude_stop)1186 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
1187 {
1188 	if (deliverable_irqs(vcpu))
1189 		return 1;
1190 
1191 	if (kvm_cpu_has_pending_timer(vcpu))
1192 		return 1;
1193 
1194 	/* external call pending and deliverable */
1195 	if (kvm_s390_ext_call_pending(vcpu) &&
1196 	    !psw_extint_disabled(vcpu) &&
1197 	    (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
1198 		return 1;
1199 
1200 	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
1201 		return 1;
1202 	return 0;
1203 }
1204 
kvm_cpu_has_pending_timer(struct kvm_vcpu * vcpu)1205 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1206 {
1207 	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
1208 }
1209 
__calculate_sltime(struct kvm_vcpu * vcpu)1210 static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
1211 {
1212 	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
1213 	const u64 ckc = vcpu->arch.sie_block->ckc;
1214 	u64 cputm, sltime = 0;
1215 
1216 	if (ckc_interrupts_enabled(vcpu)) {
1217 		if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
1218 			if ((s64)now < (s64)ckc)
1219 				sltime = tod_to_ns((s64)ckc - (s64)now);
1220 		} else if (now < ckc) {
1221 			sltime = tod_to_ns(ckc - now);
1222 		}
1223 		/* already expired */
1224 		if (!sltime)
1225 			return 0;
1226 		if (cpu_timer_interrupts_enabled(vcpu)) {
1227 			cputm = kvm_s390_get_cpu_timer(vcpu);
1228 			/* already expired? */
1229 			if (cputm >> 63)
1230 				return 0;
1231 			return min_t(u64, sltime, tod_to_ns(cputm));
1232 		}
1233 	} else if (cpu_timer_interrupts_enabled(vcpu)) {
1234 		sltime = kvm_s390_get_cpu_timer(vcpu);
1235 		/* already expired? */
1236 		if (sltime >> 63)
1237 			return 0;
1238 	}
1239 	return sltime;
1240 }
1241 
kvm_s390_handle_wait(struct kvm_vcpu * vcpu)1242 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
1243 {
1244 	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
1245 	u64 sltime;
1246 
1247 	vcpu->stat.exit_wait_state++;
1248 
1249 	/* fast path */
1250 	if (kvm_arch_vcpu_runnable(vcpu))
1251 		return 0;
1252 
1253 	if (psw_interrupts_disabled(vcpu)) {
1254 		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
1255 		return -EOPNOTSUPP; /* disabled wait */
1256 	}
1257 
1258 	if (gi->origin &&
1259 	    (gisa_get_ipm_or_restore_iam(gi) &
1260 	     vcpu->arch.sie_block->gcr[6] >> 24))
1261 		return 0;
1262 
1263 	if (!ckc_interrupts_enabled(vcpu) &&
1264 	    !cpu_timer_interrupts_enabled(vcpu)) {
1265 		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
1266 		__set_cpu_idle(vcpu);
1267 		goto no_timer;
1268 	}
1269 
1270 	sltime = __calculate_sltime(vcpu);
1271 	if (!sltime)
1272 		return 0;
1273 
1274 	__set_cpu_idle(vcpu);
1275 	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
1276 	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
1277 no_timer:
1278 	kvm_vcpu_srcu_read_unlock(vcpu);
1279 	vcpu->kvm->arch.float_int.last_sleep_cpu = vcpu->vcpu_idx;
1280 	kvm_vcpu_halt(vcpu);
1281 	vcpu->valid_wakeup = false;
1282 	__unset_cpu_idle(vcpu);
1283 	kvm_vcpu_srcu_read_lock(vcpu);
1284 
1285 	hrtimer_cancel(&vcpu->arch.ckc_timer);
1286 	return 0;
1287 }
1288 
kvm_s390_vcpu_wakeup(struct kvm_vcpu * vcpu)1289 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
1290 {
1291 	vcpu->valid_wakeup = true;
1292 	kvm_vcpu_wake_up(vcpu);
1293 
1294 	/*
1295 	 * The VCPU might not be sleeping but rather executing VSIE. Let's
1296 	 * kick it, so it leaves the SIE to process the request.
1297 	 */
1298 	kvm_s390_vsie_kick(vcpu);
1299 }
1300 
kvm_s390_idle_wakeup(struct hrtimer * timer)1301 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
1302 {
1303 	struct kvm_vcpu *vcpu;
1304 	u64 sltime;
1305 
1306 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
1307 	sltime = __calculate_sltime(vcpu);
1308 
1309 	/*
1310 	 * If the monotonic clock runs faster than the tod clock we might be
1311 	 * woken up too early and have to go back to sleep to avoid deadlocks.
1312 	 */
1313 	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
1314 		return HRTIMER_RESTART;
1315 	kvm_s390_vcpu_wakeup(vcpu);
1316 	return HRTIMER_NORESTART;
1317 }
1318 
kvm_s390_clear_local_irqs(struct kvm_vcpu * vcpu)1319 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
1320 {
1321 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1322 
1323 	spin_lock(&li->lock);
1324 	li->pending_irqs = 0;
1325 	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
1326 	memset(&li->irq, 0, sizeof(li->irq));
1327 	spin_unlock(&li->lock);
1328 
1329 	sca_clear_ext_call(vcpu);
1330 }
1331 
kvm_s390_deliver_pending_interrupts(struct kvm_vcpu * vcpu)1332 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
1333 {
1334 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1335 	int rc = 0;
1336 	bool delivered = false;
1337 	unsigned long irq_type;
1338 	unsigned long irqs;
1339 
1340 	__reset_intercept_indicators(vcpu);
1341 
1342 	/* pending ckc conditions might have been invalidated */
1343 	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1344 	if (ckc_irq_pending(vcpu))
1345 		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1346 
1347 	/* pending cpu timer conditions might have been invalidated */
1348 	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1349 	if (cpu_timer_irq_pending(vcpu))
1350 		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1351 
1352 	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
1353 		/* bits are in the reverse order of interrupt priority */
1354 		irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
1355 		switch (irq_type) {
1356 		case IRQ_PEND_IO_ISC_0:
1357 		case IRQ_PEND_IO_ISC_1:
1358 		case IRQ_PEND_IO_ISC_2:
1359 		case IRQ_PEND_IO_ISC_3:
1360 		case IRQ_PEND_IO_ISC_4:
1361 		case IRQ_PEND_IO_ISC_5:
1362 		case IRQ_PEND_IO_ISC_6:
1363 		case IRQ_PEND_IO_ISC_7:
1364 			rc = __deliver_io(vcpu, irq_type);
1365 			break;
1366 		case IRQ_PEND_MCHK_EX:
1367 		case IRQ_PEND_MCHK_REP:
1368 			rc = __deliver_machine_check(vcpu);
1369 			break;
1370 		case IRQ_PEND_PROG:
1371 			rc = __deliver_prog(vcpu);
1372 			break;
1373 		case IRQ_PEND_EXT_EMERGENCY:
1374 			rc = __deliver_emergency_signal(vcpu);
1375 			break;
1376 		case IRQ_PEND_EXT_EXTERNAL:
1377 			rc = __deliver_external_call(vcpu);
1378 			break;
1379 		case IRQ_PEND_EXT_CLOCK_COMP:
1380 			rc = __deliver_ckc(vcpu);
1381 			break;
1382 		case IRQ_PEND_EXT_CPU_TIMER:
1383 			rc = __deliver_cpu_timer(vcpu);
1384 			break;
1385 		case IRQ_PEND_RESTART:
1386 			rc = __deliver_restart(vcpu);
1387 			break;
1388 		case IRQ_PEND_SET_PREFIX:
1389 			rc = __deliver_set_prefix(vcpu);
1390 			break;
1391 		case IRQ_PEND_PFAULT_INIT:
1392 			rc = __deliver_pfault_init(vcpu);
1393 			break;
1394 		case IRQ_PEND_EXT_SERVICE:
1395 			rc = __deliver_service(vcpu);
1396 			break;
1397 		case IRQ_PEND_EXT_SERVICE_EV:
1398 			rc = __deliver_service_ev(vcpu);
1399 			break;
1400 		case IRQ_PEND_PFAULT_DONE:
1401 			rc = __deliver_pfault_done(vcpu);
1402 			break;
1403 		case IRQ_PEND_VIRTIO:
1404 			rc = __deliver_virtio(vcpu);
1405 			break;
1406 		default:
1407 			WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
1408 			clear_bit(irq_type, &li->pending_irqs);
1409 		}
1410 		delivered |= !rc;
1411 	}
1412 
1413 	/*
1414 	 * We delivered at least one interrupt and modified the PC. Force a
1415 	 * singlestep event now.
1416 	 */
1417 	if (delivered && guestdbg_sstep_enabled(vcpu)) {
1418 		struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
1419 
1420 		debug_exit->addr = vcpu->arch.sie_block->gpsw.addr;
1421 		debug_exit->type = KVM_SINGLESTEP;
1422 		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
1423 	}
1424 
1425 	set_intercept_indicators(vcpu);
1426 
1427 	return rc;
1428 }
1429 
__inject_prog(struct kvm_vcpu * vcpu,struct kvm_s390_irq * irq)1430 static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1431 {
1432 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1433 
1434 	vcpu->stat.inject_program++;
1435 	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1436 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1437 				   irq->u.pgm.code, 0);
1438 
1439 	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
1440 		/* auto detection if no valid ILC was given */
1441 		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
1442 		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
1443 		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
1444 	}
1445 
1446 	if (irq->u.pgm.code == PGM_PER) {
1447 		li->irq.pgm.code |= PGM_PER;
1448 		li->irq.pgm.flags = irq->u.pgm.flags;
1449 		/* only modify PER related information */
1450 		li->irq.pgm.per_address = irq->u.pgm.per_address;
1451 		li->irq.pgm.per_code = irq->u.pgm.per_code;
1452 		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1453 		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1454 	} else if (!(irq->u.pgm.code & PGM_PER)) {
1455 		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1456 				   irq->u.pgm.code;
1457 		li->irq.pgm.flags = irq->u.pgm.flags;
1458 		/* only modify non-PER information */
1459 		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1460 		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1461 		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1462 		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1463 		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1464 		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1465 	} else {
1466 		li->irq.pgm = irq->u.pgm;
1467 	}
1468 	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
1469 	return 0;
1470 }
1471 
__inject_pfault_init(struct kvm_vcpu * vcpu,struct kvm_s390_irq * irq)1472 static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1473 {
1474 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1475 
1476 	vcpu->stat.inject_pfault_init++;
1477 	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1478 		   irq->u.ext.ext_params2);
1479 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1480 				   irq->u.ext.ext_params,
1481 				   irq->u.ext.ext_params2);
1482 
1483 	li->irq.ext = irq->u.ext;
1484 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
1485 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1486 	return 0;
1487 }
1488 
__inject_extcall(struct kvm_vcpu * vcpu,struct kvm_s390_irq * irq)1489 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1490 {
1491 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1492 	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
1493 	uint16_t src_id = irq->u.extcall.code;
1494 
1495 	vcpu->stat.inject_external_call++;
1496 	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
1497 		   src_id);
1498 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
1499 				   src_id, 0);
1500 
1501 	/* sending vcpu invalid */
1502 	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
1503 		return -EINVAL;
1504 
1505 	if (kvm_s390_use_sca_entries() && !kvm_s390_pv_cpu_get_handle(vcpu))
1506 		return sca_inject_ext_call(vcpu, src_id);
1507 
1508 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
1509 		return -EBUSY;
1510 	*extcall = irq->u.extcall;
1511 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1512 	return 0;
1513 }
1514 
__inject_set_prefix(struct kvm_vcpu * vcpu,struct kvm_s390_irq * irq)1515 static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1516 {
1517 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1518 	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
1519 
1520 	vcpu->stat.inject_set_prefix++;
1521 	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
1522 		   irq->u.prefix.address);
1523 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
1524 				   irq->u.prefix.address, 0);
1525 
1526 	if (!is_vcpu_stopped(vcpu))
1527 		return -EBUSY;
1528 
1529 	*prefix = irq->u.prefix;
1530 	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
1531 	return 0;
1532 }
1533 
1534 #define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
__inject_sigp_stop(struct kvm_vcpu * vcpu,struct kvm_s390_irq * irq)1535 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1536 {
1537 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1538 	struct kvm_s390_stop_info *stop = &li->irq.stop;
1539 	int rc = 0;
1540 
1541 	vcpu->stat.inject_stop_signal++;
1542 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
1543 
1544 	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1545 		return -EINVAL;
1546 
1547 	if (is_vcpu_stopped(vcpu)) {
1548 		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
1549 			rc = kvm_s390_store_status_unloaded(vcpu,
1550 						KVM_S390_STORE_STATUS_NOADDR);
1551 		return rc;
1552 	}
1553 
1554 	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
1555 		return -EBUSY;
1556 	stop->flags = irq->u.stop.flags;
1557 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
1558 	return 0;
1559 }
1560 
__inject_sigp_restart(struct kvm_vcpu * vcpu)1561 static int __inject_sigp_restart(struct kvm_vcpu *vcpu)
1562 {
1563 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1564 
1565 	vcpu->stat.inject_restart++;
1566 	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
1567 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
1568 
1569 	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
1570 	return 0;
1571 }
1572 
__inject_sigp_emergency(struct kvm_vcpu * vcpu,struct kvm_s390_irq * irq)1573 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1574 				   struct kvm_s390_irq *irq)
1575 {
1576 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1577 
1578 	vcpu->stat.inject_emergency_signal++;
1579 	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
1580 		   irq->u.emerg.code);
1581 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1582 				   irq->u.emerg.code, 0);
1583 
1584 	/* sending vcpu invalid */
1585 	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1586 		return -EINVAL;
1587 
1588 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1589 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1590 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1591 	return 0;
1592 }
1593 
__inject_mchk(struct kvm_vcpu * vcpu,struct kvm_s390_irq * irq)1594 static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1595 {
1596 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1597 	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
1598 
1599 	vcpu->stat.inject_mchk++;
1600 	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
1601 		   irq->u.mchk.mcic);
1602 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
1603 				   irq->u.mchk.mcic);
1604 
1605 	/*
1606 	 * Because repressible machine checks can be indicated along with
1607 	 * exigent machine checks (PoP, Chapter 11, Interruption action)
1608 	 * we need to combine cr14, mcic and external damage code.
1609 	 * Failing storage address and the logout area should not be or'ed
1610 	 * together, we just indicate the last occurrence of the corresponding
1611 	 * machine check
1612 	 */
1613 	mchk->cr14 |= irq->u.mchk.cr14;
1614 	mchk->mcic |= irq->u.mchk.mcic;
1615 	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1616 	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1617 	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1618 	       sizeof(mchk->fixed_logout));
1619 	if (mchk->mcic & MCHK_EX_MASK)
1620 		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1621 	else if (mchk->mcic & MCHK_REP_MASK)
1622 		set_bit(IRQ_PEND_MCHK_REP,  &li->pending_irqs);
1623 	return 0;
1624 }
1625 
__inject_ckc(struct kvm_vcpu * vcpu)1626 static int __inject_ckc(struct kvm_vcpu *vcpu)
1627 {
1628 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1629 
1630 	vcpu->stat.inject_ckc++;
1631 	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
1632 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
1633 				   0, 0);
1634 
1635 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1636 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1637 	return 0;
1638 }
1639 
__inject_cpu_timer(struct kvm_vcpu * vcpu)1640 static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1641 {
1642 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1643 
1644 	vcpu->stat.inject_cputm++;
1645 	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
1646 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
1647 				   0, 0);
1648 
1649 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1650 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1651 	return 0;
1652 }
1653 
get_io_int(struct kvm * kvm,int isc,u32 schid)1654 static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1655 						  int isc, u32 schid)
1656 {
1657 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1658 	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1659 	struct kvm_s390_interrupt_info *iter;
1660 	u16 id = (schid & 0xffff0000U) >> 16;
1661 	u16 nr = schid & 0x0000ffffU;
1662 
1663 	spin_lock(&fi->lock);
1664 	list_for_each_entry(iter, isc_list, list) {
1665 		if (schid && (id != iter->io.subchannel_id ||
1666 			      nr != iter->io.subchannel_nr))
1667 			continue;
1668 		/* found an appropriate entry */
1669 		list_del_init(&iter->list);
1670 		fi->counters[FIRQ_CNTR_IO] -= 1;
1671 		if (list_empty(isc_list))
1672 			clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1673 		spin_unlock(&fi->lock);
1674 		return iter;
1675 	}
1676 	spin_unlock(&fi->lock);
1677 	return NULL;
1678 }
1679 
get_top_io_int(struct kvm * kvm,u64 isc_mask,u32 schid)1680 static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
1681 						      u64 isc_mask, u32 schid)
1682 {
1683 	struct kvm_s390_interrupt_info *inti = NULL;
1684 	int isc;
1685 
1686 	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1687 		if (isc_mask & isc_to_isc_bits(isc))
1688 			inti = get_io_int(kvm, isc, schid);
1689 	}
1690 	return inti;
1691 }
1692 
get_top_gisa_isc(struct kvm * kvm,u64 isc_mask,u32 schid)1693 static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
1694 {
1695 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1696 	unsigned long active_mask;
1697 	int isc;
1698 
1699 	if (schid)
1700 		goto out;
1701 	if (!gi->origin)
1702 		goto out;
1703 
1704 	active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
1705 	while (active_mask) {
1706 		isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
1707 		if (gisa_tac_ipm_gisc(gi->origin, isc))
1708 			return isc;
1709 		clear_bit_inv(isc, &active_mask);
1710 	}
1711 out:
1712 	return -EINVAL;
1713 }
1714 
1715 /*
1716  * Dequeue and return an I/O interrupt matching any of the interruption
1717  * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1718  * Take into account the interrupts pending in the interrupt list and in GISA.
1719  *
1720  * Note that for a guest that does not enable I/O interrupts
1721  * but relies on TPI, a flood of classic interrupts may starve
1722  * out adapter interrupts on the same isc. Linux does not do
1723  * that, and it is possible to work around the issue by configuring
1724  * different iscs for classic and adapter interrupts in the guest,
1725  * but we may want to revisit this in the future.
1726  */
kvm_s390_get_io_int(struct kvm * kvm,u64 isc_mask,u32 schid)1727 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
1728 						    u64 isc_mask, u32 schid)
1729 {
1730 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1731 	struct kvm_s390_interrupt_info *inti, *tmp_inti;
1732 	int isc;
1733 
1734 	inti = get_top_io_int(kvm, isc_mask, schid);
1735 
1736 	isc = get_top_gisa_isc(kvm, isc_mask, schid);
1737 	if (isc < 0)
1738 		/* no AI in GISA */
1739 		goto out;
1740 
1741 	if (!inti)
1742 		/* AI in GISA but no classical IO int */
1743 		goto gisa_out;
1744 
1745 	/* both types of interrupts present */
1746 	if (int_word_to_isc(inti->io.io_int_word) <= isc) {
1747 		/* classical IO int with higher priority */
1748 		gisa_set_ipm_gisc(gi->origin, isc);
1749 		goto out;
1750 	}
1751 gisa_out:
1752 	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
1753 	if (tmp_inti) {
1754 		tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
1755 		tmp_inti->io.io_int_word = isc_to_int_word(isc);
1756 		if (inti)
1757 			kvm_s390_reinject_io_int(kvm, inti);
1758 		inti = tmp_inti;
1759 	} else
1760 		gisa_set_ipm_gisc(gi->origin, isc);
1761 out:
1762 	return inti;
1763 }
1764 
__inject_service(struct kvm * kvm,struct kvm_s390_interrupt_info * inti)1765 static int __inject_service(struct kvm *kvm,
1766 			     struct kvm_s390_interrupt_info *inti)
1767 {
1768 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1769 
1770 	kvm->stat.inject_service_signal++;
1771 	spin_lock(&fi->lock);
1772 	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1773 
1774 	/* We always allow events, track them separately from the sccb ints */
1775 	if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING)
1776 		set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
1777 
1778 	/*
1779 	 * Early versions of the QEMU s390 bios will inject several
1780 	 * service interrupts after another without handling a
1781 	 * condition code indicating busy.
1782 	 * We will silently ignore those superfluous sccb values.
1783 	 * A future version of QEMU will take care of serialization
1784 	 * of servc requests
1785 	 */
1786 	if (fi->srv_signal.ext_params & SCCB_MASK)
1787 		goto out;
1788 	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1789 	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1790 out:
1791 	spin_unlock(&fi->lock);
1792 	kfree(inti);
1793 	return 0;
1794 }
1795 
__inject_virtio(struct kvm * kvm,struct kvm_s390_interrupt_info * inti)1796 static int __inject_virtio(struct kvm *kvm,
1797 			    struct kvm_s390_interrupt_info *inti)
1798 {
1799 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1800 
1801 	kvm->stat.inject_virtio++;
1802 	spin_lock(&fi->lock);
1803 	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1804 		spin_unlock(&fi->lock);
1805 		return -EBUSY;
1806 	}
1807 	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1808 	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1809 	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1810 	spin_unlock(&fi->lock);
1811 	return 0;
1812 }
1813 
__inject_pfault_done(struct kvm * kvm,struct kvm_s390_interrupt_info * inti)1814 static int __inject_pfault_done(struct kvm *kvm,
1815 				 struct kvm_s390_interrupt_info *inti)
1816 {
1817 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1818 
1819 	kvm->stat.inject_pfault_done++;
1820 	spin_lock(&fi->lock);
1821 	if (fi->counters[FIRQ_CNTR_PFAULT] >=
1822 		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1823 		spin_unlock(&fi->lock);
1824 		return -EBUSY;
1825 	}
1826 	fi->counters[FIRQ_CNTR_PFAULT] += 1;
1827 	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1828 	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1829 	spin_unlock(&fi->lock);
1830 	return 0;
1831 }
1832 
1833 #define CR_PENDING_SUBCLASS 28
__inject_float_mchk(struct kvm * kvm,struct kvm_s390_interrupt_info * inti)1834 static int __inject_float_mchk(struct kvm *kvm,
1835 				struct kvm_s390_interrupt_info *inti)
1836 {
1837 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1838 
1839 	kvm->stat.inject_float_mchk++;
1840 	spin_lock(&fi->lock);
1841 	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1842 	fi->mchk.mcic |= inti->mchk.mcic;
1843 	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1844 	spin_unlock(&fi->lock);
1845 	kfree(inti);
1846 	return 0;
1847 }
1848 
__inject_io(struct kvm * kvm,struct kvm_s390_interrupt_info * inti)1849 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1850 {
1851 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1852 	struct kvm_s390_float_interrupt *fi;
1853 	struct list_head *list;
1854 	int isc;
1855 
1856 	kvm->stat.inject_io++;
1857 	isc = int_word_to_isc(inti->io.io_int_word);
1858 
1859 	/*
1860 	 * We do not use the lock checking variant as this is just a
1861 	 * performance optimization and we do not hold the lock here.
1862 	 * This is ok as the code will pick interrupts from both "lists"
1863 	 * for delivery.
1864 	 */
1865 	if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
1866 		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
1867 		gisa_set_ipm_gisc(gi->origin, isc);
1868 		kfree(inti);
1869 		return 0;
1870 	}
1871 
1872 	fi = &kvm->arch.float_int;
1873 	spin_lock(&fi->lock);
1874 	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1875 		spin_unlock(&fi->lock);
1876 		return -EBUSY;
1877 	}
1878 	fi->counters[FIRQ_CNTR_IO] += 1;
1879 
1880 	if (inti->type & KVM_S390_INT_IO_AI_MASK)
1881 		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
1882 	else
1883 		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
1884 			inti->io.subchannel_id >> 8,
1885 			inti->io.subchannel_id >> 1 & 0x3,
1886 			inti->io.subchannel_nr);
1887 	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1888 	list_add_tail(&inti->list, list);
1889 	set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1890 	spin_unlock(&fi->lock);
1891 	return 0;
1892 }
1893 
1894 /*
1895  * Find a destination VCPU for a floating irq and kick it.
1896  */
__floating_irq_kick(struct kvm * kvm,u64 type)1897 static void __floating_irq_kick(struct kvm *kvm, u64 type)
1898 {
1899 	struct kvm_vcpu *dst_vcpu;
1900 	int sigcpu, online_vcpus, nr_tries = 0;
1901 
1902 	online_vcpus = atomic_read(&kvm->online_vcpus);
1903 	if (!online_vcpus)
1904 		return;
1905 
1906 	for (sigcpu = kvm->arch.float_int.last_sleep_cpu; ; sigcpu++) {
1907 		sigcpu %= online_vcpus;
1908 		dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1909 		if (!is_vcpu_stopped(dst_vcpu))
1910 			break;
1911 		/* avoid endless loops if all vcpus are stopped */
1912 		if (nr_tries++ >= online_vcpus)
1913 			return;
1914 	}
1915 
1916 	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
1917 	switch (type) {
1918 	case KVM_S390_MCHK:
1919 		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
1920 		break;
1921 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1922 		if (!(type & KVM_S390_INT_IO_AI_MASK &&
1923 		      kvm->arch.gisa_int.origin) ||
1924 		      kvm_s390_pv_cpu_get_handle(dst_vcpu))
1925 			kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
1926 		break;
1927 	default:
1928 		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
1929 		break;
1930 	}
1931 	kvm_s390_vcpu_wakeup(dst_vcpu);
1932 }
1933 
__inject_vm(struct kvm * kvm,struct kvm_s390_interrupt_info * inti)1934 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1935 {
1936 	u64 type = READ_ONCE(inti->type);
1937 	int rc;
1938 
1939 	switch (type) {
1940 	case KVM_S390_MCHK:
1941 		rc = __inject_float_mchk(kvm, inti);
1942 		break;
1943 	case KVM_S390_INT_VIRTIO:
1944 		rc = __inject_virtio(kvm, inti);
1945 		break;
1946 	case KVM_S390_INT_SERVICE:
1947 		rc = __inject_service(kvm, inti);
1948 		break;
1949 	case KVM_S390_INT_PFAULT_DONE:
1950 		rc = __inject_pfault_done(kvm, inti);
1951 		break;
1952 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1953 		rc = __inject_io(kvm, inti);
1954 		break;
1955 	default:
1956 		rc = -EINVAL;
1957 	}
1958 	if (rc)
1959 		return rc;
1960 
1961 	__floating_irq_kick(kvm, type);
1962 	return 0;
1963 }
1964 
kvm_s390_inject_vm(struct kvm * kvm,struct kvm_s390_interrupt * s390int)1965 int kvm_s390_inject_vm(struct kvm *kvm,
1966 		       struct kvm_s390_interrupt *s390int)
1967 {
1968 	struct kvm_s390_interrupt_info *inti;
1969 	int rc;
1970 
1971 	inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
1972 	if (!inti)
1973 		return -ENOMEM;
1974 
1975 	inti->type = s390int->type;
1976 	switch (inti->type) {
1977 	case KVM_S390_INT_VIRTIO:
1978 		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1979 			 s390int->parm, s390int->parm64);
1980 		inti->ext.ext_params = s390int->parm;
1981 		inti->ext.ext_params2 = s390int->parm64;
1982 		break;
1983 	case KVM_S390_INT_SERVICE:
1984 		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
1985 		inti->ext.ext_params = s390int->parm;
1986 		break;
1987 	case KVM_S390_INT_PFAULT_DONE:
1988 		inti->ext.ext_params2 = s390int->parm64;
1989 		break;
1990 	case KVM_S390_MCHK:
1991 		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
1992 			 s390int->parm64);
1993 		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1994 		inti->mchk.mcic = s390int->parm64;
1995 		break;
1996 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1997 		inti->io.subchannel_id = s390int->parm >> 16;
1998 		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1999 		inti->io.io_int_parm = s390int->parm64 >> 32;
2000 		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
2001 		break;
2002 	default:
2003 		kfree(inti);
2004 		return -EINVAL;
2005 	}
2006 	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
2007 				 2);
2008 
2009 	rc = __inject_vm(kvm, inti);
2010 	if (rc)
2011 		kfree(inti);
2012 	return rc;
2013 }
2014 
kvm_s390_reinject_io_int(struct kvm * kvm,struct kvm_s390_interrupt_info * inti)2015 int kvm_s390_reinject_io_int(struct kvm *kvm,
2016 			      struct kvm_s390_interrupt_info *inti)
2017 {
2018 	return __inject_vm(kvm, inti);
2019 }
2020 
s390int_to_s390irq(struct kvm_s390_interrupt * s390int,struct kvm_s390_irq * irq)2021 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
2022 		       struct kvm_s390_irq *irq)
2023 {
2024 	irq->type = s390int->type;
2025 	switch (irq->type) {
2026 	case KVM_S390_PROGRAM_INT:
2027 		if (s390int->parm & 0xffff0000)
2028 			return -EINVAL;
2029 		irq->u.pgm.code = s390int->parm;
2030 		break;
2031 	case KVM_S390_SIGP_SET_PREFIX:
2032 		irq->u.prefix.address = s390int->parm;
2033 		break;
2034 	case KVM_S390_SIGP_STOP:
2035 		irq->u.stop.flags = s390int->parm;
2036 		break;
2037 	case KVM_S390_INT_EXTERNAL_CALL:
2038 		if (s390int->parm & 0xffff0000)
2039 			return -EINVAL;
2040 		irq->u.extcall.code = s390int->parm;
2041 		break;
2042 	case KVM_S390_INT_EMERGENCY:
2043 		if (s390int->parm & 0xffff0000)
2044 			return -EINVAL;
2045 		irq->u.emerg.code = s390int->parm;
2046 		break;
2047 	case KVM_S390_MCHK:
2048 		irq->u.mchk.mcic = s390int->parm64;
2049 		break;
2050 	case KVM_S390_INT_PFAULT_INIT:
2051 		irq->u.ext.ext_params = s390int->parm;
2052 		irq->u.ext.ext_params2 = s390int->parm64;
2053 		break;
2054 	case KVM_S390_RESTART:
2055 	case KVM_S390_INT_CLOCK_COMP:
2056 	case KVM_S390_INT_CPU_TIMER:
2057 		break;
2058 	default:
2059 		return -EINVAL;
2060 	}
2061 	return 0;
2062 }
2063 
kvm_s390_is_stop_irq_pending(struct kvm_vcpu * vcpu)2064 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
2065 {
2066 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2067 
2068 	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
2069 }
2070 
kvm_s390_is_restart_irq_pending(struct kvm_vcpu * vcpu)2071 int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
2072 {
2073 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2074 
2075 	return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
2076 }
2077 
kvm_s390_clear_stop_irq(struct kvm_vcpu * vcpu)2078 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
2079 {
2080 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2081 
2082 	spin_lock(&li->lock);
2083 	li->irq.stop.flags = 0;
2084 	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
2085 	spin_unlock(&li->lock);
2086 }
2087 
do_inject_vcpu(struct kvm_vcpu * vcpu,struct kvm_s390_irq * irq)2088 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
2089 {
2090 	int rc;
2091 
2092 	switch (irq->type) {
2093 	case KVM_S390_PROGRAM_INT:
2094 		rc = __inject_prog(vcpu, irq);
2095 		break;
2096 	case KVM_S390_SIGP_SET_PREFIX:
2097 		rc = __inject_set_prefix(vcpu, irq);
2098 		break;
2099 	case KVM_S390_SIGP_STOP:
2100 		rc = __inject_sigp_stop(vcpu, irq);
2101 		break;
2102 	case KVM_S390_RESTART:
2103 		rc = __inject_sigp_restart(vcpu);
2104 		break;
2105 	case KVM_S390_INT_CLOCK_COMP:
2106 		rc = __inject_ckc(vcpu);
2107 		break;
2108 	case KVM_S390_INT_CPU_TIMER:
2109 		rc = __inject_cpu_timer(vcpu);
2110 		break;
2111 	case KVM_S390_INT_EXTERNAL_CALL:
2112 		rc = __inject_extcall(vcpu, irq);
2113 		break;
2114 	case KVM_S390_INT_EMERGENCY:
2115 		rc = __inject_sigp_emergency(vcpu, irq);
2116 		break;
2117 	case KVM_S390_MCHK:
2118 		rc = __inject_mchk(vcpu, irq);
2119 		break;
2120 	case KVM_S390_INT_PFAULT_INIT:
2121 		rc = __inject_pfault_init(vcpu, irq);
2122 		break;
2123 	case KVM_S390_INT_VIRTIO:
2124 	case KVM_S390_INT_SERVICE:
2125 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2126 	default:
2127 		rc = -EINVAL;
2128 	}
2129 
2130 	return rc;
2131 }
2132 
kvm_s390_inject_vcpu(struct kvm_vcpu * vcpu,struct kvm_s390_irq * irq)2133 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
2134 {
2135 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2136 	int rc;
2137 
2138 	spin_lock(&li->lock);
2139 	rc = do_inject_vcpu(vcpu, irq);
2140 	spin_unlock(&li->lock);
2141 	if (!rc)
2142 		kvm_s390_vcpu_wakeup(vcpu);
2143 	return rc;
2144 }
2145 
clear_irq_list(struct list_head * _list)2146 static inline void clear_irq_list(struct list_head *_list)
2147 {
2148 	struct kvm_s390_interrupt_info *inti, *n;
2149 
2150 	list_for_each_entry_safe(inti, n, _list, list) {
2151 		list_del(&inti->list);
2152 		kfree(inti);
2153 	}
2154 }
2155 
inti_to_irq(struct kvm_s390_interrupt_info * inti,struct kvm_s390_irq * irq)2156 static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
2157 		       struct kvm_s390_irq *irq)
2158 {
2159 	irq->type = inti->type;
2160 	switch (inti->type) {
2161 	case KVM_S390_INT_PFAULT_INIT:
2162 	case KVM_S390_INT_PFAULT_DONE:
2163 	case KVM_S390_INT_VIRTIO:
2164 		irq->u.ext = inti->ext;
2165 		break;
2166 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2167 		irq->u.io = inti->io;
2168 		break;
2169 	}
2170 }
2171 
kvm_s390_clear_float_irqs(struct kvm * kvm)2172 void kvm_s390_clear_float_irqs(struct kvm *kvm)
2173 {
2174 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2175 	int i;
2176 
2177 	mutex_lock(&kvm->lock);
2178 	if (!kvm_s390_pv_is_protected(kvm))
2179 		fi->masked_irqs = 0;
2180 	mutex_unlock(&kvm->lock);
2181 	spin_lock(&fi->lock);
2182 	fi->pending_irqs = 0;
2183 	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
2184 	memset(&fi->mchk, 0, sizeof(fi->mchk));
2185 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
2186 		clear_irq_list(&fi->lists[i]);
2187 	for (i = 0; i < FIRQ_MAX_COUNT; i++)
2188 		fi->counters[i] = 0;
2189 	spin_unlock(&fi->lock);
2190 	kvm_s390_gisa_clear(kvm);
2191 };
2192 
get_all_floating_irqs(struct kvm * kvm,u8 __user * usrbuf,u64 len)2193 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
2194 {
2195 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
2196 	struct kvm_s390_interrupt_info *inti;
2197 	struct kvm_s390_float_interrupt *fi;
2198 	struct kvm_s390_irq *buf;
2199 	struct kvm_s390_irq *irq;
2200 	int max_irqs;
2201 	int ret = 0;
2202 	int n = 0;
2203 	int i;
2204 
2205 	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
2206 		return -EINVAL;
2207 
2208 	/*
2209 	 * We are already using -ENOMEM to signal
2210 	 * userspace it may retry with a bigger buffer,
2211 	 * so we need to use something else for this case
2212 	 */
2213 	buf = vzalloc(len);
2214 	if (!buf)
2215 		return -ENOBUFS;
2216 
2217 	max_irqs = len / sizeof(struct kvm_s390_irq);
2218 
2219 	if (gi->origin && gisa_get_ipm(gi->origin)) {
2220 		for (i = 0; i <= MAX_ISC; i++) {
2221 			if (n == max_irqs) {
2222 				/* signal userspace to try again */
2223 				ret = -ENOMEM;
2224 				goto out_nolock;
2225 			}
2226 			if (gisa_tac_ipm_gisc(gi->origin, i)) {
2227 				irq = (struct kvm_s390_irq *) &buf[n];
2228 				irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
2229 				irq->u.io.io_int_word = isc_to_int_word(i);
2230 				n++;
2231 			}
2232 		}
2233 	}
2234 	fi = &kvm->arch.float_int;
2235 	spin_lock(&fi->lock);
2236 	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
2237 		list_for_each_entry(inti, &fi->lists[i], list) {
2238 			if (n == max_irqs) {
2239 				/* signal userspace to try again */
2240 				ret = -ENOMEM;
2241 				goto out;
2242 			}
2243 			inti_to_irq(inti, &buf[n]);
2244 			n++;
2245 		}
2246 	}
2247 	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) ||
2248 	    test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) {
2249 		if (n == max_irqs) {
2250 			/* signal userspace to try again */
2251 			ret = -ENOMEM;
2252 			goto out;
2253 		}
2254 		irq = (struct kvm_s390_irq *) &buf[n];
2255 		irq->type = KVM_S390_INT_SERVICE;
2256 		irq->u.ext = fi->srv_signal;
2257 		n++;
2258 	}
2259 	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
2260 		if (n == max_irqs) {
2261 				/* signal userspace to try again */
2262 				ret = -ENOMEM;
2263 				goto out;
2264 		}
2265 		irq = (struct kvm_s390_irq *) &buf[n];
2266 		irq->type = KVM_S390_MCHK;
2267 		irq->u.mchk = fi->mchk;
2268 		n++;
2269 }
2270 
2271 out:
2272 	spin_unlock(&fi->lock);
2273 out_nolock:
2274 	if (!ret && n > 0) {
2275 		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2276 			ret = -EFAULT;
2277 	}
2278 	vfree(buf);
2279 
2280 	return ret < 0 ? ret : n;
2281 }
2282 
flic_ais_mode_get_all(struct kvm * kvm,struct kvm_device_attr * attr)2283 static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
2284 {
2285 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2286 	struct kvm_s390_ais_all ais;
2287 
2288 	if (attr->attr < sizeof(ais))
2289 		return -EINVAL;
2290 
2291 	if (!test_kvm_facility(kvm, 72))
2292 		return -EOPNOTSUPP;
2293 
2294 	mutex_lock(&fi->ais_lock);
2295 	ais.simm = fi->simm;
2296 	ais.nimm = fi->nimm;
2297 	mutex_unlock(&fi->ais_lock);
2298 
2299 	if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
2300 		return -EFAULT;
2301 
2302 	return 0;
2303 }
2304 
flic_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)2305 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2306 {
2307 	int r;
2308 
2309 	switch (attr->group) {
2310 	case KVM_DEV_FLIC_GET_ALL_IRQS:
2311 		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
2312 					  attr->attr);
2313 		break;
2314 	case KVM_DEV_FLIC_AISM_ALL:
2315 		r = flic_ais_mode_get_all(dev->kvm, attr);
2316 		break;
2317 	default:
2318 		r = -EINVAL;
2319 	}
2320 
2321 	return r;
2322 }
2323 
copy_irq_from_user(struct kvm_s390_interrupt_info * inti,u64 addr)2324 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
2325 				     u64 addr)
2326 {
2327 	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2328 	void *target = NULL;
2329 	void __user *source;
2330 	u64 size;
2331 
2332 	if (get_user(inti->type, (u64 __user *)addr))
2333 		return -EFAULT;
2334 
2335 	switch (inti->type) {
2336 	case KVM_S390_INT_PFAULT_INIT:
2337 	case KVM_S390_INT_PFAULT_DONE:
2338 	case KVM_S390_INT_VIRTIO:
2339 	case KVM_S390_INT_SERVICE:
2340 		target = (void *) &inti->ext;
2341 		source = &uptr->u.ext;
2342 		size = sizeof(inti->ext);
2343 		break;
2344 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2345 		target = (void *) &inti->io;
2346 		source = &uptr->u.io;
2347 		size = sizeof(inti->io);
2348 		break;
2349 	case KVM_S390_MCHK:
2350 		target = (void *) &inti->mchk;
2351 		source = &uptr->u.mchk;
2352 		size = sizeof(inti->mchk);
2353 		break;
2354 	default:
2355 		return -EINVAL;
2356 	}
2357 
2358 	if (copy_from_user(target, source, size))
2359 		return -EFAULT;
2360 
2361 	return 0;
2362 }
2363 
enqueue_floating_irq(struct kvm_device * dev,struct kvm_device_attr * attr)2364 static int enqueue_floating_irq(struct kvm_device *dev,
2365 				struct kvm_device_attr *attr)
2366 {
2367 	struct kvm_s390_interrupt_info *inti = NULL;
2368 	int r = 0;
2369 	int len = attr->attr;
2370 
2371 	if (len % sizeof(struct kvm_s390_irq) != 0)
2372 		return -EINVAL;
2373 	else if (len > KVM_S390_FLIC_MAX_BUFFER)
2374 		return -EINVAL;
2375 
2376 	while (len >= sizeof(struct kvm_s390_irq)) {
2377 		inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
2378 		if (!inti)
2379 			return -ENOMEM;
2380 
2381 		r = copy_irq_from_user(inti, attr->addr);
2382 		if (r) {
2383 			kfree(inti);
2384 			return r;
2385 		}
2386 		r = __inject_vm(dev->kvm, inti);
2387 		if (r) {
2388 			kfree(inti);
2389 			return r;
2390 		}
2391 		len -= sizeof(struct kvm_s390_irq);
2392 		attr->addr += sizeof(struct kvm_s390_irq);
2393 	}
2394 
2395 	return r;
2396 }
2397 
get_io_adapter(struct kvm * kvm,unsigned int id)2398 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
2399 {
2400 	if (id >= MAX_S390_IO_ADAPTERS)
2401 		return NULL;
2402 	id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
2403 	return kvm->arch.adapters[id];
2404 }
2405 
register_io_adapter(struct kvm_device * dev,struct kvm_device_attr * attr)2406 static int register_io_adapter(struct kvm_device *dev,
2407 			       struct kvm_device_attr *attr)
2408 {
2409 	struct s390_io_adapter *adapter;
2410 	struct kvm_s390_io_adapter adapter_info;
2411 
2412 	if (copy_from_user(&adapter_info,
2413 			   (void __user *)attr->addr, sizeof(adapter_info)))
2414 		return -EFAULT;
2415 
2416 	if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
2417 		return -EINVAL;
2418 
2419 	adapter_info.id = array_index_nospec(adapter_info.id,
2420 					     MAX_S390_IO_ADAPTERS);
2421 
2422 	if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
2423 		return -EINVAL;
2424 
2425 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL_ACCOUNT);
2426 	if (!adapter)
2427 		return -ENOMEM;
2428 
2429 	adapter->id = adapter_info.id;
2430 	adapter->isc = adapter_info.isc;
2431 	adapter->maskable = adapter_info.maskable;
2432 	adapter->masked = false;
2433 	adapter->swap = adapter_info.swap;
2434 	adapter->suppressible = (adapter_info.flags) &
2435 				KVM_S390_ADAPTER_SUPPRESSIBLE;
2436 	dev->kvm->arch.adapters[adapter->id] = adapter;
2437 
2438 	return 0;
2439 }
2440 
kvm_s390_mask_adapter(struct kvm * kvm,unsigned int id,bool masked)2441 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2442 {
2443 	int ret;
2444 	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2445 
2446 	if (!adapter || !adapter->maskable)
2447 		return -EINVAL;
2448 	ret = adapter->masked;
2449 	adapter->masked = masked;
2450 	return ret;
2451 }
2452 
kvm_s390_destroy_adapters(struct kvm * kvm)2453 void kvm_s390_destroy_adapters(struct kvm *kvm)
2454 {
2455 	int i;
2456 
2457 	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++)
2458 		kfree(kvm->arch.adapters[i]);
2459 }
2460 
modify_io_adapter(struct kvm_device * dev,struct kvm_device_attr * attr)2461 static int modify_io_adapter(struct kvm_device *dev,
2462 			     struct kvm_device_attr *attr)
2463 {
2464 	struct kvm_s390_io_adapter_req req;
2465 	struct s390_io_adapter *adapter;
2466 	int ret;
2467 
2468 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2469 		return -EFAULT;
2470 
2471 	adapter = get_io_adapter(dev->kvm, req.id);
2472 	if (!adapter)
2473 		return -EINVAL;
2474 	switch (req.type) {
2475 	case KVM_S390_IO_ADAPTER_MASK:
2476 		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2477 		if (ret > 0)
2478 			ret = 0;
2479 		break;
2480 	/*
2481 	 * The following operations are no longer needed and therefore no-ops.
2482 	 * The gpa to hva translation is done when an IRQ route is set up. The
2483 	 * set_irq code uses get_user_pages_remote() to do the actual write.
2484 	 */
2485 	case KVM_S390_IO_ADAPTER_MAP:
2486 	case KVM_S390_IO_ADAPTER_UNMAP:
2487 		ret = 0;
2488 		break;
2489 	default:
2490 		ret = -EINVAL;
2491 	}
2492 
2493 	return ret;
2494 }
2495 
clear_io_irq(struct kvm * kvm,struct kvm_device_attr * attr)2496 static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2497 
2498 {
2499 	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2500 	u32 schid;
2501 
2502 	if (attr->flags)
2503 		return -EINVAL;
2504 	if (attr->attr != sizeof(schid))
2505 		return -EINVAL;
2506 	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2507 		return -EFAULT;
2508 	if (!schid)
2509 		return -EINVAL;
2510 	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2511 	/*
2512 	 * If userspace is conforming to the architecture, we can have at most
2513 	 * one pending I/O interrupt per subchannel, so this is effectively a
2514 	 * clear all.
2515 	 */
2516 	return 0;
2517 }
2518 
modify_ais_mode(struct kvm * kvm,struct kvm_device_attr * attr)2519 static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2520 {
2521 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2522 	struct kvm_s390_ais_req req;
2523 	int ret = 0;
2524 
2525 	if (!test_kvm_facility(kvm, 72))
2526 		return -EOPNOTSUPP;
2527 
2528 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2529 		return -EFAULT;
2530 
2531 	if (req.isc > MAX_ISC)
2532 		return -EINVAL;
2533 
2534 	trace_kvm_s390_modify_ais_mode(req.isc,
2535 				       (fi->simm & AIS_MODE_MASK(req.isc)) ?
2536 				       (fi->nimm & AIS_MODE_MASK(req.isc)) ?
2537 				       2 : KVM_S390_AIS_MODE_SINGLE :
2538 				       KVM_S390_AIS_MODE_ALL, req.mode);
2539 
2540 	mutex_lock(&fi->ais_lock);
2541 	switch (req.mode) {
2542 	case KVM_S390_AIS_MODE_ALL:
2543 		fi->simm &= ~AIS_MODE_MASK(req.isc);
2544 		fi->nimm &= ~AIS_MODE_MASK(req.isc);
2545 		break;
2546 	case KVM_S390_AIS_MODE_SINGLE:
2547 		fi->simm |= AIS_MODE_MASK(req.isc);
2548 		fi->nimm &= ~AIS_MODE_MASK(req.isc);
2549 		break;
2550 	default:
2551 		ret = -EINVAL;
2552 	}
2553 	mutex_unlock(&fi->ais_lock);
2554 
2555 	return ret;
2556 }
2557 
kvm_s390_inject_airq(struct kvm * kvm,struct s390_io_adapter * adapter)2558 static int kvm_s390_inject_airq(struct kvm *kvm,
2559 				struct s390_io_adapter *adapter)
2560 {
2561 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2562 	struct kvm_s390_interrupt s390int = {
2563 		.type = KVM_S390_INT_IO(1, 0, 0, 0),
2564 		.parm = 0,
2565 		.parm64 = isc_to_int_word(adapter->isc),
2566 	};
2567 	int ret = 0;
2568 
2569 	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
2570 		return kvm_s390_inject_vm(kvm, &s390int);
2571 
2572 	mutex_lock(&fi->ais_lock);
2573 	if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
2574 		trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
2575 		goto out;
2576 	}
2577 
2578 	ret = kvm_s390_inject_vm(kvm, &s390int);
2579 	if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
2580 		fi->nimm |= AIS_MODE_MASK(adapter->isc);
2581 		trace_kvm_s390_modify_ais_mode(adapter->isc,
2582 					       KVM_S390_AIS_MODE_SINGLE, 2);
2583 	}
2584 out:
2585 	mutex_unlock(&fi->ais_lock);
2586 	return ret;
2587 }
2588 
flic_inject_airq(struct kvm * kvm,struct kvm_device_attr * attr)2589 static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
2590 {
2591 	unsigned int id = attr->attr;
2592 	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2593 
2594 	if (!adapter)
2595 		return -EINVAL;
2596 
2597 	return kvm_s390_inject_airq(kvm, adapter);
2598 }
2599 
flic_ais_mode_set_all(struct kvm * kvm,struct kvm_device_attr * attr)2600 static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
2601 {
2602 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2603 	struct kvm_s390_ais_all ais;
2604 
2605 	if (!test_kvm_facility(kvm, 72))
2606 		return -EOPNOTSUPP;
2607 
2608 	if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
2609 		return -EFAULT;
2610 
2611 	mutex_lock(&fi->ais_lock);
2612 	fi->simm = ais.simm;
2613 	fi->nimm = ais.nimm;
2614 	mutex_unlock(&fi->ais_lock);
2615 
2616 	return 0;
2617 }
2618 
flic_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)2619 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2620 {
2621 	int r = 0;
2622 	unsigned long i;
2623 	struct kvm_vcpu *vcpu;
2624 
2625 	switch (attr->group) {
2626 	case KVM_DEV_FLIC_ENQUEUE:
2627 		r = enqueue_floating_irq(dev, attr);
2628 		break;
2629 	case KVM_DEV_FLIC_CLEAR_IRQS:
2630 		kvm_s390_clear_float_irqs(dev->kvm);
2631 		break;
2632 	case KVM_DEV_FLIC_APF_ENABLE:
2633 		if (kvm_is_ucontrol(dev->kvm))
2634 			return -EINVAL;
2635 		dev->kvm->arch.gmap->pfault_enabled = 1;
2636 		break;
2637 	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2638 		if (kvm_is_ucontrol(dev->kvm))
2639 			return -EINVAL;
2640 		dev->kvm->arch.gmap->pfault_enabled = 0;
2641 		/*
2642 		 * Make sure no async faults are in transition when
2643 		 * clearing the queues. So we don't need to worry
2644 		 * about late coming workers.
2645 		 */
2646 		synchronize_srcu(&dev->kvm->srcu);
2647 		kvm_for_each_vcpu(i, vcpu, dev->kvm)
2648 			kvm_clear_async_pf_completion_queue(vcpu);
2649 		break;
2650 	case KVM_DEV_FLIC_ADAPTER_REGISTER:
2651 		r = register_io_adapter(dev, attr);
2652 		break;
2653 	case KVM_DEV_FLIC_ADAPTER_MODIFY:
2654 		r = modify_io_adapter(dev, attr);
2655 		break;
2656 	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2657 		r = clear_io_irq(dev->kvm, attr);
2658 		break;
2659 	case KVM_DEV_FLIC_AISM:
2660 		r = modify_ais_mode(dev->kvm, attr);
2661 		break;
2662 	case KVM_DEV_FLIC_AIRQ_INJECT:
2663 		r = flic_inject_airq(dev->kvm, attr);
2664 		break;
2665 	case KVM_DEV_FLIC_AISM_ALL:
2666 		r = flic_ais_mode_set_all(dev->kvm, attr);
2667 		break;
2668 	default:
2669 		r = -EINVAL;
2670 	}
2671 
2672 	return r;
2673 }
2674 
flic_has_attr(struct kvm_device * dev,struct kvm_device_attr * attr)2675 static int flic_has_attr(struct kvm_device *dev,
2676 			     struct kvm_device_attr *attr)
2677 {
2678 	switch (attr->group) {
2679 	case KVM_DEV_FLIC_GET_ALL_IRQS:
2680 	case KVM_DEV_FLIC_ENQUEUE:
2681 	case KVM_DEV_FLIC_CLEAR_IRQS:
2682 	case KVM_DEV_FLIC_APF_ENABLE:
2683 	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2684 	case KVM_DEV_FLIC_ADAPTER_REGISTER:
2685 	case KVM_DEV_FLIC_ADAPTER_MODIFY:
2686 	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2687 	case KVM_DEV_FLIC_AISM:
2688 	case KVM_DEV_FLIC_AIRQ_INJECT:
2689 	case KVM_DEV_FLIC_AISM_ALL:
2690 		return 0;
2691 	}
2692 	return -ENXIO;
2693 }
2694 
flic_create(struct kvm_device * dev,u32 type)2695 static int flic_create(struct kvm_device *dev, u32 type)
2696 {
2697 	if (!dev)
2698 		return -EINVAL;
2699 	if (dev->kvm->arch.flic)
2700 		return -EINVAL;
2701 	dev->kvm->arch.flic = dev;
2702 	return 0;
2703 }
2704 
flic_destroy(struct kvm_device * dev)2705 static void flic_destroy(struct kvm_device *dev)
2706 {
2707 	dev->kvm->arch.flic = NULL;
2708 	kfree(dev);
2709 }
2710 
2711 /* s390 floating irq controller (flic) */
2712 struct kvm_device_ops kvm_flic_ops = {
2713 	.name = "kvm-flic",
2714 	.get_attr = flic_get_attr,
2715 	.set_attr = flic_set_attr,
2716 	.has_attr = flic_has_attr,
2717 	.create = flic_create,
2718 	.destroy = flic_destroy,
2719 };
2720 
get_ind_bit(__u64 addr,unsigned long bit_nr,bool swap)2721 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2722 {
2723 	unsigned long bit;
2724 
2725 	bit = bit_nr + (addr % PAGE_SIZE) * 8;
2726 
2727 	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2728 }
2729 
get_map_page(struct kvm * kvm,u64 uaddr)2730 static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
2731 {
2732 	struct mm_struct *mm = kvm->mm;
2733 	struct page *page = NULL;
2734 	int locked = 1;
2735 
2736 	if (mmget_not_zero(mm)) {
2737 		mmap_read_lock(mm);
2738 		get_user_pages_remote(mm, uaddr, 1, FOLL_WRITE,
2739 				      &page, &locked);
2740 		if (locked)
2741 			mmap_read_unlock(mm);
2742 		mmput(mm);
2743 	}
2744 
2745 	return page;
2746 }
2747 
adapter_indicators_set(struct kvm * kvm,struct s390_io_adapter * adapter,struct kvm_s390_adapter_int * adapter_int)2748 static int adapter_indicators_set(struct kvm *kvm,
2749 				  struct s390_io_adapter *adapter,
2750 				  struct kvm_s390_adapter_int *adapter_int)
2751 {
2752 	unsigned long bit;
2753 	int summary_set, idx;
2754 	struct page *ind_page, *summary_page;
2755 	void *map;
2756 
2757 	ind_page = get_map_page(kvm, adapter_int->ind_addr);
2758 	if (!ind_page)
2759 		return -1;
2760 	summary_page = get_map_page(kvm, adapter_int->summary_addr);
2761 	if (!summary_page) {
2762 		put_page(ind_page);
2763 		return -1;
2764 	}
2765 
2766 	idx = srcu_read_lock(&kvm->srcu);
2767 	map = page_address(ind_page);
2768 	bit = get_ind_bit(adapter_int->ind_addr,
2769 			  adapter_int->ind_offset, adapter->swap);
2770 	set_bit(bit, map);
2771 	mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
2772 	set_page_dirty_lock(ind_page);
2773 	map = page_address(summary_page);
2774 	bit = get_ind_bit(adapter_int->summary_addr,
2775 			  adapter_int->summary_offset, adapter->swap);
2776 	summary_set = test_and_set_bit(bit, map);
2777 	mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
2778 	set_page_dirty_lock(summary_page);
2779 	srcu_read_unlock(&kvm->srcu, idx);
2780 
2781 	put_page(ind_page);
2782 	put_page(summary_page);
2783 	return summary_set ? 0 : 1;
2784 }
2785 
2786 /*
2787  * < 0 - not injected due to error
2788  * = 0 - coalesced, summary indicator already active
2789  * > 0 - injected interrupt
2790  */
set_adapter_int(struct kvm_kernel_irq_routing_entry * e,struct kvm * kvm,int irq_source_id,int level,bool line_status)2791 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2792 			   struct kvm *kvm, int irq_source_id, int level,
2793 			   bool line_status)
2794 {
2795 	int ret;
2796 	struct s390_io_adapter *adapter;
2797 
2798 	/* We're only interested in the 0->1 transition. */
2799 	if (!level)
2800 		return 0;
2801 	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2802 	if (!adapter)
2803 		return -1;
2804 	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2805 	if ((ret > 0) && !adapter->masked) {
2806 		ret = kvm_s390_inject_airq(kvm, adapter);
2807 		if (ret == 0)
2808 			ret = 1;
2809 	}
2810 	return ret;
2811 }
2812 
2813 /*
2814  * Inject the machine check to the guest.
2815  */
kvm_s390_reinject_machine_check(struct kvm_vcpu * vcpu,struct mcck_volatile_info * mcck_info)2816 void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
2817 				     struct mcck_volatile_info *mcck_info)
2818 {
2819 	struct kvm_s390_interrupt_info inti;
2820 	struct kvm_s390_irq irq;
2821 	struct kvm_s390_mchk_info *mchk;
2822 	union mci mci;
2823 	__u64 cr14 = 0;         /* upper bits are not used */
2824 	int rc;
2825 
2826 	mci.val = mcck_info->mcic;
2827 	if (mci.sr)
2828 		cr14 |= CR14_RECOVERY_SUBMASK;
2829 	if (mci.dg)
2830 		cr14 |= CR14_DEGRADATION_SUBMASK;
2831 	if (mci.w)
2832 		cr14 |= CR14_WARNING_SUBMASK;
2833 
2834 	mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
2835 	mchk->cr14 = cr14;
2836 	mchk->mcic = mcck_info->mcic;
2837 	mchk->ext_damage_code = mcck_info->ext_damage_code;
2838 	mchk->failing_storage_address = mcck_info->failing_storage_address;
2839 	if (mci.ck) {
2840 		/* Inject the floating machine check */
2841 		inti.type = KVM_S390_MCHK;
2842 		rc = __inject_vm(vcpu->kvm, &inti);
2843 	} else {
2844 		/* Inject the machine check to specified vcpu */
2845 		irq.type = KVM_S390_MCHK;
2846 		rc = kvm_s390_inject_vcpu(vcpu, &irq);
2847 	}
2848 	WARN_ON_ONCE(rc);
2849 }
2850 
kvm_set_routing_entry(struct kvm * kvm,struct kvm_kernel_irq_routing_entry * e,const struct kvm_irq_routing_entry * ue)2851 int kvm_set_routing_entry(struct kvm *kvm,
2852 			  struct kvm_kernel_irq_routing_entry *e,
2853 			  const struct kvm_irq_routing_entry *ue)
2854 {
2855 	u64 uaddr_s, uaddr_i;
2856 	int idx;
2857 
2858 	switch (ue->type) {
2859 	/* we store the userspace addresses instead of the guest addresses */
2860 	case KVM_IRQ_ROUTING_S390_ADAPTER:
2861 		if (kvm_is_ucontrol(kvm))
2862 			return -EINVAL;
2863 		e->set = set_adapter_int;
2864 
2865 		idx = srcu_read_lock(&kvm->srcu);
2866 		uaddr_s = gpa_to_hva(kvm, ue->u.adapter.summary_addr);
2867 		uaddr_i = gpa_to_hva(kvm, ue->u.adapter.ind_addr);
2868 		srcu_read_unlock(&kvm->srcu, idx);
2869 
2870 		if (kvm_is_error_hva(uaddr_s) || kvm_is_error_hva(uaddr_i))
2871 			return -EFAULT;
2872 		e->adapter.summary_addr = uaddr_s;
2873 		e->adapter.ind_addr = uaddr_i;
2874 		e->adapter.summary_offset = ue->u.adapter.summary_offset;
2875 		e->adapter.ind_offset = ue->u.adapter.ind_offset;
2876 		e->adapter.adapter_id = ue->u.adapter.adapter_id;
2877 		return 0;
2878 	default:
2879 		return -EINVAL;
2880 	}
2881 }
2882 
kvm_set_msi(struct kvm_kernel_irq_routing_entry * e,struct kvm * kvm,int irq_source_id,int level,bool line_status)2883 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2884 		int irq_source_id, int level, bool line_status)
2885 {
2886 	return -EINVAL;
2887 }
2888 
kvm_s390_set_irq_state(struct kvm_vcpu * vcpu,void __user * irqstate,int len)2889 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2890 {
2891 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2892 	struct kvm_s390_irq *buf;
2893 	int r = 0;
2894 	int n;
2895 
2896 	buf = vmalloc(len);
2897 	if (!buf)
2898 		return -ENOMEM;
2899 
2900 	if (copy_from_user((void *) buf, irqstate, len)) {
2901 		r = -EFAULT;
2902 		goto out_free;
2903 	}
2904 
2905 	/*
2906 	 * Don't allow setting the interrupt state
2907 	 * when there are already interrupts pending
2908 	 */
2909 	spin_lock(&li->lock);
2910 	if (li->pending_irqs) {
2911 		r = -EBUSY;
2912 		goto out_unlock;
2913 	}
2914 
2915 	for (n = 0; n < len / sizeof(*buf); n++) {
2916 		r = do_inject_vcpu(vcpu, &buf[n]);
2917 		if (r)
2918 			break;
2919 	}
2920 
2921 out_unlock:
2922 	spin_unlock(&li->lock);
2923 out_free:
2924 	vfree(buf);
2925 
2926 	return r;
2927 }
2928 
store_local_irq(struct kvm_s390_local_interrupt * li,struct kvm_s390_irq * irq,unsigned long irq_type)2929 static void store_local_irq(struct kvm_s390_local_interrupt *li,
2930 			    struct kvm_s390_irq *irq,
2931 			    unsigned long irq_type)
2932 {
2933 	switch (irq_type) {
2934 	case IRQ_PEND_MCHK_EX:
2935 	case IRQ_PEND_MCHK_REP:
2936 		irq->type = KVM_S390_MCHK;
2937 		irq->u.mchk = li->irq.mchk;
2938 		break;
2939 	case IRQ_PEND_PROG:
2940 		irq->type = KVM_S390_PROGRAM_INT;
2941 		irq->u.pgm = li->irq.pgm;
2942 		break;
2943 	case IRQ_PEND_PFAULT_INIT:
2944 		irq->type = KVM_S390_INT_PFAULT_INIT;
2945 		irq->u.ext = li->irq.ext;
2946 		break;
2947 	case IRQ_PEND_EXT_EXTERNAL:
2948 		irq->type = KVM_S390_INT_EXTERNAL_CALL;
2949 		irq->u.extcall = li->irq.extcall;
2950 		break;
2951 	case IRQ_PEND_EXT_CLOCK_COMP:
2952 		irq->type = KVM_S390_INT_CLOCK_COMP;
2953 		break;
2954 	case IRQ_PEND_EXT_CPU_TIMER:
2955 		irq->type = KVM_S390_INT_CPU_TIMER;
2956 		break;
2957 	case IRQ_PEND_SIGP_STOP:
2958 		irq->type = KVM_S390_SIGP_STOP;
2959 		irq->u.stop = li->irq.stop;
2960 		break;
2961 	case IRQ_PEND_RESTART:
2962 		irq->type = KVM_S390_RESTART;
2963 		break;
2964 	case IRQ_PEND_SET_PREFIX:
2965 		irq->type = KVM_S390_SIGP_SET_PREFIX;
2966 		irq->u.prefix = li->irq.prefix;
2967 		break;
2968 	}
2969 }
2970 
kvm_s390_get_irq_state(struct kvm_vcpu * vcpu,__u8 __user * buf,int len)2971 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2972 {
2973 	int scn;
2974 	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
2975 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2976 	unsigned long pending_irqs;
2977 	struct kvm_s390_irq irq;
2978 	unsigned long irq_type;
2979 	int cpuaddr;
2980 	int n = 0;
2981 
2982 	spin_lock(&li->lock);
2983 	pending_irqs = li->pending_irqs;
2984 	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2985 	       sizeof(sigp_emerg_pending));
2986 	spin_unlock(&li->lock);
2987 
2988 	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2989 		memset(&irq, 0, sizeof(irq));
2990 		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2991 			continue;
2992 		if (n + sizeof(irq) > len)
2993 			return -ENOBUFS;
2994 		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2995 		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2996 			return -EFAULT;
2997 		n += sizeof(irq);
2998 	}
2999 
3000 	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
3001 		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
3002 			memset(&irq, 0, sizeof(irq));
3003 			if (n + sizeof(irq) > len)
3004 				return -ENOBUFS;
3005 			irq.type = KVM_S390_INT_EMERGENCY;
3006 			irq.u.emerg.code = cpuaddr;
3007 			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
3008 				return -EFAULT;
3009 			n += sizeof(irq);
3010 		}
3011 	}
3012 
3013 	if (sca_ext_call_pending(vcpu, &scn)) {
3014 		if (n + sizeof(irq) > len)
3015 			return -ENOBUFS;
3016 		memset(&irq, 0, sizeof(irq));
3017 		irq.type = KVM_S390_INT_EXTERNAL_CALL;
3018 		irq.u.extcall.code = scn;
3019 		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
3020 			return -EFAULT;
3021 		n += sizeof(irq);
3022 	}
3023 
3024 	return n;
3025 }
3026 
__airqs_kick_single_vcpu(struct kvm * kvm,u8 deliverable_mask)3027 static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
3028 {
3029 	int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
3030 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3031 	struct kvm_vcpu *vcpu;
3032 	u8 vcpu_isc_mask;
3033 
3034 	for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
3035 		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
3036 		if (psw_ioint_disabled(vcpu))
3037 			continue;
3038 		vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
3039 		if (deliverable_mask & vcpu_isc_mask) {
3040 			/* lately kicked but not yet running */
3041 			if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
3042 				return;
3043 			kvm_s390_vcpu_wakeup(vcpu);
3044 			return;
3045 		}
3046 	}
3047 }
3048 
gisa_vcpu_kicker(struct hrtimer * timer)3049 static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
3050 {
3051 	struct kvm_s390_gisa_interrupt *gi =
3052 		container_of(timer, struct kvm_s390_gisa_interrupt, timer);
3053 	struct kvm *kvm =
3054 		container_of(gi->origin, struct sie_page2, gisa)->kvm;
3055 	u8 pending_mask;
3056 
3057 	pending_mask = gisa_get_ipm_or_restore_iam(gi);
3058 	if (pending_mask) {
3059 		__airqs_kick_single_vcpu(kvm, pending_mask);
3060 		hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
3061 		return HRTIMER_RESTART;
3062 	}
3063 
3064 	return HRTIMER_NORESTART;
3065 }
3066 
3067 #define NULL_GISA_ADDR 0x00000000UL
3068 #define NONE_GISA_ADDR 0x00000001UL
3069 #define GISA_ADDR_MASK 0xfffff000UL
3070 
process_gib_alert_list(void)3071 static void process_gib_alert_list(void)
3072 {
3073 	struct kvm_s390_gisa_interrupt *gi;
3074 	u32 final, gisa_phys, origin = 0UL;
3075 	struct kvm_s390_gisa *gisa;
3076 	struct kvm *kvm;
3077 
3078 	do {
3079 		/*
3080 		 * If the NONE_GISA_ADDR is still stored in the alert list
3081 		 * origin, we will leave the outer loop. No further GISA has
3082 		 * been added to the alert list by millicode while processing
3083 		 * the current alert list.
3084 		 */
3085 		final = (origin & NONE_GISA_ADDR);
3086 		/*
3087 		 * Cut off the alert list and store the NONE_GISA_ADDR in the
3088 		 * alert list origin to avoid further GAL interruptions.
3089 		 * A new alert list can be build up by millicode in parallel
3090 		 * for guests not in the yet cut-off alert list. When in the
3091 		 * final loop, store the NULL_GISA_ADDR instead. This will re-
3092 		 * enable GAL interruptions on the host again.
3093 		 */
3094 		origin = xchg(&gib->alert_list_origin,
3095 			      (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
3096 		/*
3097 		 * Loop through the just cut-off alert list and start the
3098 		 * gisa timers to kick idle vcpus to consume the pending
3099 		 * interruptions asap.
3100 		 */
3101 		while (origin & GISA_ADDR_MASK) {
3102 			gisa_phys = origin;
3103 			gisa = phys_to_virt(gisa_phys);
3104 			origin = gisa->next_alert;
3105 			gisa->next_alert = gisa_phys;
3106 			kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
3107 			gi = &kvm->arch.gisa_int;
3108 			if (hrtimer_active(&gi->timer))
3109 				hrtimer_cancel(&gi->timer);
3110 			hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
3111 		}
3112 	} while (!final);
3113 
3114 }
3115 
kvm_s390_gisa_clear(struct kvm * kvm)3116 void kvm_s390_gisa_clear(struct kvm *kvm)
3117 {
3118 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3119 
3120 	if (!gi->origin)
3121 		return;
3122 	gisa_clear_ipm(gi->origin);
3123 	VM_EVENT(kvm, 3, "gisa 0x%p cleared", gi->origin);
3124 }
3125 
kvm_s390_gisa_init(struct kvm * kvm)3126 void kvm_s390_gisa_init(struct kvm *kvm)
3127 {
3128 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3129 
3130 	if (!css_general_characteristics.aiv)
3131 		return;
3132 	gi->origin = &kvm->arch.sie_page2->gisa;
3133 	gi->alert.mask = 0;
3134 	spin_lock_init(&gi->alert.ref_lock);
3135 	gi->expires = 50 * 1000; /* 50 usec */
3136 	hrtimer_setup(&gi->timer, gisa_vcpu_kicker, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3137 	memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
3138 	gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
3139 	VM_EVENT(kvm, 3, "gisa 0x%p initialized", gi->origin);
3140 }
3141 
kvm_s390_gisa_enable(struct kvm * kvm)3142 void kvm_s390_gisa_enable(struct kvm *kvm)
3143 {
3144 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3145 	struct kvm_vcpu *vcpu;
3146 	unsigned long i;
3147 	u32 gisa_desc;
3148 
3149 	if (gi->origin)
3150 		return;
3151 	kvm_s390_gisa_init(kvm);
3152 	gisa_desc = kvm_s390_get_gisa_desc(kvm);
3153 	if (!gisa_desc)
3154 		return;
3155 	kvm_for_each_vcpu(i, vcpu, kvm) {
3156 		mutex_lock(&vcpu->mutex);
3157 		vcpu->arch.sie_block->gd = gisa_desc;
3158 		vcpu->arch.sie_block->eca |= ECA_AIV;
3159 		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3160 			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3161 		mutex_unlock(&vcpu->mutex);
3162 	}
3163 }
3164 
kvm_s390_gisa_destroy(struct kvm * kvm)3165 void kvm_s390_gisa_destroy(struct kvm *kvm)
3166 {
3167 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3168 	struct kvm_s390_gisa *gisa = gi->origin;
3169 
3170 	if (!gi->origin)
3171 		return;
3172 	WARN(gi->alert.mask != 0x00,
3173 	     "unexpected non zero alert.mask 0x%02x",
3174 	     gi->alert.mask);
3175 	gi->alert.mask = 0x00;
3176 	if (gisa_set_iam(gi->origin, gi->alert.mask))
3177 		process_gib_alert_list();
3178 	hrtimer_cancel(&gi->timer);
3179 	gi->origin = NULL;
3180 	VM_EVENT(kvm, 3, "gisa 0x%p destroyed", gisa);
3181 }
3182 
kvm_s390_gisa_disable(struct kvm * kvm)3183 void kvm_s390_gisa_disable(struct kvm *kvm)
3184 {
3185 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3186 	struct kvm_vcpu *vcpu;
3187 	unsigned long i;
3188 
3189 	if (!gi->origin)
3190 		return;
3191 	kvm_for_each_vcpu(i, vcpu, kvm) {
3192 		mutex_lock(&vcpu->mutex);
3193 		vcpu->arch.sie_block->eca &= ~ECA_AIV;
3194 		vcpu->arch.sie_block->gd = 0U;
3195 		mutex_unlock(&vcpu->mutex);
3196 		VCPU_EVENT(vcpu, 3, "AIV disabled for cpu %03u", vcpu->vcpu_id);
3197 	}
3198 	kvm_s390_gisa_destroy(kvm);
3199 }
3200 
3201 /**
3202  * kvm_s390_gisc_register - register a guest ISC
3203  *
3204  * @kvm:  the kernel vm to work with
3205  * @gisc: the guest interruption sub class to register
3206  *
3207  * The function extends the vm specific alert mask to use.
3208  * The effective IAM mask in the GISA is updated as well
3209  * in case the GISA is not part of the GIB alert list.
3210  * It will be updated latest when the IAM gets restored
3211  * by gisa_get_ipm_or_restore_iam().
3212  *
3213  * Returns: the nonspecific ISC (NISC) the gib alert mechanism
3214  *          has registered with the channel subsystem.
3215  *          -ENODEV in case the vm uses no GISA
3216  *          -ERANGE in case the guest ISC is invalid
3217  */
kvm_s390_gisc_register(struct kvm * kvm,u32 gisc)3218 int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
3219 {
3220 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3221 
3222 	if (!gi->origin)
3223 		return -ENODEV;
3224 	if (gisc > MAX_ISC)
3225 		return -ERANGE;
3226 
3227 	spin_lock(&gi->alert.ref_lock);
3228 	gi->alert.ref_count[gisc]++;
3229 	if (gi->alert.ref_count[gisc] == 1) {
3230 		gi->alert.mask |= 0x80 >> gisc;
3231 		gisa_set_iam(gi->origin, gi->alert.mask);
3232 	}
3233 	spin_unlock(&gi->alert.ref_lock);
3234 
3235 	return gib->nisc;
3236 }
3237 EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);
3238 
3239 /**
3240  * kvm_s390_gisc_unregister - unregister a guest ISC
3241  *
3242  * @kvm:  the kernel vm to work with
3243  * @gisc: the guest interruption sub class to register
3244  *
3245  * The function reduces the vm specific alert mask to use.
3246  * The effective IAM mask in the GISA is updated as well
3247  * in case the GISA is not part of the GIB alert list.
3248  * It will be updated latest when the IAM gets restored
3249  * by gisa_get_ipm_or_restore_iam().
3250  *
3251  * Returns: the nonspecific ISC (NISC) the gib alert mechanism
3252  *          has registered with the channel subsystem.
3253  *          -ENODEV in case the vm uses no GISA
3254  *          -ERANGE in case the guest ISC is invalid
3255  *          -EINVAL in case the guest ISC is not registered
3256  */
kvm_s390_gisc_unregister(struct kvm * kvm,u32 gisc)3257 int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
3258 {
3259 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3260 	int rc = 0;
3261 
3262 	if (!gi->origin)
3263 		return -ENODEV;
3264 	if (gisc > MAX_ISC)
3265 		return -ERANGE;
3266 
3267 	spin_lock(&gi->alert.ref_lock);
3268 	if (gi->alert.ref_count[gisc] == 0) {
3269 		rc = -EINVAL;
3270 		goto out;
3271 	}
3272 	gi->alert.ref_count[gisc]--;
3273 	if (gi->alert.ref_count[gisc] == 0) {
3274 		gi->alert.mask &= ~(0x80 >> gisc);
3275 		gisa_set_iam(gi->origin, gi->alert.mask);
3276 	}
3277 out:
3278 	spin_unlock(&gi->alert.ref_lock);
3279 
3280 	return rc;
3281 }
3282 EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
3283 
aen_host_forward(unsigned long si)3284 static void aen_host_forward(unsigned long si)
3285 {
3286 	struct kvm_s390_gisa_interrupt *gi;
3287 	struct zpci_gaite *gaite;
3288 	struct kvm *kvm;
3289 
3290 	gaite = (struct zpci_gaite *)aift->gait +
3291 		(si * sizeof(struct zpci_gaite));
3292 	if (gaite->count == 0)
3293 		return;
3294 	if (gaite->aisb != 0)
3295 		set_bit_inv(gaite->aisbo, phys_to_virt(gaite->aisb));
3296 
3297 	kvm = kvm_s390_pci_si_to_kvm(aift, si);
3298 	if (!kvm)
3299 		return;
3300 	gi = &kvm->arch.gisa_int;
3301 
3302 	if (!(gi->origin->g1.simm & AIS_MODE_MASK(gaite->gisc)) ||
3303 	    !(gi->origin->g1.nimm & AIS_MODE_MASK(gaite->gisc))) {
3304 		gisa_set_ipm_gisc(gi->origin, gaite->gisc);
3305 		if (hrtimer_active(&gi->timer))
3306 			hrtimer_cancel(&gi->timer);
3307 		hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
3308 		kvm->stat.aen_forward++;
3309 	}
3310 }
3311 
aen_process_gait(u8 isc)3312 static void aen_process_gait(u8 isc)
3313 {
3314 	bool found = false, first = true;
3315 	union zpci_sic_iib iib = {{0}};
3316 	unsigned long si, flags;
3317 
3318 	spin_lock_irqsave(&aift->gait_lock, flags);
3319 
3320 	if (!aift->gait) {
3321 		spin_unlock_irqrestore(&aift->gait_lock, flags);
3322 		return;
3323 	}
3324 
3325 	for (si = 0;;) {
3326 		/* Scan adapter summary indicator bit vector */
3327 		si = airq_iv_scan(aift->sbv, si, airq_iv_end(aift->sbv));
3328 		if (si == -1UL) {
3329 			if (first || found) {
3330 				/* Re-enable interrupts. */
3331 				zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, isc,
3332 						  &iib);
3333 				first = found = false;
3334 			} else {
3335 				/* Interrupts on and all bits processed */
3336 				break;
3337 			}
3338 			found = false;
3339 			si = 0;
3340 			/* Scan again after re-enabling interrupts */
3341 			continue;
3342 		}
3343 		found = true;
3344 		aen_host_forward(si);
3345 	}
3346 
3347 	spin_unlock_irqrestore(&aift->gait_lock, flags);
3348 }
3349 
gib_alert_irq_handler(struct airq_struct * airq,struct tpi_info * tpi_info)3350 static void gib_alert_irq_handler(struct airq_struct *airq,
3351 				  struct tpi_info *tpi_info)
3352 {
3353 	struct tpi_adapter_info *info = (struct tpi_adapter_info *)tpi_info;
3354 
3355 	inc_irq_stat(IRQIO_GAL);
3356 
3357 	if ((info->forward || info->error) &&
3358 	    IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3359 		aen_process_gait(info->isc);
3360 		if (info->aism != 0)
3361 			process_gib_alert_list();
3362 	} else {
3363 		process_gib_alert_list();
3364 	}
3365 }
3366 
3367 static struct airq_struct gib_alert_irq = {
3368 	.handler = gib_alert_irq_handler,
3369 };
3370 
kvm_s390_gib_destroy(void)3371 void kvm_s390_gib_destroy(void)
3372 {
3373 	if (!gib)
3374 		return;
3375 	if (kvm_s390_pci_interp_allowed() && aift) {
3376 		mutex_lock(&aift->aift_lock);
3377 		kvm_s390_pci_aen_exit();
3378 		mutex_unlock(&aift->aift_lock);
3379 	}
3380 	chsc_sgib(0);
3381 	unregister_adapter_interrupt(&gib_alert_irq);
3382 	free_page((unsigned long)gib);
3383 	gib = NULL;
3384 }
3385 
kvm_s390_gib_init(u8 nisc)3386 int __init kvm_s390_gib_init(u8 nisc)
3387 {
3388 	u32 gib_origin;
3389 	int rc = 0;
3390 
3391 	if (!css_general_characteristics.aiv) {
3392 		KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
3393 		goto out;
3394 	}
3395 
3396 	gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3397 	if (!gib) {
3398 		rc = -ENOMEM;
3399 		goto out;
3400 	}
3401 
3402 	gib_alert_irq.isc = nisc;
3403 	if (register_adapter_interrupt(&gib_alert_irq)) {
3404 		pr_err("Registering the GIB alert interruption handler failed\n");
3405 		rc = -EIO;
3406 		goto out_free_gib;
3407 	}
3408 	/* adapter interrupts used for AP (applicable here) don't use the LSI */
3409 	*gib_alert_irq.lsi_ptr = 0xff;
3410 
3411 	gib->nisc = nisc;
3412 	gib_origin = virt_to_phys(gib);
3413 	if (chsc_sgib(gib_origin)) {
3414 		pr_err("Associating the GIB with the AIV facility failed\n");
3415 		free_page((unsigned long)gib);
3416 		gib = NULL;
3417 		rc = -EIO;
3418 		goto out_unreg_gal;
3419 	}
3420 
3421 	if (kvm_s390_pci_interp_allowed()) {
3422 		if (kvm_s390_pci_aen_init(nisc)) {
3423 			pr_err("Initializing AEN for PCI failed\n");
3424 			rc = -EIO;
3425 			goto out_unreg_gal;
3426 		}
3427 	}
3428 
3429 	KVM_EVENT(3, "gib 0x%p (nisc=%d) initialized", gib, gib->nisc);
3430 	goto out;
3431 
3432 out_unreg_gal:
3433 	unregister_adapter_interrupt(&gib_alert_irq);
3434 out_free_gib:
3435 	free_page((unsigned long)gib);
3436 	gib = NULL;
3437 out:
3438 	return rc;
3439 }
3440