xref: /linux/arch/s390/kvm/kvm-s390.c (revision e3372ffb5f9e2dda3da259b768aab6271672b90d)
// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-s390: " fmt

#include <linux/compiler.h>
#include <linux/entry-virt.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cpufeature.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/machine.h>
#include <asm/stp.h>
#include <asm/gmap_helpers.h>
#include <asm/nmi.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/asm.h>
#include <asm/fpu.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "gmap.h"
#include "faultin.h"
#include "pci.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

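/*
 * Sizing sketch (inferred, not normative): the KVM_S390_{GET,SET}_IRQ_STATE
 * ioctls must be able to transfer every pending local interrupt. Besides up
 * to LOCAL_IRQS "ordinary" local interrupts, every other vcpu may have an
 * emergency signal or external call pending for this vcpu, hence the
 * KVM_MAX_VCPUS term in VCPU_IRQS_MAX_BUF above.
 */
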
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward),
	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
	STATS_DESC_COUNTER(VM, gmap_shadow_create),
	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

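/*
 * Layout sketch of the binary stats file that the header above describes
 * (assuming the generic KVM stats ABI): the header is followed by the id
 * string of KVM_STATS_NAME_SIZE bytes, then the descriptor array, then the
 * u64 data values, i.e. for the VM file:
 *
 *	id_offset   = sizeof(struct kvm_stats_header)
 *	desc_offset = id_offset + KVM_STATS_NAME_SIZE
 *	data_offset = desc_offset + sizeof(kvm_vm_stats_desc)
 */
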
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync),
	STATS_DESC_COUNTER(VCPU, signal_exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, 0444);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");

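/*
 * Usage sketch for the module parameters above: the 0644 parameters can be
 * tuned at runtime via /sys/module/kvm/parameters/, while nested, hpage and
 * async_destroy can only be chosen at load time, e.g.
 *
 *	modprobe kvm nested=1 hpage=0
 *
 * Per the comments above, hpage backing is only allowed if !nested.
 */
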
/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go
 * beyond this, it will require code changes, but the external uapi can
 * stay the same.
 */
#define SIZE_INTERNAL 16

/*
 * Base facility mask that provides the default mask for facilities.
 * Consists of the defines in FACILITIES_KVM and the non-hypervisor-managed
 * bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended facility mask. Consists of the defines in FACILITIES_KVM_CPUMODEL,
 * i.e. the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
/* forward declarations */
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

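/*
 * Worked example (sketch) for the 128-bit epoch update above: a TOD jump
 * of delta = 1 turns into delta = -1 (~0ULL) and delta_idx = -1. With
 * epoch = 5, epdx = 0: epoch becomes 4 and the carry check fires
 * (4 < ~0ULL), so epdx gets -1 + 1 = 0 (no borrow). With epoch = 0,
 * epdx = 3: epoch wraps to ~0ULL without the check firing, so epdx gets
 * -1 and the borrow propagates correctly: (3:0) -> (2:~0ULL).
 */
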
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [function] "d" (function)
		: CC_CLOBBER_LIST("0"));
	return CC_TRANSFORM(cc) == 0;
}

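/*
 * Sketch of the PLO query convention used above: PERFORM LOCKED OPERATION
 * with bit 0x100 set in the function code (GR0) only tests whether
 * function "nr" is installed instead of executing it, and condition code
 * 0 means installed. kvm_s390_cpu_feat_init() below probes all 256
 * possible function codes this way, e.g. plo_test_bit(0) for function 0.
 */
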
352 {
353 	asm volatile(
354 		"	lghi	0,0\n"
355 		"	.insn   rsy,0xeb0000000016,0,0,%[query]"
356 		: [query] "=QS" (*query)
357 		:
358 		: "cc", "0");
359 }
360 
361 static __always_inline void __sortl_query(u8 (*query)[32])
362 {
363 	asm volatile(
364 		"	lghi	0,0\n"
365 		"	la	1,%[query]\n"
366 		/* Parameter registers are ignored */
367 		"	.insn	rre,0xb9380000,2,4"
368 		: [query] "=R" (*query)
369 		:
370 		: "cc", "0", "1");
371 }
372 
373 static __always_inline void __dfltcc_query(u8 (*query)[32])
374 {
375 	asm volatile(
376 		"	lghi	0,0\n"
377 		"	la	1,%[query]\n"
378 		/* Parameter registers are ignored */
379 		"	.insn	rrf,0xb9390000,2,4,6,0"
380 		: [query] "=R" (*query)
381 		:
382 		: "cc", "0", "1");
383 }
384 
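/*
 * Common pattern of the three query helpers above (a sketch mirroring the
 * cpacf conventions): GR0 = 0 selects the query subfunction and GR1
 * (where used) points to the parameter block that receives the
 * availability bit mask; kvm_s390_cpu_feat_init() below copies those
 * masks into kvm_s390_available_subfunc.
 */
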
static void __init kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__sortl_query(&kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__dfltcc_query(&kvm_s390_available_subfunc.dfltcc);

	if (test_facility(201))	/* PFCR */
		pfcr_query(&kvm_s390_available_subfunc.pfcr);

	if (machine_has_esop())
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !machine_has_esop() || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will cause
	 * pages to be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);

	return 0;

err_gib:
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

static void __kvm_s390_exit(void)
{
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);

	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

static int kvm_s390_keyop(struct kvm_s390_mmu_cache *mc, struct kvm *kvm, int op,
			  unsigned long addr, union skey skey)
{
	union asce asce = kvm->arch.gmap->asce;
	gfn_t gfn = gpa_to_gfn(addr);
	int r;

	guard(read_lock)(&kvm->mmu_lock);

	switch (op) {
	case KVM_S390_KEYOP_SSKE:
		r = dat_cond_set_storage_key(mc, asce, gfn, skey, &skey, 0, 0, 0);
		if (r >= 0)
			return skey.skey;
		break;
	case KVM_S390_KEYOP_ISKE:
		r = dat_get_storage_key(asce, gfn, &skey);
		if (!r)
			return skey.skey;
		break;
	case KVM_S390_KEYOP_RRBE:
		r = dat_reset_reference_bit(asce, gfn);
		if (r > 0)
			return r << 1;
		break;
	default:
		return -EINVAL;
	}
	return r;
}

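/*
 * Return convention of kvm_s390_keyop() above (inferred from the code,
 * not normative): on success, ISKE yields the fetched storage key byte,
 * SSKE yields the key as updated through the &skey out-parameter, and
 * RRBE yields the old reference state from dat_reset_reference_bit()
 * shifted left by one; negative return values are errnos.
 */
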
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return 0;
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
	case KVM_CAP_S390_USER_OPEREXEC:
	case KVM_CAP_S390_KEYOP:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !(kvm && kvm_is_ucontrol(kvm)))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		/*
		 * Return the same value for KVM_CAP_MAX_VCPUS and
		 * KVM_CAP_MAX_VCPU_ID to conform with the KVM API.
		 */
		r = KVM_S390_ESCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = machine_has_esop();
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = test_facility(129);
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	gfn_t last_gfn = memslot->base_gfn + memslot->npages;

	scoped_guard(read_lock, &kvm->mmu_lock)
		gmap_sync_dirty_log(kvm->arch.gmap, memslot->base_gfn, last_gfn);
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (cpu_has_vx()) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			if (test_facility(198)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 198);
				set_kvm_facility(kvm->arch.model.fac_list, 198);
			}
			if (test_facility(199)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 199);
				set_kvm_facility(kvm->arch.model.fac_list, 199);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			set_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &kvm->arch.gmap->flags);
			/*
			 * We might have to create fake 4k page
			 * tables. To prevent the hardware from working
			 * on stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_OPEREXEC:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_OPEREXEC");
		kvm->arch.user_operexec = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA: {
		gfn_t start_gfn = 0;

		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		do {
			start_gfn = dat_reset_cmma(kvm->arch.gmap->asce, start_gfn);
			cond_resched();
		} while (start_gfn);
		ret = 0;
		break;
	}
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		ret = -EBUSY;
		if (!kvm->created_vcpus)
			ret = gmap_set_limit(kvm->arch.gmap, gpa_to_gfn(new_limit));
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%p",
			 (void *)kvm->arch.gmap->asce.val);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after the guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}

void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	if (!kvm_s390_pci_interp_allowed())
		return;

	/*
	 * If the host is configured for PCI and the necessary facilities
	 * are available, turn on interpretation for the life of this guest.
	 */
	kvm->arch.use_zpci_interp = 1;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_pci_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		ram_pages += ms->npages;
	}
	/* mark all the pages as dirty */
	gmap_set_cmma_all_dirty(kvm->arch.gmap);
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

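/*
 * Userspace sketch (hypothetical snippet, uapi structures only):
 * migration mode is driven through the KVM_S390_VM_MIGRATION attribute
 * group on the VM fd, e.g. to query the status read back a u64:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_STATUS,
 *		.addr  = (__u64)(uintptr_t)&mig,
 *	};
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 */
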
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	__kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	__kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so trying
	 * to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
	VM_EVENT(kvm, 3, "SET: guest PFCR   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pfcr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pfcr)[1]);

	return 0;
}

#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK	\
(						\
	((struct kvm_s390_vm_cpu_uv_feat){	\
		.ap = 1,			\
		.ap_intr = 1,			\
	})					\
	.feat					\
)

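/*
 * Sketch of what the macro above evaluates to: a compound literal with
 * only the .ap and .ap_intr bitfields set, reduced to its raw .feat
 * word, so the mask stays in sync with the struct layout.
 * kvm_s390_set_uv_feat() below uses it to reject guest UV feature bits
 * that are not both supported here and indicated by the ultravisor.
 */
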
1571 static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1572 {
1573 	struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
1574 	unsigned long data, filter;
1575 
1576 	filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1577 	if (get_user(data, &ptr->feat))
1578 		return -EFAULT;
1579 	if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
1580 		return -EINVAL;
1581 
1582 	mutex_lock(&kvm->lock);
1583 	if (kvm->created_vcpus) {
1584 		mutex_unlock(&kvm->lock);
1585 		return -EBUSY;
1586 	}
1587 	kvm->arch.model.uv_feat_guest.feat = data;
1588 	mutex_unlock(&kvm->lock);
1589 
1590 	VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);
1591 
1592 	return 0;
1593 }
1594 
1595 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1596 {
1597 	int ret = -ENXIO;
1598 
1599 	switch (attr->attr) {
1600 	case KVM_S390_VM_CPU_PROCESSOR:
1601 		ret = kvm_s390_set_processor(kvm, attr);
1602 		break;
1603 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1604 		ret = kvm_s390_set_processor_feat(kvm, attr);
1605 		break;
1606 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1607 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
1608 		break;
1609 	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1610 		ret = kvm_s390_set_uv_feat(kvm, attr);
1611 		break;
1612 	}
1613 	return ret;
1614 }
1615 
1616 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1617 {
1618 	struct kvm_s390_vm_cpu_processor *proc;
1619 	int ret = 0;
1620 
1621 	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1622 	if (!proc) {
1623 		ret = -ENOMEM;
1624 		goto out;
1625 	}
1626 	proc->cpuid = kvm->arch.model.cpuid;
1627 	proc->ibc = kvm->arch.model.ibc;
1628 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1629 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1630 	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1631 		 kvm->arch.model.ibc,
1632 		 kvm->arch.model.cpuid);
1633 	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1634 		 kvm->arch.model.fac_list[0],
1635 		 kvm->arch.model.fac_list[1],
1636 		 kvm->arch.model.fac_list[2]);
1637 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1638 		ret = -EFAULT;
1639 	kfree(proc);
1640 out:
1641 	return ret;
1642 }
1643 
1644 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1645 {
1646 	struct kvm_s390_vm_cpu_machine *mach;
1647 	int ret = 0;
1648 
1649 	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
1650 	if (!mach) {
1651 		ret = -ENOMEM;
1652 		goto out;
1653 	}
1654 	get_cpu_id((struct cpuid *) &mach->cpuid);
1655 	mach->ibc = sclp.ibc;
1656 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1657 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1658 	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
1659 	       sizeof(stfle_fac_list));
1660 	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1661 		 kvm->arch.model.ibc,
1662 		 kvm->arch.model.cpuid);
1663 	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1664 		 mach->fac_mask[0],
1665 		 mach->fac_mask[1],
1666 		 mach->fac_mask[2]);
1667 	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1668 		 mach->fac_list[0],
1669 		 mach->fac_list[1],
1670 		 mach->fac_list[2]);
1671 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1672 		ret = -EFAULT;
1673 	kfree(mach);
1674 out:
1675 	return ret;
1676 }
1677 
1678 static int kvm_s390_get_processor_feat(struct kvm *kvm,
1679 				       struct kvm_device_attr *attr)
1680 {
1681 	struct kvm_s390_vm_cpu_feat data;
1682 
1683 	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1684 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1685 		return -EFAULT;
1686 	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1687 			 data.feat[0],
1688 			 data.feat[1],
1689 			 data.feat[2]);
1690 	return 0;
1691 }
1692 
1693 static int kvm_s390_get_machine_feat(struct kvm *kvm,
1694 				     struct kvm_device_attr *attr)
1695 {
1696 	struct kvm_s390_vm_cpu_feat data;
1697 
1698 	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1699 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1700 		return -EFAULT;
1701 	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
1702 			 data.feat[0],
1703 			 data.feat[1],
1704 			 data.feat[2]);
1705 	return 0;
1706 }
1707 
1708 static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1709 					  struct kvm_device_attr *attr)
1710 {
1711 	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1712 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1713 		return -EFAULT;
1714 
1715 	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1716 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1717 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1718 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1719 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1720 	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1721 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1722 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1723 	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1724 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1725 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1726 	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1727 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1728 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1729 	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1730 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1731 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1732 	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1733 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1734 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1735 	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1736 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1737 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1738 	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1739 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1740 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1741 	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1742 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1743 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1744 	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1745 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1746 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1747 	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1748 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1749 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1750 	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1751 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1752 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1753 	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1754 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1755 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1756 	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1757 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1758 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1759 	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1760 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1761 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1762 	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1763 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1764 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1765 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1766 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1767 	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1768 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1769 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1770 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1771 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1772 	VM_EVENT(kvm, 3, "GET: guest PFCR   subfunc 0x%16.16lx.%16.16lx",
1773 		 ((unsigned long *) &kvm->arch.model.subfuncs.pfcr)[0],
1774 		 ((unsigned long *) &kvm->arch.model.subfuncs.pfcr)[1]);
1775 
1776 	return 0;
1777 }
1778 
1779 static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1780 					struct kvm_device_attr *attr)
1781 {
1782 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1783 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1784 		return -EFAULT;
1785 
1786 	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1787 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1788 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1789 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1790 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1791 	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
1792 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1793 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1794 	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
1795 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1796 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1797 	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
1798 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1799 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1800 	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
1801 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1802 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1803 	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
1804 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1805 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1806 	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
1807 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1808 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1809 	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
1810 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1811 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1812 	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
1813 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1814 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1815 	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
1816 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1817 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1818 	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
1819 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1820 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1821 	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
1822 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1823 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1824 	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
1825 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1826 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1827 	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
1828 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1829 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
1830 	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
1831 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1832 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1833 	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1834 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1835 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1836 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1837 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
1838 	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1839 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1840 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1841 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1842 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
1843 	VM_EVENT(kvm, 3, "GET: host  PFCR   subfunc 0x%16.16lx.%16.16lx",
1844 		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1845 		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
1846 
1847 	return 0;
1848 }
1849 
1850 static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1851 {
1852 	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1853 	unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
1854 
1855 	if (put_user(feat, &dst->feat))
1856 		return -EFAULT;
1857 	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
1858 
1859 	return 0;
1860 }
1861 
1862 static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1863 {
1864 	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1865 	unsigned long feat;
1866 
1867 	BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));
1868 
1869 	feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1870 	if (put_user(feat, &dst->feat))
1871 		return -EFAULT;
1872 	VM_EVENT(kvm, 3, "GET: host UV-feat:  0x%16.16lx", feat);
1873 
1874 	return 0;
1875 }
1876 
1877 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1878 {
1879 	int ret = -ENXIO;
1880 
1881 	switch (attr->attr) {
1882 	case KVM_S390_VM_CPU_PROCESSOR:
1883 		ret = kvm_s390_get_processor(kvm, attr);
1884 		break;
1885 	case KVM_S390_VM_CPU_MACHINE:
1886 		ret = kvm_s390_get_machine(kvm, attr);
1887 		break;
1888 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1889 		ret = kvm_s390_get_processor_feat(kvm, attr);
1890 		break;
1891 	case KVM_S390_VM_CPU_MACHINE_FEAT:
1892 		ret = kvm_s390_get_machine_feat(kvm, attr);
1893 		break;
1894 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1895 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
1896 		break;
1897 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1898 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
1899 		break;
1900 	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1901 		ret = kvm_s390_get_processor_uv_feat(kvm, attr);
1902 		break;
1903 	case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
1904 		ret = kvm_s390_get_machine_uv_feat(kvm, attr);
1905 		break;
1906 	}
1907 	return ret;
1908 }
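
/*
 * Illustrative sketch, not kernel code: userspace would query the machine
 * model through KVM_GET_DEVICE_ATTR on the VM file descriptor roughly as
 * below ("vm_fd" is an assumption of this example).
 *
 *	struct kvm_s390_vm_cpu_machine mach;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr = KVM_S390_VM_CPU_MACHINE,
 *		.addr = (__u64)(unsigned long)&mach,
 *	};
 *
 *	if (!ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
 *		printf("host ibc 0x%4.4x\n", mach.ibc);
 */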
1909 
1910 /**
1911  * kvm_s390_update_topology_change_report - update CPU topology change report
1912  * @kvm: guest KVM description
1913  * @val: set or clear the MTCR bit
1914  *
1915  * Updates the Multiprocessor Topology-Change-Report bit to signal
1916  * a topology change to the guest.
1917  * This is only relevant if the topology facility is present.
1918  */
1919 static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
1920 {
1921 	union sca_utility new, old;
1922 	struct esca_block *sca;
1923 
1924 	sca = kvm->arch.sca;
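	/* Lockless update of the SCA utility field: retry if a concurrent writer races with us. */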
1925 	old = READ_ONCE(sca->utility);
1926 	do {
1927 		new = old;
1928 		new.mtcr = val;
1929 	} while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
1930 }
1931 
1932 static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
1933 					       struct kvm_device_attr *attr)
1934 {
1935 	if (!test_kvm_facility(kvm, 11))
1936 		return -ENXIO;
1937 
1938 	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1939 	return 0;
1940 }
1941 
1942 static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
1943 					       struct kvm_device_attr *attr)
1944 {
1945 	u8 topo;
1946 
1947 	if (!test_kvm_facility(kvm, 11))
1948 		return -ENXIO;
1949 
1950 	topo = kvm->arch.sca->utility.mtcr;
1951 
1952 	return put_user(topo, (u8 __user *)attr->addr);
1953 }
1954 
1955 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1956 {
1957 	int ret;
1958 
1959 	switch (attr->group) {
1960 	case KVM_S390_VM_MEM_CTRL:
1961 		ret = kvm_s390_set_mem_control(kvm, attr);
1962 		break;
1963 	case KVM_S390_VM_TOD:
1964 		ret = kvm_s390_set_tod(kvm, attr);
1965 		break;
1966 	case KVM_S390_VM_CPU_MODEL:
1967 		ret = kvm_s390_set_cpu_model(kvm, attr);
1968 		break;
1969 	case KVM_S390_VM_CRYPTO:
1970 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1971 		break;
1972 	case KVM_S390_VM_MIGRATION:
1973 		ret = kvm_s390_vm_set_migration(kvm, attr);
1974 		break;
1975 	case KVM_S390_VM_CPU_TOPOLOGY:
1976 		ret = kvm_s390_set_topo_change_indication(kvm, attr);
1977 		break;
1978 	default:
1979 		ret = -ENXIO;
1980 		break;
1981 	}
1982 
1983 	return ret;
1984 }
1985 
1986 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1987 {
1988 	int ret;
1989 
1990 	switch (attr->group) {
1991 	case KVM_S390_VM_MEM_CTRL:
1992 		ret = kvm_s390_get_mem_control(kvm, attr);
1993 		break;
1994 	case KVM_S390_VM_TOD:
1995 		ret = kvm_s390_get_tod(kvm, attr);
1996 		break;
1997 	case KVM_S390_VM_CPU_MODEL:
1998 		ret = kvm_s390_get_cpu_model(kvm, attr);
1999 		break;
2000 	case KVM_S390_VM_MIGRATION:
2001 		ret = kvm_s390_vm_get_migration(kvm, attr);
2002 		break;
2003 	case KVM_S390_VM_CPU_TOPOLOGY:
2004 		ret = kvm_s390_get_topo_change_indication(kvm, attr);
2005 		break;
2006 	default:
2007 		ret = -ENXIO;
2008 		break;
2009 	}
2010 
2011 	return ret;
2012 }
2013 
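/*
 * Report whether a given attribute group/attribute pair is supported, so
 * userspace can probe with KVM_HAS_DEVICE_ATTR before using an attribute.
 */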
2014 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
2015 {
2016 	int ret;
2017 
2018 	switch (attr->group) {
2019 	case KVM_S390_VM_MEM_CTRL:
2020 		switch (attr->attr) {
2021 		case KVM_S390_VM_MEM_ENABLE_CMMA:
2022 		case KVM_S390_VM_MEM_CLR_CMMA:
2023 			ret = sclp.has_cmma ? 0 : -ENXIO;
2024 			break;
2025 		case KVM_S390_VM_MEM_LIMIT_SIZE:
2026 			ret = 0;
2027 			break;
2028 		default:
2029 			ret = -ENXIO;
2030 			break;
2031 		}
2032 		break;
2033 	case KVM_S390_VM_TOD:
2034 		switch (attr->attr) {
2035 		case KVM_S390_VM_TOD_LOW:
2036 		case KVM_S390_VM_TOD_HIGH:
2037 			ret = 0;
2038 			break;
2039 		default:
2040 			ret = -ENXIO;
2041 			break;
2042 		}
2043 		break;
2044 	case KVM_S390_VM_CPU_MODEL:
2045 		switch (attr->attr) {
2046 		case KVM_S390_VM_CPU_PROCESSOR:
2047 		case KVM_S390_VM_CPU_MACHINE:
2048 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
2049 		case KVM_S390_VM_CPU_MACHINE_FEAT:
2050 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
2051 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
2052 		case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
2053 		case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
2054 			ret = 0;
2055 			break;
2056 		default:
2057 			ret = -ENXIO;
2058 			break;
2059 		}
2060 		break;
2061 	case KVM_S390_VM_CRYPTO:
2062 		switch (attr->attr) {
2063 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
2064 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
2065 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
2066 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
2067 			ret = 0;
2068 			break;
2069 		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
2070 		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
2071 			ret = ap_instructions_available() ? 0 : -ENXIO;
2072 			break;
2073 		default:
2074 			ret = -ENXIO;
2075 			break;
2076 		}
2077 		break;
2078 	case KVM_S390_VM_MIGRATION:
2079 		ret = 0;
2080 		break;
2081 	case KVM_S390_VM_CPU_TOPOLOGY:
2082 		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
2083 		break;
2084 	default:
2085 		ret = -ENXIO;
2086 		break;
2087 	}
2088 
2089 	return ret;
2090 }
2091 
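/*
 * Read the storage keys for a range of guest frames into a userspace
 * buffer. Returns KVM_S390_GET_SKEYS_NONE when the guest is not using
 * storage keys, so userspace can skip the transfer entirely.
 */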
2092 static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2093 {
2094 	union skey *keys;
2095 	int i, r = 0;
2096 
2097 	if (args->flags != 0)
2098 		return -EINVAL;
2099 
2100 	/* Is this guest using storage keys? */
2101 	if (!uses_skeys(kvm->arch.gmap))
2102 		return KVM_S390_GET_SKEYS_NONE;
2103 
2104 	/* Enforce sane limit on memory allocation */
2105 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2106 		return -EINVAL;
2107 
2108 	keys = kvmalloc_array(args->count, sizeof(*keys), GFP_KERNEL_ACCOUNT);
2109 	if (!keys)
2110 		return -ENOMEM;
2111 
2112 	scoped_guard(read_lock, &kvm->mmu_lock) {
2113 		for (i = 0; i < args->count; i++) {
2114 			r = dat_get_storage_key(kvm->arch.gmap->asce,
2115 						args->start_gfn + i, keys + i);
2116 			if (r)
2117 				break;
2118 		}
2119 	}
2120 
2121 	if (!r) {
2122 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
2123 				 sizeof(uint8_t) * args->count);
2124 		if (r)
2125 			r = -EFAULT;
2126 	}
2127 
2128 	kvfree(keys);
2129 	return r;
2130 }
2131 
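/*
 * Write the storage keys for a range of guest frames from a userspace
 * buffer. Storage key handling is enabled for the guest first; the MMU
 * cache is topped up and the walk retried whenever it runs dry (-ENOMEM).
 */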
2132 static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2133 {
2134 	struct kvm_s390_mmu_cache *mc;
2135 	union skey *keys;
2136 	int i, r = 0;
2137 
2138 	if (args->flags != 0)
2139 		return -EINVAL;
2140 
2141 	/* Enforce sane limit on memory allocation */
2142 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2143 		return -EINVAL;
2144 
2145 	keys = kvmalloc_array(args->count, sizeof(*keys), GFP_KERNEL_ACCOUNT);
2146 	if (!keys)
2147 		return -ENOMEM;
2148 
2149 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
2150 			   sizeof(uint8_t) * args->count);
2151 	if (r) {
2152 		r = -EFAULT;
2153 		goto out;
2154 	}
2155 
2156 	/* Enable storage key handling for the guest */
2157 	r = gmap_enable_skeys(kvm->arch.gmap);
2158 	if (r)
2159 		goto out;
2160 
2161 	r = -EINVAL;
2162 	for (i = 0; i < args->count; i++) {
2163 		/* Lowest order bit is reserved */
2164 		if (keys[i].zero)
2165 			goto out;
2166 	}
2167 
2168 	mc = kvm_s390_new_mmu_cache();
2169 	if (!mc) {
2170 		r = -ENOMEM;
2171 		goto out;
2172 	}
2173 
2174 	r = 0;
2175 	do {
2176 		r = kvm_s390_mmu_cache_topup(mc);
2177 		if (r == -ENOMEM)
2178 			break;
2179 		scoped_guard(read_lock, &kvm->mmu_lock) {
2180 			for (i = 0; i < args->count; i++) {
2181 				r = dat_set_storage_key(mc, kvm->arch.gmap->asce,
2182 							args->start_gfn + i, keys[i], 0);
2183 				if (r)
2184 					break;
2185 			}
2186 		}
2187 	} while (r == -ENOMEM);
2188 	kvm_s390_free_mmu_cache(mc);
2189 out:
2190 	kvfree(keys);
2191 	return r;
2192 }
2193 
2194 /*
2195  * This function searches for the next page with dirty CMMA attributes, and
2196  * saves the attributes in the buffer up to either the end of the buffer or
2197  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2198  * no trailing clean bytes are saved.
2199  * In case no dirty bits were found, or if CMMA was not enabled or used, the
2200  * output buffer will indicate 0 as length.
2201  */
2202 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2203 				  struct kvm_s390_cmma_log *args)
2204 {
2205 	int peek, ret;
2206 	u8 *values;
2207 
2208 	if (!kvm->arch.use_cmma)
2209 		return -ENXIO;
2210 	/* Invalid/unsupported flags were specified */
2211 	if (args->flags & ~KVM_S390_CMMA_PEEK)
2212 		return -EINVAL;
2213 	/* Migration mode query, and we are not doing a migration */
2214 	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2215 	if (!peek && !kvm->arch.migration_mode)
2216 		return -EINVAL;
2217 	/* CMMA is disabled or was not used, or the buffer has length zero */
2218 	args->count = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2219 	if (!args->count || !uses_cmm(kvm->arch.gmap)) {
2220 		memset(args, 0, sizeof(*args));
2221 		return 0;
2222 	}
2223 	/* We are not peeking, and there are no dirty pages */
2224 	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2225 		memset(args, 0, sizeof(*args));
2226 		return 0;
2227 	}
2228 
2229 	values = vmalloc(args->count);
2230 	if (!values)
2231 		return -ENOMEM;
2232 
2233 	scoped_guard(read_lock, &kvm->mmu_lock) {
2234 		if (peek)
2235 			ret = dat_peek_cmma(args->start_gfn, kvm->arch.gmap->asce, &args->count,
2236 					    values);
2237 		else
2238 			ret = dat_get_cmma(kvm->arch.gmap->asce, &args->start_gfn, &args->count,
2239 					   values, &kvm->arch.cmma_dirty_pages);
2240 	}
2241 
2242 	if (kvm->arch.migration_mode)
2243 		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2244 	else
2245 		args->remaining = 0;
2246 
2247 	if (copy_to_user((void __user *)args->values, values, args->count))
2248 		ret = -EFAULT;
2249 
2250 	vfree(values);
2251 	return ret;
2252 }
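
/*
 * Illustrative sketch, not kernel code: during migration, userspace would
 * drain the CMMA log in a loop roughly as below ("vm_fd" and "buf" are
 * assumptions of this example).
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = KVM_S390_CMMA_SIZE_MAX,
 *		.values = (__u64)(unsigned long)buf,
 *	};
 *
 *	do {
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log))
 *			break;
 *		// consume log.count values starting at log.start_gfn
 *		log.start_gfn += log.count;
 *		log.count = KVM_S390_CMMA_SIZE_MAX;
 *	} while (log.remaining);
 */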
2253 
2254 /*
2255  * This function sets the CMMA attributes for the given pages. If the input
2256  * buffer has zero length, no action is taken, otherwise the attributes are
2257  * set and the mm->context.uses_cmm flag is set.
2258  */
2259 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2260 				  const struct kvm_s390_cmma_log *args)
2261 {
2262 	struct kvm_s390_mmu_cache *mc;
2263 	u8 *bits = NULL;
2264 	int r = 0;
2265 
2266 	if (!kvm->arch.use_cmma)
2267 		return -ENXIO;
2268 	/* invalid/unsupported flags */
2269 	if (args->flags != 0)
2270 		return -EINVAL;
2271 	/* Enforce sane limit on memory allocation */
2272 	if (args->count > KVM_S390_CMMA_SIZE_MAX)
2273 		return -EINVAL;
2274 	/* Nothing to do */
2275 	if (args->count == 0)
2276 		return 0;
2277 
2278 	mc = kvm_s390_new_mmu_cache();
2279 	if (!mc)
2280 		return -ENOMEM;
2281 	bits = vmalloc(array_size(sizeof(*bits), args->count));
2282 	if (!bits) {
2283 		r = -ENOMEM;
		goto out;
	}
2284 
2285 	r = copy_from_user(bits, (void __user *)args->values, args->count);
2286 	if (r) {
2287 		r = -EFAULT;
2288 		goto out;
2289 	}
2290 
2291 	do {
2292 		r = kvm_s390_mmu_cache_topup(mc);
2293 		if (r)
2294 			break;
2295 		scoped_guard(read_lock, &kvm->mmu_lock) {
2296 			r = dat_set_cmma_bits(mc, kvm->arch.gmap->asce, args->start_gfn,
2297 					      args->count, args->mask, bits);
2298 		}
2299 	} while (r == -ENOMEM);
2300 
2301 	set_bit(GMAP_FLAG_USES_CMM, &kvm->arch.gmap->flags);
2302 out:
2303 	kvm_s390_free_mmu_cache(mc);
2304 	vfree(bits);
2305 	return r;
2306 }
2307 
2308 /**
2309  * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2310  * non-protected.
2311  * @kvm: the VM whose protected vCPUs are to be converted
2312  * @rc: return value for the RC field of the UVC (in case of error)
2313  * @rrc: return value for the RRC field of the UVC (in case of error)
2314  *
2315  * Does not stop in case of error; it tries to convert as many
2316  * CPUs as possible. In case of error, the RC and RRC of the first error are
2317  * returned.
2318  *
2319  * Return: 0 in case of success, otherwise -EIO
2320  */
2321 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2322 {
2323 	struct kvm_vcpu *vcpu;
2324 	unsigned long i;
2325 	u16 _rc, _rrc;
2326 	int ret = 0;
2327 
2328 	/*
2329 	 * We ignore failures and try to destroy as many CPUs as possible.
2330 	 * At the same time we must not free the assigned resources when
2331 	 * this fails, as the ultravisor still has access to that memory.
2332 	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2333 	 * behind.
2334 	 * We want to return the first failure rc and rrc, though.
2335 	 */
2336 	kvm_for_each_vcpu(i, vcpu, kvm) {
2337 		mutex_lock(&vcpu->mutex);
2338 		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2339 			*rc = _rc;
2340 			*rrc = _rrc;
2341 			ret = -EIO;
2342 		}
2343 		mutex_unlock(&vcpu->mutex);
2344 	}
2345 	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2346 	if (use_gisa)
2347 		kvm_s390_gisa_enable(kvm);
2348 	return ret;
2349 }
2350 
2351 /**
2352  * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2353  * to protected.
2354  * @kvm: the VM whose protected vCPUs are to be converted
2355  * @rc: return value for the RC field of the UVC (in case of error)
2356  * @rrc: return value for the RRC field of the UVC (in case of error)
2357  *
2358  * Tries to undo the conversion in case of error.
2359  *
2360  * Return: 0 in case of success, otherwise -EIO
2361  */
2362 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2363 {
2364 	struct kvm_vcpu *vcpu;
2365 	unsigned long i;
2366 	int r = 0;
2367 	u16 dummy;
2369 
2370 	/* Disable the GISA if the ultravisor does not support AIV. */
2371 	if (!uv_has_feature(BIT_UV_FEAT_AIV))
2372 		kvm_s390_gisa_disable(kvm);
2373 
2374 	kvm_for_each_vcpu(i, vcpu, kvm) {
2375 		mutex_lock(&vcpu->mutex);
2376 		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2377 		mutex_unlock(&vcpu->mutex);
2378 		if (r)
2379 			break;
2380 	}
2381 	if (r)
2382 		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2383 	return r;
2384 }
2385 
2386 /*
2387  * Here we provide user space with a direct interface to query UV
2388  * related data like UV maxima and available features as well as
2389  * feature specific data.
2390  *
2391  * To facilitate future extension of the data structures we'll try to
2392  * write data up to the maximum requested length.
2393  */
2394 static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
2395 {
2396 	ssize_t len_min;
2397 
2398 	switch (info->header.id) {
2399 	case KVM_PV_INFO_VM: {
2400 		len_min = sizeof(info->header) + sizeof(info->vm);
2401 
2402 		if (info->header.len_max < len_min)
2403 			return -EINVAL;
2404 
2405 		memcpy(info->vm.inst_calls_list,
2406 		       uv_info.inst_calls_list,
2407 		       sizeof(uv_info.inst_calls_list));
2408 
2409 		/* It's max cpuid not max cpus, so it's off by one */
2410 		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
2411 		info->vm.max_guests = uv_info.max_num_sec_conf;
2412 		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
2413 		info->vm.feature_indication = uv_info.uv_feature_indications;
2414 
2415 		return len_min;
2416 	}
2417 	case KVM_PV_INFO_DUMP: {
2418 		len_min = sizeof(info->header) + sizeof(info->dump);
2419 
2420 		if (info->header.len_max < len_min)
2421 			return -EINVAL;
2422 
2423 		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2424 		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2425 		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
2426 		return len_min;
2427 	}
2428 	default:
2429 		return -EINVAL;
2430 	}
2431 }
2432 
2433 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
2434 			   struct kvm_s390_pv_dmp dmp)
2435 {
2436 	int r = -EINVAL;
2437 	void __user *result_buff = (void __user *)dmp.buff_addr;
2438 
2439 	switch (dmp.subcmd) {
2440 	case KVM_PV_DUMP_INIT: {
2441 		if (kvm->arch.pv.dumping)
2442 			break;
2443 
2444 		/*
2445 		 * Block SIE entry as concurrent dump UVCs could lead
2446 		 * to validity intercepts.
2447 		 */
2448 		kvm_s390_vcpu_block_all(kvm);
2449 
2450 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2451 				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
2452 		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
2453 			     cmd->rc, cmd->rrc);
2454 		if (!r) {
2455 			kvm->arch.pv.dumping = true;
2456 		} else {
2457 			kvm_s390_vcpu_unblock_all(kvm);
2458 			r = -EINVAL;
2459 		}
2460 		break;
2461 	}
2462 	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
2463 		if (!kvm->arch.pv.dumping)
2464 			break;
2465 
2466 		/*
2467 		 * gaddr is an output parameter since we might stop
2468 		 * early. As dmp will be copied back in our caller, we
2469 		 * don't need to do it ourselves.
2470 		 */
2471 		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
2472 						&cmd->rc, &cmd->rrc);
2473 		break;
2474 	}
2475 	case KVM_PV_DUMP_COMPLETE: {
2476 		if (!kvm->arch.pv.dumping)
2477 			break;
2478 
2479 		r = -EINVAL;
2480 		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
2481 			break;
2482 
2483 		r = kvm_s390_pv_dump_complete(kvm, result_buff,
2484 					      &cmd->rc, &cmd->rrc);
2485 		break;
2486 	}
2487 	default:
2488 		r = -ENOTTY;
2489 		break;
2490 	}
2491 
2492 	return r;
2493 }
2494 
2495 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2496 {
2497 	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
2498 	void __user *argp = (void __user *)cmd->data;
2499 	int r = 0;
2500 	u16 dummy;
2501 
2502 	if (need_lock)
2503 		mutex_lock(&kvm->lock);
2504 
2505 	switch (cmd->cmd) {
2506 	case KVM_PV_ENABLE: {
2507 		r = -EINVAL;
2508 		if (kvm_s390_pv_is_protected(kvm))
2509 			break;
2510 
2511 		mmap_write_lock(kvm->mm);
2512 		/*
2513 		 * Disable creation of new THPs. Existing THPs can stay, they
2514 		 * will be split when any part of them gets imported.
2515 		 */
2516 		mm_flags_clear(MMF_DISABLE_THP_EXCEPT_ADVISED, kvm->mm);
2517 		mm_flags_set(MMF_DISABLE_THP_COMPLETELY, kvm->mm);
2518 		set_bit(GMAP_FLAG_EXPORT_ON_UNMAP, &kvm->arch.gmap->flags);
2519 		r = gmap_helper_disable_cow_sharing();
2520 		mmap_write_unlock(kvm->mm);
2521 		if (r)
2522 			break;
2523 
2524 		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2525 		if (r)
2526 			break;
2527 
2528 		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2529 		if (r)
2530 			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2531 
2532 		/* we need to block service interrupts from now on */
2533 		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2534 		break;
2535 	}
2536 	case KVM_PV_ASYNC_CLEANUP_PREPARE:
2537 		r = -EINVAL;
2538 		if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
2539 			break;
2540 
2541 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2542 		/*
2543 		 * If a CPU could not be destroyed, destroy VM will also fail.
2544 		 * There is no point in trying to destroy it. Instead return
2545 		 * the rc and rrc from the first CPU that failed destroying.
2546 		 */
2547 		if (r)
2548 			break;
2549 		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2550 
2551 		/* no need to block service interrupts any more */
2552 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2553 		break;
2554 	case KVM_PV_ASYNC_CLEANUP_PERFORM:
2555 		r = -EINVAL;
2556 		if (!async_destroy)
2557 			break;
2558 		/* kvm->lock must not be held; this is asserted inside the function. */
2559 		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2560 		break;
2561 	case KVM_PV_DISABLE: {
2562 		r = -EINVAL;
2563 		if (!kvm_s390_pv_is_protected(kvm))
2564 			break;
2565 
2566 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2567 		/*
2568 		 * If a CPU could not be destroyed, destroy VM will also fail.
2569 		 * There is no point in trying to destroy it. Instead return
2570 		 * the rc and rrc from the first CPU that failed destroying.
2571 		 */
2572 		if (r)
2573 			break;
2574 		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
2575 
2576 		/* no need to block service interrupts any more */
2577 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2578 		break;
2579 	}
2580 	case KVM_PV_SET_SEC_PARMS: {
2581 		struct kvm_s390_pv_sec_parm parms = {};
2582 		void *hdr;
2583 
2584 		r = -EINVAL;
2585 		if (!kvm_s390_pv_is_protected(kvm))
2586 			break;
2587 
2588 		r = -EFAULT;
2589 		if (copy_from_user(&parms, argp, sizeof(parms)))
2590 			break;
2591 
2592 		/* Currently restricted to 1MiB */
2593 		r = -EINVAL;
2594 		if (parms.length > SZ_1M)
2595 			break;
2596 
2597 		r = -ENOMEM;
2598 		hdr = vmalloc(parms.length);
2599 		if (!hdr)
2600 			break;
2601 
2602 		r = -EFAULT;
2603 		if (!copy_from_user(hdr, (void __user *)parms.origin,
2604 				    parms.length))
2605 			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2606 						      &cmd->rc, &cmd->rrc);
2607 
2608 		vfree(hdr);
2609 		break;
2610 	}
2611 	case KVM_PV_UNPACK: {
2612 		struct kvm_s390_pv_unp unp = {};
2613 
2614 		r = -EINVAL;
2615 		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2616 			break;
2617 
2618 		r = -EFAULT;
2619 		if (copy_from_user(&unp, argp, sizeof(unp)))
2620 			break;
2621 
2622 		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2623 				       &cmd->rc, &cmd->rrc);
2624 		break;
2625 	}
2626 	case KVM_PV_VERIFY: {
2627 		r = -EINVAL;
2628 		if (!kvm_s390_pv_is_protected(kvm))
2629 			break;
2630 
2631 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2632 				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2633 		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2634 			     cmd->rrc);
2635 		break;
2636 	}
2637 	case KVM_PV_PREP_RESET: {
2638 		r = -EINVAL;
2639 		if (!kvm_s390_pv_is_protected(kvm))
2640 			break;
2641 
2642 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2643 				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2644 		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2645 			     cmd->rc, cmd->rrc);
2646 		break;
2647 	}
2648 	case KVM_PV_UNSHARE_ALL: {
2649 		r = -EINVAL;
2650 		if (!kvm_s390_pv_is_protected(kvm))
2651 			break;
2652 
2653 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2654 				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2655 		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2656 			     cmd->rc, cmd->rrc);
2657 		break;
2658 	}
2659 	case KVM_PV_INFO: {
2660 		struct kvm_s390_pv_info info = {};
2661 		ssize_t data_len;
2662 
2663 		/*
2664 		 * No need to check the VM protection here.
2665 		 *
2666 		 * Maybe user space wants to query some of the data
2667 		 * when the VM is still unprotected. If we see the
2668 		 * need to fence a new data command we can still
2669 		 * return an error in the info handler.
2670 		 */
2671 
2672 		r = -EFAULT;
2673 		if (copy_from_user(&info, argp, sizeof(info.header)))
2674 			break;
2675 
2676 		r = -EINVAL;
2677 		if (info.header.len_max < sizeof(info.header))
2678 			break;
2679 
2680 		data_len = kvm_s390_handle_pv_info(&info);
2681 		if (data_len < 0) {
2682 			r = data_len;
2683 			break;
2684 		}
2685 		/*
2686 		 * If a data command struct is extended (multiple
2687 		 * times) this can be used to determine how much of it
2688 		 * is valid.
2689 		 */
2690 		info.header.len_written = data_len;
2691 
2692 		r = -EFAULT;
2693 		if (copy_to_user(argp, &info, data_len))
2694 			break;
2695 
2696 		r = 0;
2697 		break;
2698 	}
2699 	case KVM_PV_DUMP: {
2700 		struct kvm_s390_pv_dmp dmp;
2701 
2702 		r = -EINVAL;
2703 		if (!kvm_s390_pv_is_protected(kvm))
2704 			break;
2705 
2706 		r = -EFAULT;
2707 		if (copy_from_user(&dmp, argp, sizeof(dmp)))
2708 			break;
2709 
2710 		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
2711 		if (r)
2712 			break;
2713 
2714 		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2715 			r = -EFAULT;
2716 			break;
2717 		}
2718 
2719 		break;
2720 	}
2721 	default:
2722 		r = -ENOTTY;
2723 	}
2724 	if (need_lock)
2725 		mutex_unlock(&kvm->lock);
2726 
2727 	return r;
2728 }
2729 
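/*
 * Validation common to all VM memops: reject unsupported flags and zero
 * or oversized transfers, and sanitize the access key when storage key
 * protection is not requested.
 */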
2730 static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
2731 {
2732 	if (mop->flags & ~supported_flags || !mop->size)
2733 		return -EINVAL;
2734 	if (mop->size > MEM_OP_MAX_SIZE)
2735 		return -E2BIG;
2736 	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
2737 		if (mop->key > 0xf)
2738 			return -EINVAL;
2739 	} else {
2740 		mop->key = 0;
2741 	}
2742 	return 0;
2743 }
2744 
2745 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2746 {
2747 	void __user *uaddr = (void __user *)mop->buf;
2748 	void *tmpbuf __free(kvfree) = NULL;
2749 	enum gacc_mode acc_mode;
2750 	int r;
2751 
2752 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
2753 					KVM_S390_MEMOP_F_CHECK_ONLY);
2754 	if (r)
2755 		return r;
2756 
2757 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2758 		tmpbuf = vmalloc(mop->size);
2759 		if (!tmpbuf)
2760 			return -ENOMEM;
2761 	}
2762 
2763 	acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
2764 
2765 	scoped_guard(srcu, &kvm->srcu) {
2766 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
2767 			return check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
2768 
2769 		if (acc_mode == GACC_STORE && copy_from_user(tmpbuf, uaddr, mop->size))
2770 			return -EFAULT;
2771 		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2772 					      mop->size, acc_mode, mop->key);
2773 		if (r)
2774 			return r;
2775 		if (acc_mode != GACC_STORE && copy_to_user(uaddr, tmpbuf, mop->size))
2776 			return -EFAULT;
2777 	}
2778 	return 0;
2779 }
2780 
2781 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2782 {
2783 	void __user *uaddr = (void __user *)mop->buf;
2784 	void __user *old_addr = (void __user *)mop->old_addr;
2785 	union kvm_s390_quad old = { .sixteen = 0 };
2786 	union kvm_s390_quad new = { .sixteen = 0 };
2787 	bool success = false;
2788 	int r;
2789 
2790 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
2791 	if (r)
2792 		return r;
2793 	/*
2794 	 * Checking that size is a power of two is not necessary, as
2795 	 * cmpxchg_guest_abs_with_key takes care of that.
2797 	 */
2798 	if (mop->size > sizeof(new))
2799 		return -EINVAL;
2800 	if (copy_from_user(&new, uaddr, mop->size))
2801 		return -EFAULT;
2802 	if (copy_from_user(&old, old_addr, mop->size))
2803 		return -EFAULT;
2804 
2805 	scoped_guard(srcu, &kvm->srcu) {
2806 		r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old, new,
2807 					       mop->key, &success);
2808 
2809 		if (!success && copy_to_user(old_addr, &old, mop->size))
2810 			return -EFAULT;
2811 	}
2812 	return r;
2813 }
2814 
2815 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2816 {
2817 	/*
2818 	 * This is technically a heuristic only: if the kvm->lock is not
2819 	 * taken, it is not guaranteed that the vm is/remains non-protected.
2820 	 * This is ok from a kernel perspective; wrongdoing is detected
2821 	 * on the access, -EFAULT is returned and the vm may crash the
2822 	 * next time it accesses the memory in question.
2823 	 * There is no sane use case for doing a mode switch and a memop on
2824 	 * two different CPUs at the same time.
2825 	 */
2826 	if (kvm_s390_pv_get_handle(kvm))
2827 		return -EINVAL;
2828 
2829 	switch (mop->op) {
2830 	case KVM_S390_MEMOP_ABSOLUTE_READ:
2831 	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
2832 		return kvm_s390_vm_mem_op_abs(kvm, mop);
2833 	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
2834 		return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
2835 	default:
2836 		return -EINVAL;
2837 	}
2838 }
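
/*
 * Illustrative sketch, not kernel code: an absolute read through the VM
 * memop would look roughly as below ("vm_fd" and "gpa" are assumptions of
 * this example).
 *
 *	__u8 buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = gpa,
 *		.size = sizeof(buf),
 *		.op = KVM_S390_MEMOP_ABSOLUTE_READ,
 *		.buf = (__u64)(unsigned long)buf,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_S390_MEM_OP, &op))
 *		perror("KVM_S390_MEM_OP");
 */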
2839 
2840 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
2841 {
2842 	struct kvm *kvm = filp->private_data;
2843 	void __user *argp = (void __user *)arg;
2844 	struct kvm_device_attr attr;
2845 	int r;
2846 
2847 	switch (ioctl) {
2848 	case KVM_S390_INTERRUPT: {
2849 		struct kvm_s390_interrupt s390int;
2850 
2851 		r = -EFAULT;
2852 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
2853 			break;
2854 		r = kvm_s390_inject_vm(kvm, &s390int);
2855 		break;
2856 	}
2857 	case KVM_CREATE_IRQCHIP: {
2858 		r = -EINVAL;
2859 		if (kvm->arch.use_irqchip)
2860 			r = 0;
2861 		break;
2862 	}
2863 	case KVM_SET_DEVICE_ATTR: {
2864 		r = -EFAULT;
2865 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2866 			break;
2867 		r = kvm_s390_vm_set_attr(kvm, &attr);
2868 		break;
2869 	}
2870 	case KVM_GET_DEVICE_ATTR: {
2871 		r = -EFAULT;
2872 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2873 			break;
2874 		r = kvm_s390_vm_get_attr(kvm, &attr);
2875 		break;
2876 	}
2877 	case KVM_HAS_DEVICE_ATTR: {
2878 		r = -EFAULT;
2879 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2880 			break;
2881 		r = kvm_s390_vm_has_attr(kvm, &attr);
2882 		break;
2883 	}
2884 	case KVM_S390_GET_SKEYS: {
2885 		struct kvm_s390_skeys args;
2886 
2887 		r = -EFAULT;
2888 		if (copy_from_user(&args, argp,
2889 				   sizeof(struct kvm_s390_skeys)))
2890 			break;
2891 		r = kvm_s390_get_skeys(kvm, &args);
2892 		break;
2893 	}
2894 	case KVM_S390_SET_SKEYS: {
2895 		struct kvm_s390_skeys args;
2896 
2897 		r = -EFAULT;
2898 		if (copy_from_user(&args, argp,
2899 				   sizeof(struct kvm_s390_skeys)))
2900 			break;
2901 		r = kvm_s390_set_skeys(kvm, &args);
2902 		break;
2903 	}
2904 	case KVM_S390_GET_CMMA_BITS: {
2905 		struct kvm_s390_cmma_log args;
2906 
2907 		r = -EFAULT;
2908 		if (copy_from_user(&args, argp, sizeof(args)))
2909 			break;
2910 		mutex_lock(&kvm->slots_lock);
2911 		r = kvm_s390_get_cmma_bits(kvm, &args);
2912 		mutex_unlock(&kvm->slots_lock);
2913 		if (!r) {
2914 			r = copy_to_user(argp, &args, sizeof(args));
2915 			if (r)
2916 				r = -EFAULT;
2917 		}
2918 		break;
2919 	}
2920 	case KVM_S390_SET_CMMA_BITS: {
2921 		struct kvm_s390_cmma_log args;
2922 
2923 		r = -EFAULT;
2924 		if (copy_from_user(&args, argp, sizeof(args)))
2925 			break;
2926 		mutex_lock(&kvm->slots_lock);
2927 		r = kvm_s390_set_cmma_bits(kvm, &args);
2928 		mutex_unlock(&kvm->slots_lock);
2929 		break;
2930 	}
2931 	case KVM_S390_PV_COMMAND: {
2932 		struct kvm_pv_cmd args;
2933 
2934 		/* protvirt means user cpu state */
2935 		kvm_s390_set_user_cpu_state_ctrl(kvm);
2936 		r = 0;
2937 		if (!is_prot_virt_host()) {
2938 			r = -EINVAL;
2939 			break;
2940 		}
2941 		if (copy_from_user(&args, argp, sizeof(args))) {
2942 			r = -EFAULT;
2943 			break;
2944 		}
2945 		if (args.flags) {
2946 			r = -EINVAL;
2947 			break;
2948 		}
2949 		/* must be called without kvm->lock */
2950 		r = kvm_s390_handle_pv(kvm, &args);
2951 		if (copy_to_user(argp, &args, sizeof(args))) {
2952 			r = -EFAULT;
2953 			break;
2954 		}
2955 		break;
2956 	}
2957 	case KVM_S390_MEM_OP: {
2958 		struct kvm_s390_mem_op mem_op;
2959 
2960 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2961 			r = kvm_s390_vm_mem_op(kvm, &mem_op);
2962 		else
2963 			r = -EFAULT;
2964 		break;
2965 	}
2966 	case KVM_S390_KEYOP: {
2967 		struct kvm_s390_mmu_cache *mc;
2968 		struct kvm_s390_keyop kop;
2969 		union skey skey;
2970 
2971 		if (copy_from_user(&kop, argp, sizeof(kop))) {
2972 			r = -EFAULT;
2973 			break;
2974 		}
2975 		skey.skey = kop.key;
2976 
2977 		mc = kvm_s390_new_mmu_cache();
2978 		if (!mc)
2979 			return -ENOMEM;
2980 
2981 		r = kvm_s390_keyop(mc, kvm, kop.operation, kop.guest_addr, skey);
2982 		kvm_s390_free_mmu_cache(mc);
2983 		if (r < 0)
2984 			break;
2985 
2986 		kop.key = r;
2987 		r = 0;
2988 		if (copy_to_user(argp, &kop, sizeof(kop)))
2989 			r = -EFAULT;
2990 		break;
2991 	}
2992 	case KVM_S390_ZPCI_OP: {
2993 		struct kvm_s390_zpci_op args;
2994 
2995 		r = -EINVAL;
2996 		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
2997 			break;
2998 		if (copy_from_user(&args, argp, sizeof(args))) {
2999 			r = -EFAULT;
3000 			break;
3001 		}
3002 		r = kvm_s390_pci_zpci_op(kvm, &args);
3003 		break;
3004 	}
3005 	default:
3006 		r = -ENOTTY;
3007 	}
3008 
3009 	return r;
3010 }
3011 
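/* Query the AP configuration (QCI) to tell whether the APXA facility is installed. */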
3012 static int kvm_s390_apxa_installed(void)
3013 {
3014 	struct ap_config_info info;
3015 
3016 	if (ap_instructions_available()) {
3017 		if (ap_qci(&info) == 0)
3018 			return info.apxa;
3019 	}
3020 
3021 	return 0;
3022 }
3023 
3024 /*
3025  * The format of the crypto control block (CRYCB) is specified in the 3 low
3026  * order bits of the CRYCB designation (CRYCBD) field as follows:
3027  * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3028  *	     AP extended addressing (APXA) facility are installed.
3029  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3030  * Format 2: Both the APXA and MSAX3 facilities are installed.
3031  */
3032 static void kvm_s390_set_crycb_format(struct kvm *kvm)
3033 {
3034 	kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb);
3035 
3036 	/* Clear the CRYCB format bits - i.e., set format 0 by default */
3037 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3038 
3039 	/* Check whether MSAX3 is installed */
3040 	if (!test_kvm_facility(kvm, 76))
3041 		return;
3042 
3043 	if (kvm_s390_apxa_installed())
3044 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3045 	else
3046 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3047 }
3048 
3049 /*
3050  * kvm_arch_crypto_set_masks
3051  *
3052  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3053  *	 to be set.
3054  * @apm: the mask identifying the accessible AP adapters
3055  * @aqm: the mask identifying the accessible AP domains
3056  * @adm: the mask identifying the accessible AP control domains
3057  *
3058  * Set the masks that identify the adapters, domains and control domains to
3059  * which the KVM guest is granted access.
3060  *
3061  * Note: The kvm->lock mutex must be locked by the caller before invoking this
3062  *	 function.
3063  */
3064 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
3065 			       unsigned long *aqm, unsigned long *adm)
3066 {
3067 	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3068 
3069 	kvm_s390_vcpu_block_all(kvm);
3070 
3071 	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3072 	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
3073 		memcpy(crycb->apcb1.apm, apm, 32);
3074 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
3075 			 apm[0], apm[1], apm[2], apm[3]);
3076 		memcpy(crycb->apcb1.aqm, aqm, 32);
3077 		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
3078 			 aqm[0], aqm[1], aqm[2], aqm[3]);
3079 		memcpy(crycb->apcb1.adm, adm, 32);
3080 		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
3081 			 adm[0], adm[1], adm[2], adm[3]);
3082 		break;
3083 	case CRYCB_FORMAT1:
3084 	case CRYCB_FORMAT0: /* fall through - both use APCB0 */
3085 		memcpy(crycb->apcb0.apm, apm, 8);
3086 		memcpy(crycb->apcb0.aqm, aqm, 2);
3087 		memcpy(crycb->apcb0.adm, adm, 2);
3088 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
3089 			 apm[0], *((unsigned short *)aqm),
3090 			 *((unsigned short *)adm));
3091 		break;
3092 	default:	/* Cannot happen */
3093 		break;
3094 	}
3095 
3096 	/* recreate the shadow crycb for each vcpu */
3097 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3098 	kvm_s390_vcpu_unblock_all(kvm);
3099 }
3100 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
3101 
3102 /*
3103  * kvm_arch_crypto_clear_masks
3104  *
3105  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3106  *	 to be cleared.
3107  *
3108  * Clear the masks that identify the adapters, domains and control domains to
3109  * which the KVM guest is granted access.
3110  *
3111  * Note: The kvm->lock mutex must be locked by the caller before invoking this
3112  *	 function.
3113  */
3114 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
3115 {
3116 	kvm_s390_vcpu_block_all(kvm);
3117 
3118 	memset(&kvm->arch.crypto.crycb->apcb0, 0,
3119 	       sizeof(kvm->arch.crypto.crycb->apcb0));
3120 	memset(&kvm->arch.crypto.crycb->apcb1, 0,
3121 	       sizeof(kvm->arch.crypto.crycb->apcb1));
3122 
3123 	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
3124 	/* recreate the shadow crycb for each vcpu */
3125 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3126 	kvm_s390_vcpu_unblock_all(kvm);
3127 }
3128 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
3129 
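/*
 * Derive the guest's initial CPUID from the host CPUID, with the version
 * byte forced to 0xff, which conventionally marks a virtualized CPU.
 */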
3130 static u64 kvm_s390_get_initial_cpuid(void)
3131 {
3132 	struct cpuid cpuid;
3133 
3134 	get_cpu_id(&cpuid);
3135 	cpuid.version = 0xff;
3136 	return *((u64 *) &cpuid);
3137 }
3138 
3139 static void kvm_s390_crypto_init(struct kvm *kvm)
3140 {
3141 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3142 	kvm_s390_set_crycb_format(kvm);
3143 	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3144 
3145 	if (!test_kvm_facility(kvm, 76))
3146 		return;
3147 
3148 	/* Enable AES/DEA protected key functions by default */
3149 	kvm->arch.crypto.aes_kw = 1;
3150 	kvm->arch.crypto.dea_kw = 1;
3151 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3152 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3153 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3154 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3155 }
3156 
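/* Free the VM's extended system control area (ESCA) and clear the pointer. */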
3157 static void sca_dispose(struct kvm *kvm)
3158 {
3159 	free_pages_exact(kvm->arch.sca, sizeof(*kvm->arch.sca));
3160 	kvm->arch.sca = NULL;
3161 }
3162 
3163 void kvm_arch_free_vm(struct kvm *kvm)
3164 {
3165 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3166 		kvm_s390_pci_clear_list(kvm);
3167 
3168 	__kvm_arch_free_vm(kvm);
3169 }
3170 
3171 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3172 {
3173 	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
3174 	char debug_name[16];
3175 	int i, rc;
3176 
3177 	mutex_init(&kvm->arch.pv.import_lock);
3178 
3179 	rc = -EINVAL;
3180 #ifdef CONFIG_KVM_S390_UCONTROL
3181 	if (type & ~KVM_VM_S390_UCONTROL)
3182 		goto out_err;
3183 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3184 		goto out_err;
3185 #else
3186 	if (type)
3187 		goto out_err;
3188 #endif
3189 	rc = -ENOMEM;
3190 
3191 	if (!sclp.has_64bscao)
3192 		alloc_flags |= GFP_DMA;
3193 	mutex_lock(&kvm_lock);
3194 
3195 	kvm->arch.sca = alloc_pages_exact(sizeof(*kvm->arch.sca), alloc_flags);
3196 	mutex_unlock(&kvm_lock);
3197 	if (!kvm->arch.sca)
3198 		goto out_err;
3199 
3200 	snprintf(debug_name, sizeof(debug_name), "kvm-%u", current->pid);
3201 
3202 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3203 	if (!kvm->arch.dbf)
3204 		goto out_err;
3205 
3206 	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3207 	kvm->arch.sie_page2 =
3208 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3209 	if (!kvm->arch.sie_page2)
3210 		goto out_err;
3211 
3212 	kvm->arch.sie_page2->kvm = kvm;
3213 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3214 
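	/*
	 * fac_mask bounds what userspace may later enable via the CPU model;
	 * fac_list is the facility set the guest initially runs with.
	 */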
3215 	for (i = 0; i < kvm_s390_fac_size(); i++) {
3216 		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3217 					      (kvm_s390_fac_base[i] |
3218 					       kvm_s390_fac_ext[i]);
3219 		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3220 					      kvm_s390_fac_base[i];
3221 	}
3222 	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3223 
3224 	/* we are always in czam mode - even on pre-z14 machines */
3225 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
3226 	set_kvm_facility(kvm->arch.model.fac_list, 138);
3227 	/* we emulate STHYI in kvm */
3228 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
3229 	set_kvm_facility(kvm->arch.model.fac_list, 74);
3230 	if (machine_has_tlb_guest()) {
3231 		set_kvm_facility(kvm->arch.model.fac_mask, 147);
3232 		set_kvm_facility(kvm->arch.model.fac_list, 147);
3233 	}
3234 
3235 	if (css_general_characteristics.aiv && test_facility(65))
3236 		set_kvm_facility(kvm->arch.model.fac_mask, 65);
3237 
3238 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3239 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3240 
3241 	kvm->arch.model.uv_feat_guest.feat = 0;
3242 
3243 	kvm_s390_crypto_init(kvm);
3244 
3245 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3246 		mutex_lock(&kvm->lock);
3247 		kvm_s390_pci_init_list(kvm);
3248 		kvm_s390_vcpu_pci_enable_interp(kvm);
3249 		mutex_unlock(&kvm->lock);
3250 	}
3251 
3252 	mutex_init(&kvm->arch.float_int.ais_lock);
3253 	spin_lock_init(&kvm->arch.float_int.lock);
3254 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
3255 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3256 	init_waitqueue_head(&kvm->arch.ipte_wq);
3257 	mutex_init(&kvm->arch.ipte_mutex);
3258 
3259 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3260 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
3261 
3262 	kvm->arch.mem_limit = type & KVM_VM_S390_UCONTROL ? KVM_S390_NO_MEM_LIMIT : sclp.hamax + 1;
3263 	kvm->arch.gmap = gmap_new(kvm, gpa_to_gfn(kvm->arch.mem_limit));
3264 	if (!kvm->arch.gmap)
3265 		goto out_err;
3266 	clear_bit(GMAP_FLAG_PFAULT_ENABLED, &kvm->arch.gmap->flags);
3267 
3268 	if (type & KVM_VM_S390_UCONTROL) {
3269 		struct kvm_userspace_memory_region2 fake_memslot = {
3270 			.slot = KVM_S390_UCONTROL_MEMSLOT,
3271 			.guest_phys_addr = 0,
3272 			.userspace_addr = 0,
3273 			.memory_size = ALIGN_DOWN(TASK_SIZE, _SEGMENT_SIZE),
3274 			.flags = 0,
3275 		};
3276 
3277 		/* one flat fake memslot covering the whole address-space */
3278 		mutex_lock(&kvm->slots_lock);
3279 		KVM_BUG_ON(kvm_set_internal_memslot(kvm, &fake_memslot), kvm);
3280 		mutex_unlock(&kvm->slots_lock);
3281 		set_bit(GMAP_FLAG_IS_UCONTROL, &kvm->arch.gmap->flags);
3282 	} else {
3283 		struct crst_table *table = dereference_asce(kvm->arch.gmap->asce);
3284 
3285 		crst_table_init((void *)table, _CRSTE_HOLE(table->crstes[0].h.tt).val);
3286 	}
3287 
3288 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
3289 	kvm->arch.use_skf = sclp.has_skey;
3290 	spin_lock_init(&kvm->arch.start_stop_lock);
3291 	kvm_s390_vsie_init(kvm);
3292 	if (use_gisa)
3293 		kvm_s390_gisa_init(kvm);
3294 	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3295 	kvm->arch.pv.set_aside = NULL;
3296 	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
3297 
3298 	return 0;
3299 out_err:
3300 	free_page((unsigned long)kvm->arch.sie_page2);
3301 	debug_unregister(kvm->arch.dbf);
3302 	sca_dispose(kvm);
3303 	KVM_EVENT(3, "creation of vm failed: %d", rc);
3304 	return rc;
3305 }
3306 
3307 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3308 {
3309 	u16 rc, rrc;
3310 
3311 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3312 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
3313 	kvm_s390_clear_local_irqs(vcpu);
3314 	kvm_clear_async_pf_completion_queue(vcpu);
3315 	if (!kvm_is_ucontrol(vcpu->kvm))
3316 		sca_del_vcpu(vcpu);
3317 	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3318 
3319 	if (kvm_is_ucontrol(vcpu->kvm)) {
3320 		scoped_guard(spinlock, &vcpu->kvm->arch.gmap->children_lock)
3321 			gmap_remove_child(vcpu->arch.gmap);
3322 		vcpu->arch.gmap = gmap_put(vcpu->arch.gmap);
3323 	}
3324 
3325 	if (vcpu->kvm->arch.use_cmma)
3326 		kvm_s390_vcpu_unsetup_cmma(vcpu);
3327 	/* We cannot hold the vcpu mutex here; we are already dying */
3328 	if (kvm_s390_pv_cpu_get_handle(vcpu))
3329 		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3330 	free_page((unsigned long)(vcpu->arch.sie_block));
3331 	kvm_s390_free_mmu_cache(vcpu->arch.mc);
3332 }
3333 
3334 void kvm_arch_destroy_vm(struct kvm *kvm)
3335 {
3336 	u16 rc, rrc;
3337 
3338 	kvm_destroy_vcpus(kvm);
3339 	sca_dispose(kvm);
3340 	kvm_s390_gisa_destroy(kvm);
3341 	/*
3342 	 * We are already at the end of life and kvm->lock is not taken.
3343 	 * This is ok as the file descriptor is closed by now and nobody
3344 	 * can mess with the pv state.
3345 	 */
3346 	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
3347 	/*
3348 	 * Remove the mmu notifier only when the whole KVM VM is torn down,
3349 	 * and only if one was registered to begin with. If the VM is
3350 	 * currently not protected, but was previously protected, then it's
3351 	 * possible that the notifier is still registered.
3352 	 */
3353 	if (kvm->arch.pv.mmu_notifier.ops)
3354 		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3355 
3356 	debug_unregister(kvm->arch.dbf);
3357 	free_page((unsigned long)kvm->arch.sie_page2);
3358 	kvm_s390_destroy_adapters(kvm);
3359 	kvm_s390_clear_float_irqs(kvm);
3360 	kvm_s390_vsie_destroy(kvm);
3361 	kvm->arch.gmap = gmap_put(kvm->arch.gmap);
3362 	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
3363 }
3364 
3365 /* Section: vcpu related */
3366 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3367 {
3368 	struct esca_block *sca = vcpu->kvm->arch.sca;
3369 
3370 	if (!kvm_s390_use_sca_entries())
3371 		return;
3372 
3373 	clear_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
3374 	sca->cpu[vcpu->vcpu_id].sda = 0;
3375 }
3376 
3377 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3378 {
3379 	struct esca_block *sca = vcpu->kvm->arch.sca;
3380 	phys_addr_t sca_phys = virt_to_phys(sca);
3381 
3382 	/* we still need the sca header for the ipte control */
3383 	vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3384 	vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3385 	vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3386 
3387 	if (!kvm_s390_use_sca_entries())
3388 		return;
3389 
3390 	set_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
3391 	sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3392 }
3393 
3394 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3395 {
3396 	if (!kvm_s390_use_sca_entries())
3397 		return id < KVM_MAX_VCPUS;
3398 
3399 	return id < KVM_S390_ESCA_CPU_SLOTS;
3400 }
3401 
3402 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3403 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3404 {
3405 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3406 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3407 	vcpu->arch.cputm_start = get_tod_clock_fast();
3408 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3409 }
3410 
3411 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3412 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3413 {
3414 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3415 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3416 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3417 	vcpu->arch.cputm_start = 0;
3418 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3419 }
3420 
3421 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3422 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3423 {
3424 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3425 	vcpu->arch.cputm_enabled = true;
3426 	__start_cpu_timer_accounting(vcpu);
3427 }
3428 
3429 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3430 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3431 {
3432 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3433 	__stop_cpu_timer_accounting(vcpu);
3434 	vcpu->arch.cputm_enabled = false;
3435 }
3436 
3437 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3438 {
3439 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3440 	__enable_cpu_timer_accounting(vcpu);
3441 	preempt_enable();
3442 }
3443 
3444 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3445 {
3446 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3447 	__disable_cpu_timer_accounting(vcpu);
3448 	preempt_enable();
3449 }
3450 
3451 /* set the cpu timer - may only be called from the VCPU thread itself */
3452 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3453 {
3454 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3455 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3456 	if (vcpu->arch.cputm_enabled)
3457 		vcpu->arch.cputm_start = get_tod_clock_fast();
3458 	vcpu->arch.sie_block->cputm = cputm;
3459 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3460 	preempt_enable();
3461 }
3462 
3463 /* update and get the cpu timer - can also be called from other VCPU threads */
3464 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3465 {
3466 	unsigned int seq;
3467 	__u64 value;
3468 
3469 	if (unlikely(!vcpu->arch.cputm_enabled))
3470 		return vcpu->arch.sie_block->cputm;
3471 
3472 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3473 	do {
3474 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3475 		/*
3476 		 * If the writer would ever execute a read in the critical
3477 		 * section, e.g. in irq context, we have a deadlock.
3478 		 */
3479 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3480 		value = vcpu->arch.sie_block->cputm;
3481 		/* if cputm_start is 0, accounting is being started/stopped */
3482 		if (likely(vcpu->arch.cputm_start))
3483 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3484 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3485 	preempt_enable();
3486 	return value;
3487 }
3488 
3489 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3490 {
3492 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3493 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3494 		__start_cpu_timer_accounting(vcpu);
3495 	vcpu->cpu = cpu;
3496 }
3497 
3498 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3499 {
3500 	vcpu->cpu = -1;
3501 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3502 		__stop_cpu_timer_accounting(vcpu);
3503 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3505 }
3506 
3507 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3508 {
3509 	mutex_lock(&vcpu->kvm->lock);
3510 	preempt_disable();
3511 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3512 	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3513 	preempt_enable();
3514 	mutex_unlock(&vcpu->kvm->lock);
3515 	if (!kvm_is_ucontrol(vcpu->kvm)) {
3516 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3517 		sca_add_vcpu(vcpu);
3518 	}
3519 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3520 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3521 }
3522 
3523 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3524 {
3525 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3526 	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3527 		return true;
3528 	return false;
3529 }
3530 
3531 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3532 {
3533 	/* At least one ECC subfunction must be present */
3534 	return kvm_has_pckmo_subfunc(kvm, 32) ||
3535 	       kvm_has_pckmo_subfunc(kvm, 33) ||
3536 	       kvm_has_pckmo_subfunc(kvm, 34) ||
3537 	       kvm_has_pckmo_subfunc(kvm, 40) ||
3538 	       kvm_has_pckmo_subfunc(kvm, 41);
3540 }
3541 
3542 static bool kvm_has_pckmo_hmac(struct kvm *kvm)
3543 {
3544 	/* At least one HMAC subfunction must be present */
3545 	return kvm_has_pckmo_subfunc(kvm, 118) ||
3546 	       kvm_has_pckmo_subfunc(kvm, 122);
3547 }
3548 
3549 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3550 {
3551 	/*
3552 	 * If the AP instructions are not being interpreted and the MSAX3
3553 	 * facility is not configured for the guest, there is nothing to set up.
3554 	 */
3555 	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3556 		return;
3557 
3558 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3559 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3560 	vcpu->arch.sie_block->eca &= ~ECA_APIE;
3561 	vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC);
3562 
3563 	if (vcpu->kvm->arch.crypto.apie)
3564 		vcpu->arch.sie_block->eca |= ECA_APIE;
3565 
3566 	/* Set up protected key support */
3567 	if (vcpu->kvm->arch.crypto.aes_kw) {
3568 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3569 		/* ECC/HMAC keys are also wrapped with the AES wrapping key */
3570 		if (kvm_has_pckmo_ecc(vcpu->kvm))
3571 			vcpu->arch.sie_block->ecd |= ECD_ECC;
3572 		if (kvm_has_pckmo_hmac(vcpu->kvm))
3573 			vcpu->arch.sie_block->ecd |= ECD_HMAC;
3574 	}
3575 
3576 	if (vcpu->kvm->arch.crypto.dea_kw)
3577 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3578 }
3579 
3580 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3581 {
3582 	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3583 	vcpu->arch.sie_block->cbrlo = 0;
3584 }
3585 
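/*
 * Allocate the collection buffer used when ESSA is interpreted for the
 * guest (CMMA); its origin is handed to the hardware via cbrlo.
 */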
3586 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3587 {
3588 	void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3589 
3590 	if (!cbrlo_page)
3591 		return -ENOMEM;
3592 
3593 	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3594 	return 0;
3595 }
3596 
3597 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3598 {
3599 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3600 
3601 	vcpu->arch.sie_block->ibc = model->ibc;
3602 	if (test_kvm_facility(vcpu->kvm, 7))
3603 		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3604 }
3605 
3606 static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3607 {
3608 	int rc = 0;
3609 	u16 uvrc, uvrrc;
3610 
3611 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3612 						    CPUSTAT_SM |
3613 						    CPUSTAT_STOPPED);
3614 
3615 	if (test_kvm_facility(vcpu->kvm, 78))
3616 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3617 	else if (test_kvm_facility(vcpu->kvm, 8))
3618 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3619 
3620 	kvm_s390_vcpu_setup_model(vcpu);
3621 
3622 	/* pgste_set_pte has special handling for !machine_has_esop() */
3623 	if (machine_has_esop())
3624 		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3625 	if (test_kvm_facility(vcpu->kvm, 9))
3626 		vcpu->arch.sie_block->ecb |= ECB_SRSI;
3627 	if (test_kvm_facility(vcpu->kvm, 11))
3628 		vcpu->arch.sie_block->ecb |= ECB_PTF;
3629 	if (test_kvm_facility(vcpu->kvm, 73))
3630 		vcpu->arch.sie_block->ecb |= ECB_TE;
3631 	if (!kvm_is_ucontrol(vcpu->kvm))
3632 		vcpu->arch.sie_block->ecb |= ECB_SPECI;
3633 
3634 	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3635 		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3636 	if (test_kvm_facility(vcpu->kvm, 130))
3637 		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3638 	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3639 	if (sclp.has_cei)
3640 		vcpu->arch.sie_block->eca |= ECA_CEI;
3641 	if (sclp.has_ib)
3642 		vcpu->arch.sie_block->eca |= ECA_IB;
3643 	if (sclp.has_siif)
3644 		vcpu->arch.sie_block->eca |= ECA_SII;
3645 	if (kvm_s390_use_sca_entries())
3646 		vcpu->arch.sie_block->eca |= ECA_SIGPI;
3647 	if (test_kvm_facility(vcpu->kvm, 129)) {
3648 		vcpu->arch.sie_block->eca |= ECA_VX;
3649 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3650 	}
3651 	if (test_kvm_facility(vcpu->kvm, 139))
3652 		vcpu->arch.sie_block->ecd |= ECD_MEF;
3653 	if (test_kvm_facility(vcpu->kvm, 156))
3654 		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3655 	if (vcpu->arch.sie_block->gd) {
3656 		vcpu->arch.sie_block->eca |= ECA_AIV;
3657 		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3658 			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3659 	}
3660 	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3661 	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3662 
3663 	if (sclp.has_kss)
3664 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3665 	else
3666 		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3667 
3668 	if (vcpu->kvm->arch.use_cmma) {
3669 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
3670 		if (rc)
3671 			return rc;
3672 	}
3673 	hrtimer_setup(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup, CLOCK_MONOTONIC,
3674 		      HRTIMER_MODE_REL);
3675 
3676 	vcpu->arch.sie_block->hpid = HPID_KVM;
3677 
3678 	kvm_s390_vcpu_crypto_setup(vcpu);
3679 
3680 	kvm_s390_vcpu_pci_setup(vcpu);
3681 
3682 	mutex_lock(&vcpu->kvm->lock);
3683 	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3684 		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3685 		if (rc)
3686 			kvm_s390_vcpu_unsetup_cmma(vcpu);
3687 	}
3688 	mutex_unlock(&vcpu->kvm->lock);
3689 
3690 	return rc;
3691 }
3692 
3693 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3694 {
3695 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3696 		return -EINVAL;
3697 	return 0;
3698 }
3699 
3700 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3701 {
3702 	struct sie_page *sie_page;
3703 	int rc;
3704 
3705 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3706 	vcpu->arch.mc = kvm_s390_new_mmu_cache();
3707 	if (!vcpu->arch.mc)
3708 		return -ENOMEM;
3709 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
3710 	if (!sie_page) {
3711 		kvm_s390_free_mmu_cache(vcpu->arch.mc);
3712 		vcpu->arch.mc = NULL;
3713 		return -ENOMEM;
3714 	}
3715 
3716 	vcpu->arch.sie_block = &sie_page->sie_block;
3717 	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3718 
3719 	/* the real guest size will always be smaller than msl */
3720 	vcpu->arch.sie_block->mso = 0;
3721 	vcpu->arch.sie_block->msl = sclp.hamax;
3722 
3723 	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3724 	spin_lock_init(&vcpu->arch.local_int.lock);
3725 	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3726 	seqcount_init(&vcpu->arch.cputm_seqcount);
3727 
3728 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3729 	kvm_clear_async_pf_completion_queue(vcpu);
3730 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3731 				    KVM_SYNC_GPRS |
3732 				    KVM_SYNC_ACRS |
3733 				    KVM_SYNC_CRS |
3734 				    KVM_SYNC_ARCH0 |
3735 				    KVM_SYNC_PFAULT |
3736 				    KVM_SYNC_DIAG318;
3737 	vcpu->arch.acrs_loaded = false;
3738 	kvm_s390_set_prefix(vcpu, 0);
3739 	if (test_kvm_facility(vcpu->kvm, 64))
3740 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3741 	if (test_kvm_facility(vcpu->kvm, 82))
3742 		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3743 	if (test_kvm_facility(vcpu->kvm, 133))
3744 		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3745 	if (test_kvm_facility(vcpu->kvm, 156))
3746 		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3747 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
3748 	 * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format.
3749 	 */
3750 	if (cpu_has_vx())
3751 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3752 	else
3753 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3754 
3755 	if (kvm_is_ucontrol(vcpu->kvm)) {
3756 		rc = -ENOMEM;
3757 		vcpu->arch.gmap = gmap_new_child(vcpu->kvm->arch.gmap, -1UL);
3758 		if (!vcpu->arch.gmap)
3759 			goto out_free_sie_block;
3760 	}
3761 
3762 	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p",
3763 		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3764 	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3765 
3766 	rc = kvm_s390_vcpu_setup(vcpu);
3767 	if (rc)
3768 		goto out_ucontrol_uninit;
3769 
3770 	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3771 	return 0;
3772 
3773 out_ucontrol_uninit:
3774 	if (kvm_is_ucontrol(vcpu->kvm)) {
3775 		gmap_remove_child(vcpu->arch.gmap);
3776 		vcpu->arch.gmap = gmap_put(vcpu->arch.gmap);
3777 	}
3778 out_free_sie_block:
3779 	free_page((unsigned long)(vcpu->arch.sie_block));
3780 	return rc;
3781 }
3782 
3783 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3784 {
3785 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
3786 	return kvm_s390_vcpu_has_irq(vcpu, 0);
3787 }
3788 
3789 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3790 {
3791 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3792 }
3793 
3794 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
3795 {
3796 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3797 	exit_sie(vcpu);
3798 }
3799 
3800 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
3801 {
3802 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3803 }
3804 
3805 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3806 {
3807 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3808 	exit_sie(vcpu);
3809 }
3810 
3811 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3812 {
3813 	return atomic_read(&vcpu->arch.sie_block->prog20) &
3814 	       (PROG_BLOCK_SIE | PROG_REQUEST);
3815 }
3816 
3817 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3818 {
3819 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3820 }
3821 
3822 /*
3823  * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
3824  * If the CPU is not running (e.g. waiting as idle) the function will
3825  * return immediately.
 */
3826 void exit_sie(struct kvm_vcpu *vcpu)
3827 {
3828 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
3829 	kvm_s390_vsie_kick(vcpu);
3830 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3831 		cpu_relax();
3832 }
3833 
3834 /* Kick a guest cpu out of SIE to process a request synchronously */
3835 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
3836 {
3837 	__kvm_make_request(req, vcpu);
3838 	kvm_s390_vcpu_request(vcpu);
3839 }
3840 
3841 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3842 {
3843 	/* do not poll with more than halt_poll_max_steal percent of steal time */
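	/*
	 * avg_steal_timer is in TOD-clock format, where bit 51 is stepped
	 * every microsecond, so (TICK_USEC << 12) is the length of one timer
	 * tick in the same units and the quotient is the fraction of a tick
	 * that was stolen.
	 */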
3844 	if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >=
3845 	    READ_ONCE(halt_poll_max_steal)) {
3846 		vcpu->stat.halt_no_poll_steal++;
3847 		return true;
3848 	}
3849 	return false;
3850 }
3851 
3852 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3853 {
3854 	/* kvm common code refers to this, but never calls it */
3855 	BUG();
3856 	return 0;
3857 }
3858 
3859 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3860 					   struct kvm_one_reg *reg)
3861 {
3862 	int r = -EINVAL;
3863 
3864 	switch (reg->id) {
3865 	case KVM_REG_S390_TODPR:
3866 		r = put_user(vcpu->arch.sie_block->todpr,
3867 			     (u32 __user *)reg->addr);
3868 		break;
3869 	case KVM_REG_S390_EPOCHDIFF:
3870 		r = put_user(vcpu->arch.sie_block->epoch,
3871 			     (u64 __user *)reg->addr);
3872 		break;
3873 	case KVM_REG_S390_CPU_TIMER:
3874 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
3875 			     (u64 __user *)reg->addr);
3876 		break;
3877 	case KVM_REG_S390_CLOCK_COMP:
3878 		r = put_user(vcpu->arch.sie_block->ckc,
3879 			     (u64 __user *)reg->addr);
3880 		break;
3881 	case KVM_REG_S390_PFTOKEN:
3882 		r = put_user(vcpu->arch.pfault_token,
3883 			     (u64 __user *)reg->addr);
3884 		break;
3885 	case KVM_REG_S390_PFCOMPARE:
3886 		r = put_user(vcpu->arch.pfault_compare,
3887 			     (u64 __user *)reg->addr);
3888 		break;
3889 	case KVM_REG_S390_PFSELECT:
3890 		r = put_user(vcpu->arch.pfault_select,
3891 			     (u64 __user *)reg->addr);
3892 		break;
3893 	case KVM_REG_S390_PP:
3894 		r = put_user(vcpu->arch.sie_block->pp,
3895 			     (u64 __user *)reg->addr);
3896 		break;
3897 	case KVM_REG_S390_GBEA:
3898 		r = put_user(vcpu->arch.sie_block->gbea,
3899 			     (u64 __user *)reg->addr);
3900 		break;
3901 	default:
3902 		break;
3903 	}
3904 
3905 	return r;
3906 }
3907 
3908 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3909 					   struct kvm_one_reg *reg)
3910 {
3911 	int r = -EINVAL;
3912 	__u64 val;
3913 
3914 	switch (reg->id) {
3915 	case KVM_REG_S390_TODPR:
3916 		r = get_user(vcpu->arch.sie_block->todpr,
3917 			     (u32 __user *)reg->addr);
3918 		break;
3919 	case KVM_REG_S390_EPOCHDIFF:
3920 		r = get_user(vcpu->arch.sie_block->epoch,
3921 			     (u64 __user *)reg->addr);
3922 		break;
3923 	case KVM_REG_S390_CPU_TIMER:
3924 		r = get_user(val, (u64 __user *)reg->addr);
3925 		if (!r)
3926 			kvm_s390_set_cpu_timer(vcpu, val);
3927 		break;
3928 	case KVM_REG_S390_CLOCK_COMP:
3929 		r = get_user(vcpu->arch.sie_block->ckc,
3930 			     (u64 __user *)reg->addr);
3931 		break;
3932 	case KVM_REG_S390_PFTOKEN:
3933 		r = get_user(vcpu->arch.pfault_token,
3934 			     (u64 __user *)reg->addr);
3935 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3936 			kvm_clear_async_pf_completion_queue(vcpu);
3937 		break;
3938 	case KVM_REG_S390_PFCOMPARE:
3939 		r = get_user(vcpu->arch.pfault_compare,
3940 			     (u64 __user *)reg->addr);
3941 		break;
3942 	case KVM_REG_S390_PFSELECT:
3943 		r = get_user(vcpu->arch.pfault_select,
3944 			     (u64 __user *)reg->addr);
3945 		break;
3946 	case KVM_REG_S390_PP:
3947 		r = get_user(vcpu->arch.sie_block->pp,
3948 			     (u64 __user *)reg->addr);
3949 		break;
3950 	case KVM_REG_S390_GBEA:
3951 		r = get_user(vcpu->arch.sie_block->gbea,
3952 			     (u64 __user *)reg->addr);
3953 		break;
3954 	default:
3955 		break;
3956 	}
3957 
3958 	return r;
3959 }
3960 
3961 static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
3962 {
3963 	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3964 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3965 	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3966 
3967 	kvm_clear_async_pf_completion_queue(vcpu);
3968 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3969 		kvm_s390_vcpu_stop(vcpu);
3970 	kvm_s390_clear_local_irqs(vcpu);
3971 }
3972 
3973 static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3974 {
3975 	/* Initial reset is a superset of the normal reset */
3976 	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3977 
3978 	/*
3979 	 * This equals the initial cpu reset in the PoP, but we don't switch to ESA.
3980 	 * We not only reset the internal data, but also ...
3981 	 */
3982 	vcpu->arch.sie_block->gpsw.mask = 0;
3983 	vcpu->arch.sie_block->gpsw.addr = 0;
3984 	kvm_s390_set_prefix(vcpu, 0);
3985 	kvm_s390_set_cpu_timer(vcpu, 0);
3986 	vcpu->arch.sie_block->ckc = 0;
3987 	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3988 	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3989 	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
3990 
3991 	/* ... the data in sync regs */
3992 	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3993 	vcpu->run->s.regs.ckc = 0;
3994 	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
3995 	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
3996 	vcpu->run->psw_addr = 0;
3997 	vcpu->run->psw_mask = 0;
3998 	vcpu->run->s.regs.todpr = 0;
3999 	vcpu->run->s.regs.cputm = 0;
4000 	vcpu->run->s.regs.ckc = 0;
4001 	vcpu->run->s.regs.pp = 0;
4002 	vcpu->run->s.regs.gbea = 1;
4003 	vcpu->run->s.regs.fpc = 0;
4004 	/*
4005 	 * Do not reset these registers in the protected case, as some of
4006 	 * them are overlaid and they are not accessible in this case
4007 	 * anyway.
4008 	 */
4009 	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4010 		vcpu->arch.sie_block->gbea = 1;
4011 		vcpu->arch.sie_block->pp = 0;
4012 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4013 		vcpu->arch.sie_block->todpr = 0;
4014 	}
4015 }
4016 
4017 static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4018 {
4019 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4020 
4021 	/* Clear reset is a superset of the initial reset */
4022 	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4023 
4024 	memset(&regs->gprs, 0, sizeof(regs->gprs));
4025 	memset(&regs->vrs, 0, sizeof(regs->vrs));
4026 	memset(&regs->acrs, 0, sizeof(regs->acrs));
4027 	memset(&regs->gscb, 0, sizeof(regs->gscb));
4028 
4029 	regs->etoken = 0;
4030 	regs->etoken_extension = 0;
4031 }
4032 
4033 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4034 {
4035 	vcpu_load(vcpu);
4036 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4037 	vcpu_put(vcpu);
4038 	return 0;
4039 }
4040 
4041 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4042 {
4043 	vcpu_load(vcpu);
4044 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
4045 	vcpu_put(vcpu);
4046 	return 0;
4047 }
4048 
4049 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4050 				  struct kvm_sregs *sregs)
4051 {
4052 	vcpu_load(vcpu);
4053 
4054 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4055 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4056 
4057 	vcpu_put(vcpu);
4058 	return 0;
4059 }
4060 
4061 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4062 				  struct kvm_sregs *sregs)
4063 {
4064 	vcpu_load(vcpu);
4065 
4066 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4067 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4068 
4069 	vcpu_put(vcpu);
4070 	return 0;
4071 }
4072 
4073 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4074 {
4075 	vcpu_load(vcpu);
4076 
4077 	vcpu->run->s.regs.fpc = fpu->fpc;
4078 	if (cpu_has_vx())
4079 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4080 				 (freg_t *) fpu->fprs);
4081 	else
4082 		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4083 
4084 	vcpu_put(vcpu);
4085 	return 0;
4086 }
4087 
4088 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4089 {
4090 	vcpu_load(vcpu);
4091 
4092 	if (cpu_has_vx())
4093 		convert_vx_to_fp((freg_t *) fpu->fprs,
4094 				 (__vector128 *) vcpu->run->s.regs.vrs);
4095 	else
4096 		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4097 	fpu->fpc = vcpu->run->s.regs.fpc;
4098 
4099 	vcpu_put(vcpu);
4100 	return 0;
4101 }
4102 
4103 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4104 {
4105 	int rc = 0;
4106 
4107 	if (!is_vcpu_stopped(vcpu))
4108 		rc = -EBUSY;
4109 	else {
4110 		vcpu->run->psw_mask = psw.mask;
4111 		vcpu->run->psw_addr = psw.addr;
4112 	}
4113 	return rc;
4114 }
4115 
4116 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4117 				  struct kvm_translation *tr)
4118 {
4119 	return -EINVAL; /* not implemented yet */
4120 }
4121 
4122 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
4123 			      KVM_GUESTDBG_USE_HW_BP | \
4124 			      KVM_GUESTDBG_ENABLE)
4125 
4126 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4127 					struct kvm_guest_debug *dbg)
4128 {
4129 	int rc = 0;
4130 
4131 	vcpu_load(vcpu);
4132 
4133 	vcpu->guest_debug = 0;
4134 	kvm_s390_clear_bp_data(vcpu);
4135 
4136 	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
4137 		rc = -EINVAL;
4138 		goto out;
4139 	}
4140 	if (!sclp.has_gpere) {
4141 		rc = -EINVAL;
4142 		goto out;
4143 	}
4144 
4145 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
4146 		vcpu->guest_debug = dbg->control;
4147 		/* enforce guest PER */
4148 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
4149 
4150 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
4151 			rc = kvm_s390_import_bp_data(vcpu, dbg);
4152 	} else {
4153 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4154 		vcpu->arch.guestdbg.last_bp = 0;
4155 	}
4156 
4157 	if (rc) {
4158 		vcpu->guest_debug = 0;
4159 		kvm_s390_clear_bp_data(vcpu);
4160 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4161 	}
4162 
4163 out:
4164 	vcpu_put(vcpu);
4165 	return rc;
4166 }
4167 
4168 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4169 				    struct kvm_mp_state *mp_state)
4170 {
4171 	int ret;
4172 
4173 	vcpu_load(vcpu);
4174 
4175 	/* CHECK_STOP and LOAD are not supported yet */
4176 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4177 				      KVM_MP_STATE_OPERATING;
4178 
4179 	vcpu_put(vcpu);
4180 	return ret;
4181 }
4182 
4183 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4184 				    struct kvm_mp_state *mp_state)
4185 {
4186 	int rc = 0;
4187 
4188 	vcpu_load(vcpu);
4189 
4190 	/* user space knows about this interface - let it control the state */
4191 	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
4192 
4193 	switch (mp_state->mp_state) {
4194 	case KVM_MP_STATE_STOPPED:
4195 		rc = kvm_s390_vcpu_stop(vcpu);
4196 		break;
4197 	case KVM_MP_STATE_OPERATING:
4198 		rc = kvm_s390_vcpu_start(vcpu);
4199 		break;
4200 	case KVM_MP_STATE_LOAD:
4201 		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4202 			rc = -ENXIO;
4203 			break;
4204 		}
4205 		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
4206 		break;
4207 	case KVM_MP_STATE_CHECK_STOP:
4208 		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
4209 	default:
4210 		rc = -ENXIO;
4211 	}
4212 
4213 	vcpu_put(vcpu);
4214 	return rc;
4215 }
4216 
4217 static bool ibs_enabled(struct kvm_vcpu *vcpu)
4218 {
4219 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
4220 }
4221 
4222 static int vcpu_ucontrol_translate(struct kvm_vcpu *vcpu, gpa_t *gaddr)
4223 {
4224 	int rc;
4225 
4226 	if (kvm_is_ucontrol(vcpu->kvm)) {
4227 		rc = gmap_ucas_translate(vcpu->arch.mc, vcpu->arch.gmap, gaddr);
4228 		if (rc == -EREMOTE) {
4229 			vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4230 			vcpu->run->s390_ucontrol.trans_exc_code = *gaddr;
4231 			vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION;
4232 		}
4233 		return rc;
4234 	}
4235 	return 0;
4236 }
4237 
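/*
 * The guest prefix area spans two consecutive pages. Fault both of them in
 * writably and set the notifier bit for the prefix, so that KVM gets a
 * request to redo this fixup once the backing is invalidated again.
 */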
4238 static int kvm_s390_fixup_prefix(struct kvm_vcpu *vcpu)
4239 {
4240 	gpa_t gaddr = kvm_s390_get_prefix(vcpu);
4241 	gfn_t gfn;
4242 	int rc;
4243 
4244 	if (vcpu_ucontrol_translate(vcpu, &gaddr))
4245 		return -EREMOTE;
4246 	gfn = gpa_to_gfn(gaddr);
4247 
4248 	rc = kvm_s390_faultin_gfn_simple(vcpu, NULL, gfn, true);
4249 	if (rc)
4250 		return rc;
4251 	rc = kvm_s390_faultin_gfn_simple(vcpu, NULL, gfn + 1, true);
4252 	if (rc)
4253 		return rc;
4254 
4255 	scoped_guard(write_lock, &vcpu->kvm->mmu_lock)
4256 		rc = dat_set_prefix_notif_bit(vcpu->kvm->arch.gmap->asce, gfn);
4257 	return rc;
4258 }
4259 
4260 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
4261 {
4262 retry:
4263 	kvm_s390_vcpu_request_handled(vcpu);
4264 	if (!kvm_request_pending(vcpu))
4265 		return 0;
4266 	/*
4267 	 * If the guest prefix changed, re-arm the ipte notifier for the
4268 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
4269 	 * This ensures that the ipte instruction for this request has
4270 	 * already finished. We might race against a second unmapper that
4271 	 * wants to set the blocking bit. Let's just retry the request loop.
4272 	 */
4273 	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
4274 		int rc;
4275 
4276 		rc = kvm_s390_fixup_prefix(vcpu);
4277 		if (rc) {
4278 			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4279 			return rc;
4280 		}
4281 		goto retry;
4282 	}
4283 
4284 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
4285 		vcpu->arch.sie_block->ihcpu = 0xffff;
4286 		goto retry;
4287 	}
4288 
4289 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
4290 		if (!ibs_enabled(vcpu)) {
4291 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4292 			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
4293 		}
4294 		goto retry;
4295 	}
4296 
4297 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
4298 		if (ibs_enabled(vcpu)) {
4299 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
4300 			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
4301 		}
4302 		goto retry;
4303 	}
4304 
4305 	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
4306 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4307 		goto retry;
4308 	}
4309 
4310 	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4311 		/*
4312 		 * Disable CMM virtualization; we will emulate the ESSA
4313 		 * instruction manually, in order to provide additional
4314 		 * functionalities needed for live migration.
4315 		 */
4316 		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4317 		goto retry;
4318 	}
4319 
4320 	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4321 		/*
4322 		 * Re-enable CMM virtualization if CMMA is available and
4323 		 * CMM has been used.
4324 		 */
4325 		if (vcpu->kvm->arch.use_cmma && uses_cmm(vcpu->arch.gmap))
4326 			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4327 		goto retry;
4328 	}
4329 
4330 	/* we left the vsie handler, nothing to do, just clear the request */
4331 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
4332 
4333 	return 0;
4334 }
4335 
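/*
 * The guest TOD is realized as an offset (epoch) to the host TOD. Sample the
 * host clock once and derive the new epoch from it; with the multiple-epoch
 * facility (139) a borrow from the epoch index is needed when the
 * subtraction wraps. All vCPUs are blocked while their SIE blocks are
 * updated, so they pick up the new epoch consistently.
 */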
4336 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4337 {
4338 	struct kvm_vcpu *vcpu;
4339 	union tod_clock clk;
4340 	unsigned long i;
4341 
4342 	preempt_disable();
4343 
4344 	store_tod_clock_ext(&clk);
4345 
4346 	kvm->arch.epoch = gtod->tod - clk.tod;
4347 	kvm->arch.epdx = 0;
4348 	if (test_kvm_facility(kvm, 139)) {
4349 		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
4350 		if (kvm->arch.epoch > gtod->tod)
4351 			kvm->arch.epdx -= 1;
4352 	}
4353 
4354 	kvm_s390_vcpu_block_all(kvm);
4355 	kvm_for_each_vcpu(i, vcpu, kvm) {
4356 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4357 		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
4358 	}
4359 
4360 	kvm_s390_vcpu_unblock_all(kvm);
4361 	preempt_enable();
4362 }
4363 
4364 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4365 {
4366 	if (!mutex_trylock(&kvm->lock))
4367 		return 0;
4368 	__kvm_s390_set_tod_clock(kvm, gtod);
4369 	mutex_unlock(&kvm->lock);
4370 	return 1;
4371 }
4372 
4373 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
4374 				      unsigned long token)
4375 {
4376 	struct kvm_s390_interrupt inti;
4377 	struct kvm_s390_irq irq;
4378 
4379 	if (start_token) {
4380 		irq.u.ext.ext_params2 = token;
4381 		irq.type = KVM_S390_INT_PFAULT_INIT;
4382 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
4383 	} else {
4384 		inti.type = KVM_S390_INT_PFAULT_DONE;
4385 		inti.parm64 = token;
4386 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
4387 	}
4388 }
4389 
4390 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
4391 				     struct kvm_async_pf *work)
4392 {
4393 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4394 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4395 
4396 	return true;
4397 }
4398 
4399 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
4400 				 struct kvm_async_pf *work)
4401 {
4402 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4403 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4404 }
4405 
4406 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
4407 			       struct kvm_async_pf *work)
4408 {
4409 	/* s390 will always inject the page directly */
4410 }
4411 
4412 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
4413 {
4414 	/*
4415 	 * s390 will always inject the page directly,
4416 	 * but we still want check_async_completion to clean up
4417 	 */
4418 	return true;
4419 }
4420 
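/*
 * Only hand a fault over to the async-pf machinery if the guest has set up
 * a valid pfault token and can currently take the notification interrupt;
 * otherwise the fault has to be resolved synchronously.
 */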
4421 bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
4422 {
4423 	hva_t hva;
4424 	struct kvm_arch_async_pf arch;
4425 
4426 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4427 		return false;
4428 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4429 	    vcpu->arch.pfault_compare)
4430 		return false;
4431 	if (psw_extint_disabled(vcpu))
4432 		return false;
4433 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
4434 		return false;
4435 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4436 		return false;
4437 	if (!pfault_enabled(vcpu->arch.gmap))
4438 		return false;
4439 
4440 	hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr);
4441 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4442 		return false;
4443 
4444 	return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch);
4445 }
4446 
4447 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4448 {
4449 	int rc, cpuflags;
4450 
4451 	/*
4452 	 * On s390 notifications for arriving pages will be delivered directly
4453 	 * to the guest, but the housekeeping for completed pfaults is
4454 	 * handled outside the worker.
4455 	 */
4456 	kvm_check_async_pf_completion(vcpu);
4457 
4458 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4459 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4460 
4461 	if (!kvm_is_ucontrol(vcpu->kvm)) {
4462 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
4463 		if (rc || guestdbg_exit_pending(vcpu))
4464 			return rc;
4465 	}
4466 
4467 	rc = kvm_s390_handle_requests(vcpu);
4468 	if (rc)
4469 		return rc;
4470 
4471 	if (guestdbg_enabled(vcpu)) {
4472 		kvm_s390_backup_guest_per_regs(vcpu);
4473 		kvm_s390_patch_guest_per_regs(vcpu);
4474 	}
4475 
4476 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4477 
4478 	vcpu->arch.sie_block->icptcode = 0;
4479 	current->thread.gmap_int_code = 0;
4480 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4481 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4482 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
4483 
4484 	return 0;
4485 }
4486 
4487 static int vcpu_post_run_addressing_exception(struct kvm_vcpu *vcpu)
4488 {
4489 	struct kvm_s390_pgm_info pgm_info = {
4490 		.code = PGM_ADDRESSING,
4491 	};
4492 	u8 opcode, ilen;
4493 	int rc;
4494 
4495 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4496 	trace_kvm_s390_sie_fault(vcpu);
4497 
4498 	/*
4499 	 * We want to inject an addressing exception, which is defined as a
4500 	 * suppressing or terminating exception. However, since we came here
4501 	 * by a DAT access exception, the PSW still points to the faulting
4502 	 * instruction, because DAT exceptions are nullifying. So we've got
4503 	 * to look up the current opcode to get the length of the instruction
4504 	 * to be able to forward the PSW.
4505 	 */
4506 	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4507 	ilen = insn_length(opcode);
4508 	if (rc < 0) {
4509 		return rc;
4510 	} else if (rc) {
4511 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
4512 		 * Forward by arbitrary ilc, injection will take care of
4513 		 * nullification if necessary.
4514 		 */
4515 		pgm_info = vcpu->arch.pgm;
4516 		ilen = 4;
4517 	}
4518 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4519 	kvm_s390_forward_psw(vcpu, ilen);
4520 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4521 }
4522 
4523 static void kvm_s390_assert_primary_as(struct kvm_vcpu *vcpu)
4524 {
4525 	KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4526 		"Unexpected program interrupt 0x%x, TEID 0x%016lx",
4527 		current->thread.gmap_int_code, current->thread.gmap_teid.val);
4528 }
4529 
4530 static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, gpa_t gaddr, bool wr)
4531 {
4532 	struct guest_fault f = {
4533 		.write_attempt = wr,
4534 		.attempt_pfault = pfault_enabled(vcpu->arch.gmap),
4535 	};
4536 	int rc;
4537 
4538 	if (vcpu_ucontrol_translate(vcpu, &gaddr))
4539 		return -EREMOTE;
4540 	f.gfn = gpa_to_gfn(gaddr);
4541 
4542 	rc = kvm_s390_faultin_gfn(vcpu, NULL, &f);
4543 	if (rc <= 0)
4544 		return rc;
4545 	if (rc == PGM_ADDRESSING)
4546 		return vcpu_post_run_addressing_exception(vcpu);
4547 	KVM_BUG_ON(rc, vcpu->kvm);
4548 	return -EINVAL;
4549 }
4550 
4551 static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
4552 {
4553 	unsigned int foll = 0;
4554 	unsigned long gaddr;
4555 	int rc;
4556 
4557 	gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
4558 	if (kvm_s390_cur_gmap_fault_is_write())
4559 		foll = FOLL_WRITE;
4560 
4561 	switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
4562 	case 0:
4563 		vcpu->stat.exit_null++;
4564 		break;
4565 	case PGM_SECURE_STORAGE_ACCESS:
4566 	case PGM_SECURE_STORAGE_VIOLATION:
4567 		kvm_s390_assert_primary_as(vcpu);
4568 		/*
4569 		 * This can happen after a reboot with asynchronous teardown;
4570 		 * the new guest (normal or protected) will run on top of the
4571 		 * previous protected guest. The old pages need to be destroyed
4572 		 * so the new guest can use them.
4573 		 */
4574 		if (kvm_s390_pv_destroy_page(vcpu->kvm, gaddr)) {
4575 			/*
4576 			 * Either KVM messed up the secure guest mapping or the
4577 			 * same page is mapped into multiple secure guests.
4578 			 *
4579 			 * This exception is only triggered when a guest 2 is
4580 			 * running and can therefore never occur in kernel
4581 			 * context.
4582 			 */
4583 			pr_warn_ratelimited("Secure storage violation (%x) in task: %s, pid %d\n",
4584 					    current->thread.gmap_int_code, current->comm,
4585 					    current->pid);
4586 			send_sig(SIGSEGV, current, 0);
4587 		}
4588 		break;
4589 	case PGM_NON_SECURE_STORAGE_ACCESS:
4590 		kvm_s390_assert_primary_as(vcpu);
4591 		/*
4592 		 * This is normal operation; a page belonging to a protected
4593 		 * guest has not been imported yet. Try to import the page into
4594 		 * the protected guest.
4595 		 */
4596 		rc = kvm_s390_pv_convert_to_secure(vcpu->kvm, gaddr);
4597 		if (rc == -EINVAL)
4598 			send_sig(SIGSEGV, current, 0);
4599 		if (rc != -ENXIO)
4600 			break;
4601 		foll = FOLL_WRITE;
4602 		fallthrough;
4603 	case PGM_PROTECTION:
4604 	case PGM_SEGMENT_TRANSLATION:
4605 	case PGM_PAGE_TRANSLATION:
4606 	case PGM_ASCE_TYPE:
4607 	case PGM_REGION_FIRST_TRANS:
4608 	case PGM_REGION_SECOND_TRANS:
4609 	case PGM_REGION_THIRD_TRANS:
4610 		kvm_s390_assert_primary_as(vcpu);
4611 		return vcpu_dat_fault_handler(vcpu, gaddr, foll);
4612 	default:
4613 		KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4614 			current->thread.gmap_int_code, current->thread.gmap_teid.val);
4615 		send_sig(SIGSEGV, current, 0);
4616 		break;
4617 	}
4618 	return 0;
4619 }
4620 
4621 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4622 {
4623 	struct mcck_volatile_info *mcck_info;
4624 	struct sie_page *sie_page;
4625 	int rc;
4626 
4627 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4628 		   vcpu->arch.sie_block->icptcode);
4629 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4630 
4631 	if (guestdbg_enabled(vcpu))
4632 		kvm_s390_restore_guest_per_regs(vcpu);
4633 
4634 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4635 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4636 
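	/*
	 * An exit_reason of -EINTR means a machine check interrupted SIE;
	 * the machine check details were saved into the sie_page and are
	 * forwarded to the guest here.
	 */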
4637 	if (exit_reason == -EINTR) {
4638 		VCPU_EVENT(vcpu, 3, "%s", "machine check");
4639 		sie_page = container_of(vcpu->arch.sie_block,
4640 					struct sie_page, sie_block);
4641 		mcck_info = &sie_page->mcck_info;
4642 		kvm_s390_reinject_machine_check(vcpu, mcck_info);
4643 		return 0;
4644 	}
4645 
4646 	if (vcpu->arch.sie_block->icptcode > 0) {
4647 		rc = kvm_handle_sie_intercept(vcpu);
4648 
4649 		if (rc != -EOPNOTSUPP)
4650 			return rc;
4651 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4652 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4653 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4654 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4655 		return -EREMOTE;
4656 	}
4657 
4658 	return vcpu_post_run_handle_fault(vcpu);
4659 }
4660 
4661 int noinstr kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
4662 				    u64 *gprs, unsigned long gasce)
4663 {
4664 	int ret;
4665 
4666 	guest_state_enter_irqoff();
4667 
4668 	/*
4669 	 * The guest_state_{enter,exit}_irqoff() functions inform lockdep and
4670 	 * tracing that entry to the guest will enable host IRQs, and exit from
4671 	 * the guest will disable host IRQs.
4672 	 */
4673 	ret = sie64a(scb, gprs, gasce);
4674 
4675 	guest_state_exit_irqoff();
4676 
4677 	return ret;
4678 }
4679 
4680 #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
4681 static int __vcpu_run(struct kvm_vcpu *vcpu)
4682 {
4683 	int rc, exit_reason;
4684 	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4685 
4686 	/*
4687 	 * We try to hold kvm->srcu during most of vcpu_run (except when
4688 	 * running the guest), so that memslots (and other stuff) are protected.
4689 	 */
4690 	kvm_vcpu_srcu_read_lock(vcpu);
4691 
4692 	while (true) {
4693 		rc = vcpu_pre_run(vcpu);
4694 		kvm_vcpu_srcu_read_unlock(vcpu);
4695 		if (rc || guestdbg_exit_pending(vcpu))
4696 			break;
4697 
4698 		/*
4699 		 * As PF_VCPU will be used in the fault handler, there must be
4700 		 * no uaccess between guest_timing_enter_irqoff and
4701 		 * guest_timing_exit_irqoff.
4702 		 */
4703 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4704 			memcpy(sie_page->pv_grregs,
4705 			       vcpu->run->s.regs.gprs,
4706 			       sizeof(sie_page->pv_grregs));
4707 		}
4708 
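		/*
		 * Re-check for pending work with IRQs disabled; if there is
		 * any, handle it with IRQs enabled and start over, so that
		 * SIE is never entered with work still pending.
		 */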
4709 xfer_to_guest_mode_check:
4710 		local_irq_disable();
4711 		xfer_to_guest_mode_prepare();
4712 		if (xfer_to_guest_mode_work_pending()) {
4713 			local_irq_enable();
4714 			rc = kvm_xfer_to_guest_mode_handle_work(vcpu);
4715 			if (rc)
4716 				break;
4717 			goto xfer_to_guest_mode_check;
4718 		}
4719 
4720 		guest_timing_enter_irqoff();
4721 		__disable_cpu_timer_accounting(vcpu);
4722 
4723 		exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
4724 						      vcpu->run->s.regs.gprs,
4725 						      vcpu->arch.gmap->asce.val);
4726 
4727 		__enable_cpu_timer_accounting(vcpu);
4728 		guest_timing_exit_irqoff();
4729 		local_irq_enable();
4730 
4731 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4732 			memcpy(vcpu->run->s.regs.gprs,
4733 			       sie_page->pv_grregs,
4734 			       sizeof(sie_page->pv_grregs));
4735 			/*
4736 			 * We're not allowed to inject interrupts on intercepts
4737 			 * that leave the guest state in an "in-between" state
4738 			 * where the next SIE entry will do a continuation.
4739 			 * Fence interrupts in our "internal" PSW.
4740 			 */
4741 			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4742 			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4743 				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4744 			}
4745 		}
4746 		kvm_vcpu_srcu_read_lock(vcpu);
4747 
4748 		rc = vcpu_post_run(vcpu, exit_reason);
4749 		if (rc || guestdbg_exit_pending(vcpu)) {
4750 			kvm_vcpu_srcu_read_unlock(vcpu);
4751 			break;
4752 		}
4753 	}
4754 
4755 	return rc;
4756 }
4757 
4758 static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4759 {
4760 	struct kvm_run *kvm_run = vcpu->run;
4761 	struct runtime_instr_cb *riccb;
4762 	struct gs_cb *gscb;
4763 
4764 	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4765 	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4766 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4767 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4768 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4769 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4770 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4771 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4772 	}
4773 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4774 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4775 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4776 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4777 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4778 			kvm_clear_async_pf_completion_queue(vcpu);
4779 	}
4780 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4781 		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4782 		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4783 		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4784 	}
4785 	/*
4786 	 * If userspace sets the riccb (e.g. after migration) to a valid state,
4787 	 * we should enable RI here instead of doing the lazy enablement.
4788 	 */
4789 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4790 	    test_kvm_facility(vcpu->kvm, 64) &&
4791 	    riccb->v &&
4792 	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4793 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
4794 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
4795 	}
4796 	/*
4797 	 * If userspace sets the gscb (e.g. after migration) to non-zero,
4798 	 * we should enable GS here instead of doing the lazy enablement.
4799 	 */
4800 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4801 	    test_kvm_facility(vcpu->kvm, 133) &&
4802 	    gscb->gssm &&
4803 	    !vcpu->arch.gs_enabled) {
4804 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4805 		vcpu->arch.sie_block->ecb |= ECB_GS;
4806 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4807 		vcpu->arch.gs_enabled = 1;
4808 	}
4809 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4810 	    test_kvm_facility(vcpu->kvm, 82)) {
4811 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4812 		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4813 	}
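	/*
	 * Swap in the guest's guarded storage control block: preserve the
	 * current host gs_cb (restored again in store_regs_fmt2()) and, if
	 * the guest has GS enabled, load its gs_cb from the sync regs.
	 */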
4814 	if (cpu_has_gs()) {
4815 		preempt_disable();
4816 		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
4817 		if (current->thread.gs_cb) {
4818 			vcpu->arch.host_gscb = current->thread.gs_cb;
4819 			save_gs_cb(vcpu->arch.host_gscb);
4820 		}
4821 		if (vcpu->arch.gs_enabled) {
4822 			current->thread.gs_cb = (struct gs_cb *)
4823 						&vcpu->run->s.regs.gscb;
4824 			restore_gs_cb(current->thread.gs_cb);
4825 		}
4826 		preempt_enable();
4827 	}
4828 	/* SIE will load etoken directly from SDNX and therefore kvm_run */
4829 }
4830 
4831 static void sync_regs(struct kvm_vcpu *vcpu)
4832 {
4833 	struct kvm_run *kvm_run = vcpu->run;
4834 
4835 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4836 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4837 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4838 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4839 		/* some control register changes require a tlb flush */
4840 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4841 	}
4842 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4843 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4844 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4845 	}
4846 	save_access_regs(vcpu->arch.host_acrs);
4847 	restore_access_regs(vcpu->run->s.regs.acrs);
4848 	vcpu->arch.acrs_loaded = true;
4849 	kvm_s390_fpu_load(vcpu->run);
4850 	/* Sync fmt2 only data */
4851 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
4852 		sync_regs_fmt2(vcpu);
4853 	} else {
4854 		/*
4855 		 * In several places we have to modify our internal view to
4856 		 * not do things that are disallowed by the ultravisor. For
4857 		 * example we must not inject interrupts after specific exits
4858 		 * (e.g. 112 prefix page not secure). We do this by turning
4859 		 * off the machine check, external and I/O interrupt bits
4860 		 * of our PSW copy. To avoid getting validity intercepts, we
4861 		 * only accept the condition code from userspace.
4862 		 */
4863 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4864 		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4865 						   PSW_MASK_CC;
4866 	}
4867 
4868 	kvm_run->kvm_dirty_regs = 0;
4869 }
4870 
4871 static void store_regs_fmt2(struct kvm_vcpu *vcpu)
4872 {
4873 	struct kvm_run *kvm_run = vcpu->run;
4874 
4875 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4876 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4877 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
4878 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
4879 	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
4880 	if (cpu_has_gs()) {
4881 		preempt_disable();
4882 		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
4883 		if (vcpu->arch.gs_enabled)
4884 			save_gs_cb(current->thread.gs_cb);
4885 		current->thread.gs_cb = vcpu->arch.host_gscb;
4886 		restore_gs_cb(vcpu->arch.host_gscb);
4887 		if (!vcpu->arch.host_gscb)
4888 			local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
4889 		vcpu->arch.host_gscb = NULL;
4890 		preempt_enable();
4891 	}
4892 	/* SIE will save etoken directly into SDNX and therefore kvm_run */
4893 }
4894 
4895 static void store_regs(struct kvm_vcpu *vcpu)
4896 {
4897 	struct kvm_run *kvm_run = vcpu->run;
4898 
4899 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4900 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4901 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4902 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4903 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4904 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4905 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4906 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4907 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4908 	save_access_regs(vcpu->run->s.regs.acrs);
4909 	restore_access_regs(vcpu->arch.host_acrs);
4910 	vcpu->arch.acrs_loaded = false;
4911 	kvm_s390_fpu_store(vcpu->run);
4912 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
4913 		store_regs_fmt2(vcpu);
4914 }
4915 
4916 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
4917 {
4918 	struct kvm_run *kvm_run = vcpu->run;
4919 	DECLARE_KERNEL_FPU_ONSTACK32(fpu);
4920 	int rc;
4921 
4922 	/*
4923 	 * Running a VM while dumping always has the potential to
4924 	 * produce inconsistent dump data. But for PV vcpus a SIE
4925 	 * entry while dumping could also lead to a fatal validity
4926 	 * intercept which we absolutely want to avoid.
4927 	 */
4928 	if (vcpu->kvm->arch.pv.dumping)
4929 		return -EINVAL;
4930 
4931 	if (!vcpu->wants_to_run)
4932 		return -EINTR;
4933 
4934 	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4935 	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4936 		return -EINVAL;
4937 
4938 	vcpu_load(vcpu);
4939 
4940 	if (guestdbg_exit_pending(vcpu)) {
4941 		kvm_s390_prepare_debug_exit(vcpu);
4942 		rc = 0;
4943 		goto out;
4944 	}
4945 
4946 	kvm_sigset_activate(vcpu);
4947 
4948 	/*
4949 	 * no need to check the return value of vcpu_start, as it can only
4950 	 * fail for protvirt, but protvirt implies user cpu state
4951 	 */
4952 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4953 		kvm_s390_vcpu_start(vcpu);
4954 	} else if (is_vcpu_stopped(vcpu)) {
4955 		pr_err_ratelimited("can't run stopped vcpu %d\n",
4956 				   vcpu->vcpu_id);
4957 		rc = -EINVAL;
4958 		goto out;
4959 	}
4960 
4961 	kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
4962 	sync_regs(vcpu);
4963 	enable_cpu_timer_accounting(vcpu);
4964 
4965 	might_fault();
4966 	rc = __vcpu_run(vcpu);
4967 
4968 	if (signal_pending(current) && !rc) {
4969 		kvm_run->exit_reason = KVM_EXIT_INTR;
4970 		vcpu->stat.signal_exits++;
4971 		rc = -EINTR;
4972 	}
4973 
4974 	if (guestdbg_exit_pending(vcpu) && !rc)  {
4975 		kvm_s390_prepare_debug_exit(vcpu);
4976 		rc = 0;
4977 	}
4978 
4979 	if (rc == -EREMOTE) {
4980 		/* userspace support is needed, kvm_run has been prepared */
4981 		rc = 0;
4982 	}
4983 
4984 	disable_cpu_timer_accounting(vcpu);
4985 	store_regs(vcpu);
4986 	kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
4987 
4988 	kvm_sigset_deactivate(vcpu);
4989 
4990 	vcpu->stat.exit_userspace++;
4991 out:
4992 	vcpu_put(vcpu);
4993 	return rc;
4994 }
4995 
4996 /*
4997  * store status at address
4998  * we have two special cases:
4999  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5000  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5001  */
5002 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
5003 {
5004 	unsigned char archmode = 1;
5005 	freg_t fprs[NUM_FPRS];
5006 	unsigned int px;
5007 	u64 clkcomp, cputm;
5008 	int rc;
5009 
5010 	px = kvm_s390_get_prefix(vcpu);
5011 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
5012 		if (write_guest_abs(vcpu, 163, &archmode, 1))
5013 			return -EFAULT;
5014 		gpa = 0;
5015 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
5016 		if (write_guest_real(vcpu, 163, &archmode, 1))
5017 			return -EFAULT;
5018 		gpa = px;
5019 	} else {
5020 		gpa -= __LC_FPREGS_SAVE_AREA;
	}
5021 
5022 	/* manually convert vector registers if necessary */
5023 	if (cpu_has_vx()) {
5024 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
5025 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5026 				     fprs, 128);
5027 	} else {
5028 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5029 				     vcpu->run->s.regs.fprs, 128);
5030 	}
5031 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
5032 			      vcpu->run->s.regs.gprs, 128);
5033 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
5034 			      &vcpu->arch.sie_block->gpsw, 16);
5035 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
5036 			      &px, 4);
5037 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
5038 			      &vcpu->run->s.regs.fpc, 4);
5039 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
5040 			      &vcpu->arch.sie_block->todpr, 4);
5041 	cputm = kvm_s390_get_cpu_timer(vcpu);
5042 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
5043 			      &cputm, 8);
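	/* the save area holds only bits 0-55 of the clock comparator */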
5044 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
5045 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
5046 			      &clkcomp, 8);
5047 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
5048 			      &vcpu->run->s.regs.acrs, 64);
5049 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
5050 			      &vcpu->arch.sie_block->gcr, 128);
5051 	return rc ? -EFAULT : 0;
5052 }
5053 
5054 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5055 {
5056 	/*
5057 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
5058 	 * switch in the run ioctl. Let's update our copies before we save
5059 	 * them into the save area.
5060 	 */
5061 	kvm_s390_fpu_store(vcpu->run);
5062 	save_access_regs(vcpu->run->s.regs.acrs);
5063 
5064 	return kvm_s390_store_status_unloaded(vcpu, addr);
5065 }
5066 
5067 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5068 {
5069 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
5070 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
5071 }
5072 
5073 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
5074 {
5075 	unsigned long i;
5076 	struct kvm_vcpu *vcpu;
5077 
5078 	kvm_for_each_vcpu(i, vcpu, kvm) {
5079 		__disable_ibs_on_vcpu(vcpu);
5080 	}
5081 }
5082 
5083 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5084 {
5085 	if (!sclp.has_ibs)
5086 		return;
5087 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
5088 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
5089 }
5090 
5091 int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
5092 {
5093 	int i, online_vcpus, r = 0, started_vcpus = 0;
5094 
5095 	if (!is_vcpu_stopped(vcpu))
5096 		return 0;
5097 
5098 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
5099 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5100 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5101 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5102 
5103 	/* Let's tell the UV that we want to change into the operating state */
5104 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5105 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
5106 		if (r) {
5107 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5108 			return r;
5109 		}
5110 	}
5111 
5112 	for (i = 0; i < online_vcpus; i++) {
5113 		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
5114 			started_vcpus++;
5115 	}
5116 
5117 	if (started_vcpus == 0) {
5118 		/* we're the only active VCPU -> speed it up */
5119 		__enable_ibs_on_vcpu(vcpu);
5120 	} else if (started_vcpus == 1) {
5121 		/*
5122 		 * As we are starting a second VCPU, we have to disable
5123 		 * the IBS facility on all VCPUs to remove potentially
5124 		 * outstanding ENABLE requests.
5125 		 */
5126 		__disable_ibs_on_all_vcpus(vcpu->kvm);
5127 	}
5128 
5129 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
5130 	/*
5131 	 * The real PSW might have changed due to a RESTART interpreted by the
5132 	 * ultravisor. We block all interrupts and let the next sie exit
5133 	 * refresh our view.
5134 	 */
5135 	if (kvm_s390_pv_cpu_is_protected(vcpu))
5136 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5137 	/*
5138 	 * Another VCPU might have used IBS while we were offline.
5139 	 * Let's play safe and flush the VCPU at startup.
5140 	 */
5141 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5142 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5143 	return 0;
5144 }
5145 
5146 int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
5147 {
5148 	int i, online_vcpus, r = 0, started_vcpus = 0;
5149 	struct kvm_vcpu *started_vcpu = NULL;
5150 
5151 	if (is_vcpu_stopped(vcpu))
5152 		return 0;
5153 
5154 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
5155 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5156 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5157 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5158 
5159 	/* Let's tell the UV that we want to change into the stopped state */
5160 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5161 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5162 		if (r) {
5163 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5164 			return r;
5165 		}
5166 	}
5167 
5168 	/*
5169 	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5170 	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5171 	 * have been fully processed. This will ensure that the VCPU
5172 	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5173 	 */
5174 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
5175 	kvm_s390_clear_stop_irq(vcpu);
5176 
5177 	__disable_ibs_on_vcpu(vcpu);
5178 
5179 	for (i = 0; i < online_vcpus; i++) {
5180 		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5181 
5182 		if (!is_vcpu_stopped(tmp)) {
5183 			started_vcpus++;
5184 			started_vcpu = tmp;
5185 		}
5186 	}
5187 
5188 	if (started_vcpus == 1) {
5189 		/*
5190 		 * As we only have one VCPU left, we want to enable the
5191 		 * IBS facility for that VCPU to speed it up.
5192 		 */
5193 		__enable_ibs_on_vcpu(started_vcpu);
5194 	}
5195 
5196 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5197 	return 0;
5198 }
5199 
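/*
 * Enable a capability on a single vcpu. Only KVM_CAP_S390_CSS_SUPPORT is
 * handled here; note that, although requested via a vcpu ioctl, the
 * setting is tracked VM-wide.
 */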
5200 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5201 				     struct kvm_enable_cap *cap)
5202 {
5203 	int r;
5204 
5205 	if (cap->flags)
5206 		return -EINVAL;
5207 
5208 	switch (cap->cap) {
5209 	case KVM_CAP_S390_CSS_SUPPORT:
5210 		if (!vcpu->kvm->arch.css_support) {
5211 			vcpu->kvm->arch.css_support = 1;
5212 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5213 			trace_kvm_s390_enable_css(vcpu->kvm);
5214 		}
5215 		r = 0;
5216 		break;
5217 	default:
5218 		r = -EINVAL;
5219 		break;
5220 	}
5221 	return r;
5222 }
5223 
5224 static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
5225 				  struct kvm_s390_mem_op *mop)
5226 {
5227 	void __user *uaddr = (void __user *)mop->buf;
5228 	void *sida_addr;
5229 	int r = 0;
5230 
5231 	if (mop->flags || !mop->size)
5232 		return -EINVAL;
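	/* guard against wraparound of size + sida_offset */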
5233 	if (mop->size + mop->sida_offset < mop->size)
5234 		return -EINVAL;
5235 	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
5236 		return -E2BIG;
5237 	if (!kvm_s390_pv_cpu_is_protected(vcpu))
5238 		return -EINVAL;
5239 
5240 	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5241 
5242 	switch (mop->op) {
5243 	case KVM_S390_MEMOP_SIDA_READ:
5244 		if (copy_to_user(uaddr, sida_addr, mop->size))
5245 			r = -EFAULT;
5246 
5247 		break;
5248 	case KVM_S390_MEMOP_SIDA_WRITE:
5249 		if (copy_from_user(sida_addr, uaddr, mop->size))
5250 			r = -EFAULT;
5251 		break;
5252 	}
5253 	return r;
5254 }
5255 
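/*
 * Handle KVM_S390_MEM_OP accesses to guest logical memory. For
 * illustration only, a userspace read of guest memory could look
 * roughly like the following sketch (not kernel code; guest_addr,
 * local_buf and vcpu_fd are placeholders):
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = sizeof(local_buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)local_buf,
 *		.ar    = 0,
 *	};
 *	rc = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */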
5256 static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
5257 				 struct kvm_s390_mem_op *mop)
5258 {
5259 	void __user *uaddr = (void __user *)mop->buf;
5260 	void *tmpbuf __free(kvfree) = NULL;
5261 	enum gacc_mode acc_mode;
5262 	int r;
5263 
5264 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
5265 					KVM_S390_MEMOP_F_CHECK_ONLY |
5266 					KVM_S390_MEMOP_F_SKEY_PROTECTION);
5267 	if (r)
5268 		return r;
5269 	if (mop->ar >= NUM_ACRS)
5270 		return -EINVAL;
5271 	if (kvm_s390_pv_cpu_is_protected(vcpu))
5272 		return -EINVAL;
5273 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
5274 		tmpbuf = vmalloc(mop->size);
5275 		if (!tmpbuf)
5276 			return -ENOMEM;
5277 	}
5278 
5279 	acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
5280 	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5281 		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5282 				    acc_mode, mop->key);
5283 	} else if (acc_mode == GACC_FETCH) {
5284 		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5285 					mop->size, mop->key);
5286 		if (!r && copy_to_user(uaddr, tmpbuf, mop->size))
5287 			return -EFAULT;
5288 	} else {
5289 		if (copy_from_user(tmpbuf, uaddr, mop->size))
5290 			return -EFAULT;
5291 		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5292 					 mop->size, mop->key);
5293 	}
5294 
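	/*
	 * A positive return value is a program interruption code; inject
	 * it into the guest if userspace requested exception injection.
	 */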
5295 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
5296 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5297 
5298 	return r;
5299 }
5300 
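/*
 * Dispatch a mem op to guest logical memory or, for protected guests, to
 * the SIDA. The kvm srcu lock protects against concurrent memslot updates.
 */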
5301 static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
5302 				     struct kvm_s390_mem_op *mop)
5303 {
5304 	int r, srcu_idx;
5305 
5306 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5307 
5308 	switch (mop->op) {
5309 	case KVM_S390_MEMOP_LOGICAL_READ:
5310 	case KVM_S390_MEMOP_LOGICAL_WRITE:
5311 		r = kvm_s390_vcpu_mem_op(vcpu, mop);
5312 		break;
5313 	case KVM_S390_MEMOP_SIDA_READ:
5314 	case KVM_S390_MEMOP_SIDA_WRITE:
5315 		/* we are locked against sida going away by the vcpu->mutex */
5316 		r = kvm_s390_vcpu_sida_op(vcpu, mop);
5317 		break;
5318 	default:
5319 		r = -EINVAL;
5320 	}
5321 
5322 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
5323 	return r;
5324 }
5325 
5326 long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
5327 				  unsigned long arg)
5328 {
5329 	struct kvm_vcpu *vcpu = filp->private_data;
5330 	void __user *argp = (void __user *)arg;
5331 	int rc;
5332 
5333 	switch (ioctl) {
5334 	case KVM_S390_IRQ: {
5335 		struct kvm_s390_irq s390irq;
5336 
5337 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
5338 			return -EFAULT;
5339 		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5340 		break;
5341 	}
5342 	case KVM_S390_INTERRUPT: {
5343 		struct kvm_s390_interrupt s390int;
5344 		struct kvm_s390_irq s390irq = {};
5345 
5346 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
5347 			return -EFAULT;
5348 		if (s390int_to_s390irq(&s390int, &s390irq))
5349 			return -EINVAL;
5350 		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5351 		break;
5352 	}
5353 	default:
5354 		rc = -ENOIOCTLCMD;
5355 		break;
5356 	}
5357 
5358 	/*
5359 	 * To simplify single stepping of userspace-emulated instructions,
5360 	 * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
5361 	 * should_handle_per_ifetch()). However, if userspace emulation injects
5362 	 * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
5363 	 * after (and not before) the interrupt delivery.
5364 	 */
5365 	if (!rc)
5366 		vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
5367 
5368 	return rc;
5369 }
5370 
5371 static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
5372 					struct kvm_pv_cmd *cmd)
5373 {
5374 	struct kvm_s390_pv_dmp dmp;
5375 	void *data;
5376 	int ret;
5377 
5378 	/* Dump initialization is a prerequisite */
5379 	if (!vcpu->kvm->arch.pv.dumping)
5380 		return -EINVAL;
5381 
5382 	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
5383 		return -EFAULT;
5384 
5385 	/* We only handle this subcmd right now */
5386 	if (dmp.subcmd != KVM_PV_DUMP_CPU)
5387 		return -EINVAL;
5388 
5389 	/* The CPU dump length equals the CPU storage donated at vcpu creation. */
5390 	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
5391 		return -EINVAL;
5392 
5393 	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
5394 	if (!data)
5395 		return -ENOMEM;
5396 
5397 	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
5398 
5399 	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
5400 		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
5401 
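	/*
	 * A failing ultravisor call is reported as -EINVAL; the details
	 * remain available to userspace in cmd->rc and cmd->rrc.
	 */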
5402 	if (ret)
5403 		ret = -EINVAL;
5404 
5405 	/* On success copy over the dump data */
5406 	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
5407 		ret = -EFAULT;
5408 
5409 	kvfree(data);
5410 	return ret;
5411 }
5412 
5413 long kvm_arch_vcpu_ioctl(struct file *filp,
5414 			 unsigned int ioctl, unsigned long arg)
5415 {
5416 	struct kvm_vcpu *vcpu = filp->private_data;
5417 	void __user *argp = (void __user *)arg;
5418 	int idx;
5419 	long r;
5420 	u16 rc, rrc;
5421 
5422 	vcpu_load(vcpu);
5423 
5424 	switch (ioctl) {
5425 	case KVM_S390_STORE_STATUS:
5426 		idx = srcu_read_lock(&vcpu->kvm->srcu);
5427 		r = kvm_s390_store_status_unloaded(vcpu, arg);
5428 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5429 		break;
5430 	case KVM_S390_SET_INITIAL_PSW: {
5431 		psw_t psw;
5432 
5433 		r = -EFAULT;
5434 		if (copy_from_user(&psw, argp, sizeof(psw)))
5435 			break;
5436 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
5437 		break;
5438 	}
5439 	case KVM_S390_CLEAR_RESET:
5440 		r = 0;
5441 		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
5442 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5443 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5444 					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
5445 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
5446 				   rc, rrc);
5447 		}
5448 		break;
5449 	case KVM_S390_INITIAL_RESET:
5450 		r = 0;
5451 		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
5452 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5453 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5454 					  UVC_CMD_CPU_RESET_INITIAL,
5455 					  &rc, &rrc);
5456 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
5457 				   rc, rrc);
5458 		}
5459 		break;
5460 	case KVM_S390_NORMAL_RESET:
5461 		r = 0;
5462 		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
5463 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5464 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5465 					  UVC_CMD_CPU_RESET, &rc, &rrc);
5466 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
5467 				   rc, rrc);
5468 		}
5469 		break;
5470 	case KVM_SET_ONE_REG:
5471 	case KVM_GET_ONE_REG: {
5472 		struct kvm_one_reg reg;
5473 		r = -EINVAL;
5474 		if (kvm_s390_pv_cpu_is_protected(vcpu))
5475 			break;
5476 		r = -EFAULT;
5477 		if (copy_from_user(&reg, argp, sizeof(reg)))
5478 			break;
5479 		if (ioctl == KVM_SET_ONE_REG)
5480 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
5481 		else
5482 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
5483 		break;
5484 	}
5485 #ifdef CONFIG_KVM_S390_UCONTROL
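	/*
	 * Map/unmap parts of the user address space into the guest address
	 * space of a user-controlled VM. All addresses and the length must
	 * be 1MB-segment aligned. An illustrative userspace map request
	 * (placeholders, not kernel code):
	 *
	 *	struct kvm_s390_ucas_mapping ucas = {
	 *		.user_addr = (__u64)(unsigned long)host_mem,
	 *		.vcpu_addr = guest_addr,
	 *		.length    = 1UL << 20,
	 *	};
	 *	ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &ucas);
	 */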
5486 	case KVM_S390_UCAS_MAP: {
5487 		struct kvm_s390_ucas_mapping ucas;
5488 
5489 		r = -EFAULT;
5490 		if (copy_from_user(&ucas, argp, sizeof(ucas)))
5491 			break;
5492 
5493 		r = -EINVAL;
5494 		if (!kvm_is_ucontrol(vcpu->kvm))
5495 			break;
5496 		if (!IS_ALIGNED(ucas.user_addr | ucas.vcpu_addr | ucas.length, _SEGMENT_SIZE))
5497 			break;
5498 
5499 		r = gmap_ucas_map(vcpu->arch.gmap, gpa_to_gfn(ucas.user_addr),
5500 				  gpa_to_gfn(ucas.vcpu_addr),
5501 				  ucas.length >> _SEGMENT_SHIFT);
5502 		break;
5503 	}
5504 	case KVM_S390_UCAS_UNMAP: {
5505 		struct kvm_s390_ucas_mapping ucas;
5506 
5507 		r = -EFAULT;
5508 		if (copy_from_user(&ucas, argp, sizeof(ucas)))
5509 			break;
5510 
5511 		r = -EINVAL;
5512 		if (!kvm_is_ucontrol(vcpu->kvm))
5513 			break;
5514 		if (!IS_ALIGNED(ucas.vcpu_addr | ucas.length, _SEGMENT_SIZE))
5515 			break;
5516 
5517 		gmap_ucas_unmap(vcpu->arch.gmap, gpa_to_gfn(ucas.vcpu_addr),
5518 				ucas.length >> _SEGMENT_SHIFT);
5519 		r = 0;
5520 		break;
5521 	}
5522 #endif
5523 	case KVM_S390_VCPU_FAULT: {
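		/* fault in the guest page for the address given in arg */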
5524 		idx = srcu_read_lock(&vcpu->kvm->srcu);
5525 		r = vcpu_dat_fault_handler(vcpu, arg, 0);
5526 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5527 		break;
5528 	}
5529 	case KVM_ENABLE_CAP:
5530 	{
5531 		struct kvm_enable_cap cap;
5532 		r = -EFAULT;
5533 		if (copy_from_user(&cap, argp, sizeof(cap)))
5534 			break;
5535 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5536 		break;
5537 	}
5538 	case KVM_S390_MEM_OP: {
5539 		struct kvm_s390_mem_op mem_op;
5540 
5541 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)))
5542 			r = -EFAULT;
5543 		else
5544 			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
5545 		break;
5546 	}
5547 	case KVM_S390_SET_IRQ_STATE: {
5548 		struct kvm_s390_irq_state irq_state;
5549 
5550 		r = -EFAULT;
5551 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5552 			break;
5553 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5554 		    irq_state.len == 0 ||
5555 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5556 			r = -EINVAL;
5557 			break;
5558 		}
5559 		/* do not use irq_state.flags, it will break old QEMUs */
5560 		r = kvm_s390_set_irq_state(vcpu,
5561 					   (void __user *) irq_state.buf,
5562 					   irq_state.len);
5563 		break;
5564 	}
5565 	case KVM_S390_GET_IRQ_STATE: {
5566 		struct kvm_s390_irq_state irq_state;
5567 
5568 		r = -EFAULT;
5569 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5570 			break;
5571 		if (irq_state.len == 0) {
5572 			r = -EINVAL;
5573 			break;
5574 		}
5575 		/* do not use irq_state.flags, it will break old QEMUs */
5576 		r = kvm_s390_get_irq_state(vcpu,
5577 					   (__u8 __user *)  irq_state.buf,
5578 					   irq_state.len);
5579 		break;
5580 	}
5581 	case KVM_S390_PV_CPU_COMMAND: {
5582 		struct kvm_pv_cmd cmd;
5583 
5584 		r = -EINVAL;
5585 		if (!is_prot_virt_host())
5586 			break;
5587 
5588 		r = -EFAULT;
5589 		if (copy_from_user(&cmd, argp, sizeof(cmd)))
5590 			break;
5591 
5592 		r = -EINVAL;
5593 		if (cmd.flags)
5594 			break;
5595 
5596 		/* We only handle this cmd right now */
5597 		if (cmd.cmd != KVM_PV_DUMP)
5598 			break;
5599 
5600 		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
5601 
5602 		/* Always copy over UV rc / rrc data */
5603 		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
5604 				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
5605 			r = -EFAULT;
5606 		break;
5607 	}
5608 	default:
5609 		r = -ENOTTY;
5610 	}
5611 
5612 	vcpu_put(vcpu);
5613 	return r;
5614 }
5615 
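/*
 * For user-controlled VMs, userspace may mmap() the SIE control block of
 * a vcpu at KVM_S390_SIE_PAGE_OFFSET; any other access faults with SIGBUS.
 */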
5616 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5617 {
5618 #ifdef CONFIG_KVM_S390_UCONTROL
5619 	if (vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET &&
5620 	    kvm_is_ucontrol(vcpu->kvm)) {
5621 		vmf->page = virt_to_page(vcpu->arch.sie_block);
5622 		get_page(vmf->page);
5623 		return 0;
5624 	}
5625 #endif
5626 	return VM_FAULT_SIGBUS;
5627 }
5628 
5629 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
5630 {
5631 	return true;
5632 }
5633 
5634 /* Section: memory related */
5635 int kvm_arch_prepare_memory_region(struct kvm *kvm,
5636 				   const struct kvm_memory_slot *old,
5637 				   struct kvm_memory_slot *new,
5638 				   enum kvm_mr_change change)
5639 {
5640 	gpa_t size;
5641 
5642 	if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS)
5643 		return -EINVAL;
5644 
5645 	/* When we are protected, we should not change the memory slots */
5646 	if (kvm_s390_pv_get_handle(kvm))
5647 		return -EINVAL;
5648 
5649 	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
5650 		/*
5651 		 * A few sanity checks. Memory slots have to start and end at a
5652 		 * segment boundary (1MB), i.e. the low 20 bits must be clear.
5653 		 * The memory in userland may be fragmented into different vmas;
5654 		 * mmap() and munmap() in this slot are fine at any time after this call.
5655 		 */
5656 
5657 		if (new->userspace_addr & 0xffffful)
5658 			return -EINVAL;
5659 
5660 		size = new->npages * PAGE_SIZE;
5661 		if (size & 0xffffful)
5662 			return -EINVAL;
5663 
5664 		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5665 			return -EINVAL;
5666 	}
5667 
5668 	if (!kvm->arch.migration_mode)
5669 		return 0;
5670 
5671 	/*
5672 	 * Turn off migration mode when:
5673 	 * - userspace creates a new memslot with dirty logging off,
5674 	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
5675 	 *   dirty logging is turned off.
5676 	 * Migration mode expects dirty page logging being enabled to store
5677 	 * its dirty bitmap.
5678 	 */
5679 	if (change != KVM_MR_DELETE &&
5680 	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
5681 		WARN(kvm_s390_vm_stop_migration(kvm),
5682 		     "Failed to stop migration mode");
5683 
5684 	return 0;
5685 }
5686 
5687 void kvm_arch_commit_memory_region(struct kvm *kvm,
5688 				struct kvm_memory_slot *old,
5689 				const struct kvm_memory_slot *new,
5690 				enum kvm_mr_change change)
5691 {
5692 	struct kvm_s390_mmu_cache *mc = NULL;
5693 	int rc = 0;
5694 
5695 	if (change == KVM_MR_FLAGS_ONLY)
5696 		return;
5697 
5698 	mc = kvm_s390_new_mmu_cache();
5699 	if (!mc) {
5700 		rc = -ENOMEM;
5701 		goto out;
5702 	}
5703 
5704 	scoped_guard(write_lock, &kvm->mmu_lock) {
5705 		switch (change) {
5706 		case KVM_MR_DELETE:
5707 			rc = dat_delete_slot(mc, kvm->arch.gmap->asce, old->base_gfn, old->npages);
5708 			break;
5709 		case KVM_MR_MOVE:
5710 			rc = dat_delete_slot(mc, kvm->arch.gmap->asce, old->base_gfn, old->npages);
5711 			if (rc)
5712 				break;
5713 			fallthrough;
5714 		case KVM_MR_CREATE:
5715 			rc = dat_create_slot(mc, kvm->arch.gmap->asce, new->base_gfn, new->npages);
5716 			break;
5717 		case KVM_MR_FLAGS_ONLY:
5718 			break;
5719 		default:
5720 			WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5721 		}
5722 	}
5723 out:
5724 	if (rc)
5725 		pr_warn("failed to commit memory region\n");
5726 	kvm_s390_free_mmu_cache(mc);
5727 	return;
5728 }
5729 
5730 /**
5731  * kvm_test_age_gfn() - test young
5732  * @kvm: the kvm instance
5733  * @range: the range of guest addresses whose young status needs to be tested
5734  *
5735  * Context: called by KVM common code without holding the kvm mmu lock
5736  * Return: true if any page in the given range is young, false otherwise.
5737  */
5738 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
5739 {
5740 	scoped_guard(read_lock, &kvm->mmu_lock)
5741 		return dat_test_age_gfn(kvm->arch.gmap->asce, range->start, range->end);
5742 }
5743 
5744 /**
5745  * kvm_age_gfn() - clear young
5746  * @kvm: the kvm instance
5747  * @range: the range of guest addresses whose young status needs to be cleared
5748  *
5749  * Context: called by KVM common code without holding the kvm mmu lock
5750  * Return: true if any page in the given range was young, false otherwise.
5751  */
5752 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
5753 {
5754 	scoped_guard(read_lock, &kvm->mmu_lock)
5755 		return gmap_age_gfn(kvm->arch.gmap, range->start, range->end);
5756 }
5757 
5758 /**
5759  * kvm_unmap_gfn_range() - Unmap a range of guest addresses
5760  * @kvm: the kvm instance
5761  * @range: the range of guest page frames to invalidate
5762  *
5763  * This function always returns false because every DAT table modification
5764  * has to use the appropriate DAT table manipulation instructions, which will
5765  * keep the TLB coherent, hence no additional TLB flush is ever required.
5766  *
5767  * Context: called by KVM common code with the kvm mmu write lock held
5768  * Return: false
5769  */
5770 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
5771 {
5772 	return gmap_unmap_gfn_range(kvm->arch.gmap, range->slot, range->start, range->end);
5773 }
5774 
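/*
 * Each 2-bit field of sclp.hmfai indicates how much of the corresponding
 * facility-list doubleword is hypervisor-managed; nonhyp_mask() builds
 * the mask of facility bits that may be forwarded to guests.
 */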
5775 static inline unsigned long nonhyp_mask(int i)
5776 {
5777 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
5778 
5779 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
5780 }
5781 
5782 static int __init kvm_s390_init(void)
5783 {
5784 	int i, r;
5785 
5786 	if (!sclp.has_sief2) {
5787 		pr_info("SIE is not available\n");
5788 		return -ENODEV;
5789 	}
5790 
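	/*
	 * Seed the facility base with the host facilities that may be
	 * offered to guests (hypervisor-managed bits masked off).
	 */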
5791 	for (i = 0; i < 16; i++)
5792 		kvm_s390_fac_base[i] |=
5793 			stfle_fac_list[i] & nonhyp_mask(i);
5794 
5795 	r = __kvm_s390_init();
5796 	if (r)
5797 		return r;
5798 
5799 	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5800 	if (r) {
5801 		__kvm_s390_exit();
5802 		return r;
5803 	}
5804 	return 0;
5805 }
5806 
5807 static void __exit kvm_s390_exit(void)
5808 {
5809 	kvm_exit();
5810 
5811 	__kvm_s390_exit();
5812 }
5813 
5814 module_init(kvm_s390_init);
5815 module_exit(kvm_s390_exit);
5816 
5817 /*
5818  * Enable autoloading of the kvm module.
5819  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5820  * since x86 takes a different approach.
5821  */
5822 #include <linux/miscdevice.h>
5823 MODULE_ALIAS_MISCDEV(KVM_MINOR);
5824 MODULE_ALIAS("devname:kvm");
5825