xref: /linux/arch/s390/kvm/kvm-s390.c (revision 949d0a46ad1b9ab3450fb6ed69ff1e3e13c657bd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * hosting IBM Z kernel virtual machines (s390x)
4  *
5  * Copyright IBM Corp. 2008, 2020
6  *
7  *    Author(s): Carsten Otte <cotte@de.ibm.com>
8  *               Christian Borntraeger <borntraeger@de.ibm.com>
9  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
10  *               Jason J. Herne <jjherne@us.ibm.com>
11  */
12 
13 #define pr_fmt(fmt) "kvm-s390: " fmt
14 
15 #include <linux/compiler.h>
16 #include <linux/entry-virt.h>
17 #include <linux/export.h>
18 #include <linux/err.h>
19 #include <linux/fs.h>
20 #include <linux/hrtimer.h>
21 #include <linux/init.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/mman.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/cpufeature.h>
28 #include <linux/random.h>
29 #include <linux/slab.h>
30 #include <linux/timer.h>
31 #include <linux/vmalloc.h>
32 #include <linux/bitmap.h>
33 #include <linux/sched/signal.h>
34 #include <linux/string.h>
35 #include <linux/pgtable.h>
36 #include <linux/mmu_notifier.h>
37 
38 #include <asm/access-regs.h>
39 #include <asm/asm-offsets.h>
40 #include <asm/lowcore.h>
41 #include <asm/machine.h>
42 #include <asm/stp.h>
43 #include <asm/gmap_helpers.h>
44 #include <asm/nmi.h>
45 #include <asm/isc.h>
46 #include <asm/sclp.h>
47 #include <asm/cpacf.h>
48 #include <asm/timex.h>
49 #include <asm/asm.h>
50 #include <asm/fpu.h>
51 #include <asm/ap.h>
52 #include <asm/uv.h>
53 #include "kvm-s390.h"
54 #include "gaccess.h"
55 #include "gmap.h"
56 #include "faultin.h"
57 #include "pci.h"
58 
59 #define CREATE_TRACE_POINTS
60 #include "trace.h"
61 #include "trace-s390.h"
62 
63 #define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
64 #define LOCAL_IRQS 32
65 #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
66 			   (KVM_MAX_VCPUS + LOCAL_IRQS))
67 
/*
 * Descriptors for the per-VM statistics exported through the binary
 * stats file descriptor interface (KVM_GET_STATS_FD): the generic KVM
 * VM counters plus the s390-specific interrupt-injection and
 * gmap-shadow counters.
 */
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward),
	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
	STATS_DESC_COUNTER(VM, gmap_shadow_create),
	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
};

/*
 * Header describing the layout of the VM stats blob: the id string
 * follows the header, the descriptors follow the id, and the actual
 * counter data follows the descriptors.
 */
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
93 
/*
 * Descriptors for the per-VCPU statistics exported through the binary
 * stats interface: generic KVM VCPU counters plus s390-specific exit,
 * interrupt-delivery/injection, intercepted-instruction, SIGP and
 * DIAGNOSE counters.
 */
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync),
	STATS_DESC_COUNTER(VCPU, signal_exits)
};

/*
 * Header describing the layout of the VCPU stats blob; same structure
 * as the VM stats header above (id string, then descriptors, then
 * counter data).
 */
const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
201 
/*
 * allow nested virtualization in KVM (if enabled by user space);
 * read-only after module load (mode 0444 / S_IRUGO)
 */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa  = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second; 0 disables forwarding */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
234 
/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
252 
kvm_s390_fac_size(void)253 static unsigned long kvm_s390_fac_size(void)
254 {
255 	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
256 	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
257 	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
258 		sizeof(stfle_fac_list));
259 
260 	return SIZE_INTERNAL;
261 }
262 
/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

/* s390 debug feature areas for general KVM and Ultravisor tracing */
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;
270 
271 /* Section: not file related */
272 /* forward declarations */
/* Section: not file related */
/* forward declarations */
/*
 * Compensate a host TOD clock jump of @delta in one SIE control block so
 * that the guest-visible time is unchanged.  With the multiple-epoch
 * facility (ECD_MEF) the epoch is a 128-bit value (epdx:epoch), so the
 * extension part and a possible carry must be handled as well.
 */
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		/* carry out of the low 64 bits into the epoch extension */
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
294 
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;	/* TOD delta passed by the notifier chain */

	/* adjust every SIE block (and vSIE block) of every guest */
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			/* keep the VM-wide epoch in sync with vcpu 0 */
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}
325 
/* Re-synchronize guest TOD epochs whenever the host clock is steered. */
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
329 
/*
 * Mark the KVM_S390_VM_CPU_FEAT_* bit @nr as available to guests; the
 * bitmap uses the inverted bit numbering of set_bit_inv().
 */
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}
334 
/*
 * Probe whether PERFORM LOCKED OPERATION supports function code @nr by
 * executing PLO in "test bit" mode (function code ORed with 0x100).
 * Returns nonzero if the function is available (condition code 0).
 */
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [function] "d" (function)
		: CC_CLOBBER_LIST("0"));
	return CC_TRANSFORM(cc) == 0;
}
350 
/*
 * Execute the PFCR query function (function code 0 in GR0) and store the
 * 16-byte subfunction availability mask into *query.
 */
static __always_inline void pfcr_query(u8 (*query)[16])
{
	asm volatile(
		"	lghi	0,0\n"
		"	.insn   rsy,0xeb0000000016,0,0,%[query]"
		: [query] "=QS" (*query)
		:
		: "cc", "0");
}
360 
/*
 * Execute the SORTL query function (function code 0 in GR0, parameter
 * block address in GR1) and store the 32-byte result into *query.
 */
static __always_inline void __sortl_query(u8 (*query)[32])
{
	asm volatile(
		"	lghi	0,0\n"
		"	la	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rre,0xb9380000,2,4"
		: [query] "=R" (*query)
		:
		: "cc", "0", "1");
}
372 
/*
 * Execute the DFLTCC query function (function code 0 in GR0, parameter
 * block address in GR1) and store the 32-byte result into *query.
 */
static __always_inline void __dfltcc_query(u8 (*query)[32])
{
	asm volatile(
		"	lghi	0,0\n"
		"	la	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,0xb9390000,2,4,6,0"
		: [query] "=R" (*query)
		:
		: "cc", "0", "1");
}
384 
/*
 * Probe the host for all CPU features and instruction subfunctions that
 * KVM can offer to guests: PLO functions, CPACF crypto subfunctions and
 * various SIE interpretation facilities.  Fills in
 * kvm_s390_available_subfunc and kvm_s390_available_cpu_feat.
 * SIE-related features are only announced when nested virtualization is
 * enabled and the required base facilities are present.
 */
static void __init kvm_s390_cpu_feat_init(void)
{
	int i;

	/* collect the bitmask of available PLO function codes */
	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__sortl_query(&kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__dfltcc_query(&kvm_s390_available_subfunc.dfltcc);

	if (test_facility(201))	/* PFCR */
		pfcr_query(&kvm_s390_available_subfunc.pfcr);

	if (machine_has_esop())
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !machine_has_esop() || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
490 
/*
 * One-time module initialization: set up the debug areas, probe CPU
 * features, register the FLIC device, optionally set up PCI interpretation
 * support, initialize the GIB and hook into the epoch-delta notifier.
 * On failure, everything set up so far is torn down in reverse order via
 * the goto chain.  Returns 0 on success or a negative error code.
 */
static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);

	return 0;

err_gib:
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}
544 
/*
 * Module teardown: undo everything done in __kvm_s390_init(), in
 * reverse order of initialization.
 */
static void __kvm_s390_exit(void)
{
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);

	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}
556 
/*
 * Perform one storage-key operation (@op) on the guest absolute address
 * @addr under the mmu read lock.
 *
 * Returns, on success:
 *   SSKE/ISKE - the (previous) storage key value,
 *   RRBE      - the reference/change state shifted into cc position.
 * On failure, a negative error code (or -EINVAL for an unknown @op).
 */
static int kvm_s390_keyop(struct kvm_s390_mmu_cache *mc, struct kvm *kvm, int op,
			  unsigned long addr, union skey skey)
{
	union asce asce = kvm->arch.gmap->asce;
	gfn_t gfn = gpa_to_gfn(addr);
	int r;

	guard(read_lock)(&kvm->mmu_lock);

	switch (op) {
	case KVM_S390_KEYOP_SSKE:
		r = dat_cond_set_storage_key(mc, asce, gfn, skey, &skey, 0, 0, 0);
		if (r >= 0)
			return skey.skey;
		break;
	case KVM_S390_KEYOP_ISKE:
		r = dat_get_storage_key(asce, gfn, &skey);
		if (!r)
			return skey.skey;
		break;
	case KVM_S390_KEYOP_RRBE:
		r = dat_reset_reference_bit(asce, gfn);
		if (r > 0)
			return r << 1;
		break;
	default:
		return -EINVAL;
	}
	return r;
}
587 
588 /* Section: device related */
kvm_arch_dev_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)589 long kvm_arch_dev_ioctl(struct file *filp,
590 			unsigned int ioctl, unsigned long arg)
591 {
592 	if (ioctl == KVM_S390_ENABLE_SIE)
593 		return 0;
594 	return -EINVAL;
595 }
596 
/*
 * KVM_CHECK_EXTENSION handler: report whether (and to what extent) a
 * capability is supported on this host/VM.  Returns 0 for unknown
 * capabilities, 1 for plain supported ones, or a capability specific
 * value (e.g. a size limit or feature mask).
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
	case KVM_CAP_S390_USER_OPEREXEC:
	case KVM_CAP_S390_KEYOP:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		/* huge page backing is not available for ucontrol VMs */
		if (hpage && !(kvm && kvm_is_ucontrol(kvm)))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		/*
		 * Return the same value for KVM_CAP_MAX_VCPUS and
		 * KVM_CAP_MAX_VCPU_ID to conform with the KVM API.
		 */
		r = KVM_S390_ESCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = machine_has_esop();
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = test_facility(129);
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		/* all dump UV calls must be supported for dump support */
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}
719 
/*
 * Transfer dirty information for @memslot from the gmap into the KVM
 * dirty bitmap, under the mmu read lock.
 */
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	gfn_t last_gfn = memslot->base_gfn + memslot->npages;

	scoped_guard(read_lock, &kvm->mmu_lock)
		gmap_sync_dirty_log(kvm->arch.gmap, memslot->base_gfn, last_gfn);
}
727 
728 /* Section: vm related */
729 static void sca_del_vcpu(struct kvm_vcpu *vcpu);
730 
731 /*
732  * Get (and clear) the dirty memory log for a memory slot.
733  */
kvm_vm_ioctl_get_dirty_log(struct kvm * kvm,struct kvm_dirty_log * log)734 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
735 			       struct kvm_dirty_log *log)
736 {
737 	int r;
738 	unsigned long n;
739 	struct kvm_memory_slot *memslot;
740 	int is_dirty;
741 
742 	if (kvm_is_ucontrol(kvm))
743 		return -EINVAL;
744 
745 	mutex_lock(&kvm->slots_lock);
746 
747 	r = -EINVAL;
748 	if (log->slot >= KVM_USER_MEM_SLOTS)
749 		goto out;
750 
751 	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
752 	if (r)
753 		goto out;
754 
755 	/* Clear the dirty log */
756 	if (is_dirty) {
757 		n = kvm_dirty_bitmap_bytes(memslot);
758 		memset(memslot->dirty_bitmap, 0, n);
759 	}
760 	r = 0;
761 out:
762 	mutex_unlock(&kvm->slots_lock);
763 	return r;
764 }
765 
icpt_operexc_on_all_vcpus(struct kvm * kvm)766 static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
767 {
768 	unsigned long i;
769 	struct kvm_vcpu *vcpu;
770 
771 	kvm_for_each_vcpu(i, vcpu, kvm) {
772 		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
773 	}
774 }
775 
kvm_vm_ioctl_enable_cap(struct kvm * kvm,struct kvm_enable_cap * cap)776 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
777 {
778 	int r;
779 
780 	if (cap->flags)
781 		return -EINVAL;
782 
783 	switch (cap->cap) {
784 	case KVM_CAP_S390_IRQCHIP:
785 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
786 		kvm->arch.use_irqchip = 1;
787 		r = 0;
788 		break;
789 	case KVM_CAP_S390_USER_SIGP:
790 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
791 		kvm->arch.user_sigp = 1;
792 		r = 0;
793 		break;
794 	case KVM_CAP_S390_VECTOR_REGISTERS:
795 		mutex_lock(&kvm->lock);
796 		if (kvm->created_vcpus) {
797 			r = -EBUSY;
798 		} else if (cpu_has_vx()) {
799 			set_kvm_facility(kvm->arch.model.fac_mask, 129);
800 			set_kvm_facility(kvm->arch.model.fac_list, 129);
801 			if (test_facility(134)) {
802 				set_kvm_facility(kvm->arch.model.fac_mask, 134);
803 				set_kvm_facility(kvm->arch.model.fac_list, 134);
804 			}
805 			if (test_facility(135)) {
806 				set_kvm_facility(kvm->arch.model.fac_mask, 135);
807 				set_kvm_facility(kvm->arch.model.fac_list, 135);
808 			}
809 			if (test_facility(148)) {
810 				set_kvm_facility(kvm->arch.model.fac_mask, 148);
811 				set_kvm_facility(kvm->arch.model.fac_list, 148);
812 			}
813 			if (test_facility(152)) {
814 				set_kvm_facility(kvm->arch.model.fac_mask, 152);
815 				set_kvm_facility(kvm->arch.model.fac_list, 152);
816 			}
817 			if (test_facility(192)) {
818 				set_kvm_facility(kvm->arch.model.fac_mask, 192);
819 				set_kvm_facility(kvm->arch.model.fac_list, 192);
820 			}
821 			if (test_facility(198)) {
822 				set_kvm_facility(kvm->arch.model.fac_mask, 198);
823 				set_kvm_facility(kvm->arch.model.fac_list, 198);
824 			}
825 			if (test_facility(199)) {
826 				set_kvm_facility(kvm->arch.model.fac_mask, 199);
827 				set_kvm_facility(kvm->arch.model.fac_list, 199);
828 			}
829 			r = 0;
830 		} else
831 			r = -EINVAL;
832 		mutex_unlock(&kvm->lock);
833 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
834 			 r ? "(not available)" : "(success)");
835 		break;
836 	case KVM_CAP_S390_RI:
837 		r = -EINVAL;
838 		mutex_lock(&kvm->lock);
839 		if (kvm->created_vcpus) {
840 			r = -EBUSY;
841 		} else if (test_facility(64)) {
842 			set_kvm_facility(kvm->arch.model.fac_mask, 64);
843 			set_kvm_facility(kvm->arch.model.fac_list, 64);
844 			r = 0;
845 		}
846 		mutex_unlock(&kvm->lock);
847 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
848 			 r ? "(not available)" : "(success)");
849 		break;
850 	case KVM_CAP_S390_AIS:
851 		mutex_lock(&kvm->lock);
852 		if (kvm->created_vcpus) {
853 			r = -EBUSY;
854 		} else {
855 			set_kvm_facility(kvm->arch.model.fac_mask, 72);
856 			set_kvm_facility(kvm->arch.model.fac_list, 72);
857 			r = 0;
858 		}
859 		mutex_unlock(&kvm->lock);
860 		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
861 			 r ? "(not available)" : "(success)");
862 		break;
863 	case KVM_CAP_S390_GS:
864 		r = -EINVAL;
865 		mutex_lock(&kvm->lock);
866 		if (kvm->created_vcpus) {
867 			r = -EBUSY;
868 		} else if (test_facility(133)) {
869 			set_kvm_facility(kvm->arch.model.fac_mask, 133);
870 			set_kvm_facility(kvm->arch.model.fac_list, 133);
871 			r = 0;
872 		}
873 		mutex_unlock(&kvm->lock);
874 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
875 			 r ? "(not available)" : "(success)");
876 		break;
877 	case KVM_CAP_S390_HPAGE_1M:
878 		mutex_lock(&kvm->lock);
879 		if (kvm->created_vcpus)
880 			r = -EBUSY;
881 		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
882 			r = -EINVAL;
883 		else {
884 			r = 0;
885 			set_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &kvm->arch.gmap->flags);
886 			/*
887 			 * We might have to create fake 4k page
888 			 * tables. To avoid that the hardware works on
889 			 * stale PGSTEs, we emulate these instructions.
890 			 */
891 			kvm->arch.use_skf = 0;
892 			kvm->arch.use_pfmfi = 0;
893 		}
894 		mutex_unlock(&kvm->lock);
895 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
896 			 r ? "(not available)" : "(success)");
897 		break;
898 	case KVM_CAP_S390_USER_STSI:
899 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
900 		kvm->arch.user_stsi = 1;
901 		r = 0;
902 		break;
903 	case KVM_CAP_S390_USER_INSTR0:
904 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
905 		kvm->arch.user_instr0 = 1;
906 		icpt_operexc_on_all_vcpus(kvm);
907 		r = 0;
908 		break;
909 	case KVM_CAP_S390_CPU_TOPOLOGY:
910 		r = -EINVAL;
911 		mutex_lock(&kvm->lock);
912 		if (kvm->created_vcpus) {
913 			r = -EBUSY;
914 		} else if (test_facility(11)) {
915 			set_kvm_facility(kvm->arch.model.fac_mask, 11);
916 			set_kvm_facility(kvm->arch.model.fac_list, 11);
917 			r = 0;
918 		}
919 		mutex_unlock(&kvm->lock);
920 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
921 			 r ? "(not available)" : "(success)");
922 		break;
923 	case KVM_CAP_S390_USER_OPEREXEC:
924 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_OPEREXEC");
925 		kvm->arch.user_operexec = 1;
926 		icpt_operexc_on_all_vcpus(kvm);
927 		r = 0;
928 		break;
929 	default:
930 		r = -EINVAL;
931 		break;
932 	}
933 	return r;
934 }
935 
kvm_s390_get_mem_control(struct kvm * kvm,struct kvm_device_attr * attr)936 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
937 {
938 	int ret;
939 
940 	switch (attr->attr) {
941 	case KVM_S390_VM_MEM_LIMIT_SIZE:
942 		ret = 0;
943 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
944 			 kvm->arch.mem_limit);
945 		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
946 			ret = -EFAULT;
947 		break;
948 	default:
949 		ret = -ENXIO;
950 		break;
951 	}
952 	return ret;
953 }
954 
/*
 * Set memory-management attributes of the VM: enable CMMA, reset all
 * CMMA states, or configure the maximum guest memory size.
 *
 * Returns 0 on success, -ENXIO for unknown/unavailable attributes and
 * -EINVAL/-EBUSY/-EFAULT/-E2BIG on the error paths noted below.
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		/* CMMA can only be enabled before the first vcpu is created */
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA: {
		gfn_t start_gfn = 0;

		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		/*
		 * dat_reset_cmma() processes a chunk and returns the gfn to
		 * continue from (0 when finished); reschedule between chunks
		 * to keep latency down.
		 */
		do {
			start_gfn = dat_reset_cmma(kvm->arch.gmap->asce, start_gfn);
			cond_resched();
		} while (start_gfn);
		ret = 0;
		break;
	}
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* an existing limit may only shrink, never grow */
		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* the limit can only change while no vcpus exist */
		ret = -EBUSY;
		if (!kvm->created_vcpus)
			ret = gmap_set_limit(kvm->arch.gmap, gpa_to_gfn(new_limit));
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%p",
			 (void *)kvm->arch.gmap->asce.val);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
1025 
1026 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
1027 
kvm_s390_vcpu_crypto_reset_all(struct kvm * kvm)1028 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
1029 {
1030 	struct kvm_vcpu *vcpu;
1031 	unsigned long i;
1032 
1033 	kvm_s390_vcpu_block_all(kvm);
1034 
1035 	kvm_for_each_vcpu(i, vcpu, kvm) {
1036 		kvm_s390_vcpu_crypto_setup(vcpu);
1037 		/* recreate the shadow crycb by leaving the VSIE handler */
1038 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
1039 	}
1040 
1041 	kvm_s390_vcpu_unblock_all(kvm);
1042 }
1043 
kvm_s390_vm_set_crypto(struct kvm * kvm,struct kvm_device_attr * attr)1044 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
1045 {
1046 	mutex_lock(&kvm->lock);
1047 	switch (attr->attr) {
1048 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1049 		if (!test_kvm_facility(kvm, 76)) {
1050 			mutex_unlock(&kvm->lock);
1051 			return -EINVAL;
1052 		}
1053 		get_random_bytes(
1054 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1055 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1056 		kvm->arch.crypto.aes_kw = 1;
1057 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
1058 		break;
1059 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1060 		if (!test_kvm_facility(kvm, 76)) {
1061 			mutex_unlock(&kvm->lock);
1062 			return -EINVAL;
1063 		}
1064 		get_random_bytes(
1065 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1066 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1067 		kvm->arch.crypto.dea_kw = 1;
1068 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
1069 		break;
1070 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1071 		if (!test_kvm_facility(kvm, 76)) {
1072 			mutex_unlock(&kvm->lock);
1073 			return -EINVAL;
1074 		}
1075 		kvm->arch.crypto.aes_kw = 0;
1076 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
1077 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1078 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
1079 		break;
1080 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1081 		if (!test_kvm_facility(kvm, 76)) {
1082 			mutex_unlock(&kvm->lock);
1083 			return -EINVAL;
1084 		}
1085 		kvm->arch.crypto.dea_kw = 0;
1086 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
1087 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1088 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
1089 		break;
1090 	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1091 		if (!ap_instructions_available()) {
1092 			mutex_unlock(&kvm->lock);
1093 			return -EOPNOTSUPP;
1094 		}
1095 		kvm->arch.crypto.apie = 1;
1096 		break;
1097 	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1098 		if (!ap_instructions_available()) {
1099 			mutex_unlock(&kvm->lock);
1100 			return -EOPNOTSUPP;
1101 		}
1102 		kvm->arch.crypto.apie = 0;
1103 		break;
1104 	default:
1105 		mutex_unlock(&kvm->lock);
1106 		return -ENXIO;
1107 	}
1108 
1109 	kvm_s390_vcpu_crypto_reset_all(kvm);
1110 	mutex_unlock(&kvm->lock);
1111 	return 0;
1112 }
1113 
kvm_s390_vcpu_pci_setup(struct kvm_vcpu * vcpu)1114 static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
1115 {
1116 	/* Only set the ECB bits after guest requests zPCI interpretation */
1117 	if (!vcpu->kvm->arch.use_zpci_interp)
1118 		return;
1119 
1120 	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
1121 	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
1122 }
1123 
kvm_s390_vcpu_pci_enable_interp(struct kvm * kvm)1124 void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
1125 {
1126 	struct kvm_vcpu *vcpu;
1127 	unsigned long i;
1128 
1129 	lockdep_assert_held(&kvm->lock);
1130 
1131 	if (!kvm_s390_pci_interp_allowed())
1132 		return;
1133 
1134 	/*
1135 	 * If host is configured for PCI and the necessary facilities are
1136 	 * available, turn on interpretation for the life of this guest
1137 	 */
1138 	kvm->arch.use_zpci_interp = 1;
1139 
1140 	kvm_s390_vcpu_block_all(kvm);
1141 
1142 	kvm_for_each_vcpu(i, vcpu, kvm) {
1143 		kvm_s390_vcpu_pci_setup(vcpu);
1144 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
1145 	}
1146 
1147 	kvm_s390_vcpu_unblock_all(kvm);
1148 }
1149 
kvm_s390_sync_request_broadcast(struct kvm * kvm,int req)1150 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
1151 {
1152 	unsigned long cx;
1153 	struct kvm_vcpu *vcpu;
1154 
1155 	kvm_for_each_vcpu(cx, vcpu, kvm)
1156 		kvm_s390_sync_request(req, vcpu);
1157 }
1158 
1159 /*
1160  * Must be called with kvm->srcu held to avoid races on memslots, and with
1161  * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1162  */
kvm_s390_vm_start_migration(struct kvm * kvm)1163 static int kvm_s390_vm_start_migration(struct kvm *kvm)
1164 {
1165 	struct kvm_memory_slot *ms;
1166 	struct kvm_memslots *slots;
1167 	unsigned long ram_pages = 0;
1168 	int bkt;
1169 
1170 	/* migration mode already enabled */
1171 	if (kvm->arch.migration_mode)
1172 		return 0;
1173 	slots = kvm_memslots(kvm);
1174 	if (!slots || kvm_memslots_empty(slots))
1175 		return -EINVAL;
1176 
1177 	if (!kvm->arch.use_cmma) {
1178 		kvm->arch.migration_mode = 1;
1179 		return 0;
1180 	}
1181 	kvm_for_each_memslot(ms, bkt, slots) {
1182 		if (!ms->dirty_bitmap)
1183 			return -EINVAL;
1184 		ram_pages += ms->npages;
1185 	}
1186 	/* mark all the pages as dirty */
1187 	gmap_set_cmma_all_dirty(kvm->arch.gmap);
1188 	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1189 	kvm->arch.migration_mode = 1;
1190 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
1191 	return 0;
1192 }
1193 
1194 /*
1195  * Must be called with kvm->slots_lock to avoid races with ourselves and
1196  * kvm_s390_vm_start_migration.
1197  */
kvm_s390_vm_stop_migration(struct kvm * kvm)1198 static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1199 {
1200 	/* migration mode already disabled */
1201 	if (!kvm->arch.migration_mode)
1202 		return 0;
1203 	kvm->arch.migration_mode = 0;
1204 	if (kvm->arch.use_cmma)
1205 		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
1206 	return 0;
1207 }
1208 
kvm_s390_vm_set_migration(struct kvm * kvm,struct kvm_device_attr * attr)1209 static int kvm_s390_vm_set_migration(struct kvm *kvm,
1210 				     struct kvm_device_attr *attr)
1211 {
1212 	int res = -ENXIO;
1213 
1214 	mutex_lock(&kvm->slots_lock);
1215 	switch (attr->attr) {
1216 	case KVM_S390_VM_MIGRATION_START:
1217 		res = kvm_s390_vm_start_migration(kvm);
1218 		break;
1219 	case KVM_S390_VM_MIGRATION_STOP:
1220 		res = kvm_s390_vm_stop_migration(kvm);
1221 		break;
1222 	default:
1223 		break;
1224 	}
1225 	mutex_unlock(&kvm->slots_lock);
1226 
1227 	return res;
1228 }
1229 
kvm_s390_vm_get_migration(struct kvm * kvm,struct kvm_device_attr * attr)1230 static int kvm_s390_vm_get_migration(struct kvm *kvm,
1231 				     struct kvm_device_attr *attr)
1232 {
1233 	u64 mig = kvm->arch.migration_mode;
1234 
1235 	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1236 		return -ENXIO;
1237 
1238 	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1239 		return -EFAULT;
1240 	return 0;
1241 }
1242 
1243 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
1244 
kvm_s390_set_tod_ext(struct kvm * kvm,struct kvm_device_attr * attr)1245 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1246 {
1247 	struct kvm_s390_vm_tod_clock gtod;
1248 
1249 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1250 		return -EFAULT;
1251 
1252 	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
1253 		return -EINVAL;
1254 	__kvm_s390_set_tod_clock(kvm, &gtod);
1255 
1256 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1257 		gtod.epoch_idx, gtod.tod);
1258 
1259 	return 0;
1260 }
1261 
kvm_s390_set_tod_high(struct kvm * kvm,struct kvm_device_attr * attr)1262 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1263 {
1264 	u8 gtod_high;
1265 
1266 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1267 					   sizeof(gtod_high)))
1268 		return -EFAULT;
1269 
1270 	if (gtod_high != 0)
1271 		return -EINVAL;
1272 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
1273 
1274 	return 0;
1275 }
1276 
kvm_s390_set_tod_low(struct kvm * kvm,struct kvm_device_attr * attr)1277 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1278 {
1279 	struct kvm_s390_vm_tod_clock gtod = { 0 };
1280 
1281 	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1282 			   sizeof(gtod.tod)))
1283 		return -EFAULT;
1284 
1285 	__kvm_s390_set_tod_clock(kvm, &gtod);
1286 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
1287 	return 0;
1288 }
1289 
kvm_s390_set_tod(struct kvm * kvm,struct kvm_device_attr * attr)1290 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1291 {
1292 	int ret;
1293 
1294 	if (attr->flags)
1295 		return -EINVAL;
1296 
1297 	mutex_lock(&kvm->lock);
1298 	/*
1299 	 * For protected guests, the TOD is managed by the ultravisor, so trying
1300 	 * to change it will never bring the expected results.
1301 	 */
1302 	if (kvm_s390_pv_is_protected(kvm)) {
1303 		ret = -EOPNOTSUPP;
1304 		goto out_unlock;
1305 	}
1306 
1307 	switch (attr->attr) {
1308 	case KVM_S390_VM_TOD_EXT:
1309 		ret = kvm_s390_set_tod_ext(kvm, attr);
1310 		break;
1311 	case KVM_S390_VM_TOD_HIGH:
1312 		ret = kvm_s390_set_tod_high(kvm, attr);
1313 		break;
1314 	case KVM_S390_VM_TOD_LOW:
1315 		ret = kvm_s390_set_tod_low(kvm, attr);
1316 		break;
1317 	default:
1318 		ret = -ENXIO;
1319 		break;
1320 	}
1321 
1322 out_unlock:
1323 	mutex_unlock(&kvm->lock);
1324 	return ret;
1325 }
1326 
/*
 * Read the current guest TOD clock: host TOD plus the guest's epoch,
 * including the epoch index when the multiple-epoch facility (139) is
 * available.  Preemption is disabled so the TOD value and the epoch
 * are sampled on a single CPU.
 */
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		/* the 64-bit addition above wrapped: carry into the index */
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}
1346 
kvm_s390_get_tod_ext(struct kvm * kvm,struct kvm_device_attr * attr)1347 static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1348 {
1349 	struct kvm_s390_vm_tod_clock gtod;
1350 
1351 	memset(&gtod, 0, sizeof(gtod));
1352 	kvm_s390_get_tod_clock(kvm, &gtod);
1353 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1354 		return -EFAULT;
1355 
1356 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1357 		gtod.epoch_idx, gtod.tod);
1358 	return 0;
1359 }
1360 
kvm_s390_get_tod_high(struct kvm * kvm,struct kvm_device_attr * attr)1361 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1362 {
1363 	u8 gtod_high = 0;
1364 
1365 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
1366 					 sizeof(gtod_high)))
1367 		return -EFAULT;
1368 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
1369 
1370 	return 0;
1371 }
1372 
kvm_s390_get_tod_low(struct kvm * kvm,struct kvm_device_attr * attr)1373 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1374 {
1375 	u64 gtod;
1376 
1377 	gtod = kvm_s390_get_tod_clock_fast(kvm);
1378 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1379 		return -EFAULT;
1380 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
1381 
1382 	return 0;
1383 }
1384 
kvm_s390_get_tod(struct kvm * kvm,struct kvm_device_attr * attr)1385 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1386 {
1387 	int ret;
1388 
1389 	if (attr->flags)
1390 		return -EINVAL;
1391 
1392 	switch (attr->attr) {
1393 	case KVM_S390_VM_TOD_EXT:
1394 		ret = kvm_s390_get_tod_ext(kvm, attr);
1395 		break;
1396 	case KVM_S390_VM_TOD_HIGH:
1397 		ret = kvm_s390_get_tod_high(kvm, attr);
1398 		break;
1399 	case KVM_S390_VM_TOD_LOW:
1400 		ret = kvm_s390_get_tod_low(kvm, attr);
1401 		break;
1402 	default:
1403 		ret = -ENXIO;
1404 		break;
1405 	}
1406 	return ret;
1407 }
1408 
kvm_s390_set_processor(struct kvm * kvm,struct kvm_device_attr * attr)1409 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1410 {
1411 	struct kvm_s390_vm_cpu_processor *proc;
1412 	u16 lowest_ibc, unblocked_ibc;
1413 	int ret = 0;
1414 
1415 	mutex_lock(&kvm->lock);
1416 	if (kvm->created_vcpus) {
1417 		ret = -EBUSY;
1418 		goto out;
1419 	}
1420 	proc = kzalloc_obj(*proc, GFP_KERNEL_ACCOUNT);
1421 	if (!proc) {
1422 		ret = -ENOMEM;
1423 		goto out;
1424 	}
1425 	if (!copy_from_user(proc, (void __user *)attr->addr,
1426 			    sizeof(*proc))) {
1427 		kvm->arch.model.cpuid = proc->cpuid;
1428 		lowest_ibc = sclp.ibc >> 16 & 0xfff;
1429 		unblocked_ibc = sclp.ibc & 0xfff;
1430 		if (lowest_ibc && proc->ibc) {
1431 			if (proc->ibc > unblocked_ibc)
1432 				kvm->arch.model.ibc = unblocked_ibc;
1433 			else if (proc->ibc < lowest_ibc)
1434 				kvm->arch.model.ibc = lowest_ibc;
1435 			else
1436 				kvm->arch.model.ibc = proc->ibc;
1437 		}
1438 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
1439 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
1440 		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1441 			 kvm->arch.model.ibc,
1442 			 kvm->arch.model.cpuid);
1443 		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1444 			 kvm->arch.model.fac_list[0],
1445 			 kvm->arch.model.fac_list[1],
1446 			 kvm->arch.model.fac_list[2]);
1447 	} else
1448 		ret = -EFAULT;
1449 	kfree(proc);
1450 out:
1451 	mutex_unlock(&kvm->lock);
1452 	return ret;
1453 }
1454 
kvm_s390_set_processor_feat(struct kvm * kvm,struct kvm_device_attr * attr)1455 static int kvm_s390_set_processor_feat(struct kvm *kvm,
1456 				       struct kvm_device_attr *attr)
1457 {
1458 	struct kvm_s390_vm_cpu_feat data;
1459 
1460 	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1461 		return -EFAULT;
1462 	if (!bitmap_subset((unsigned long *) data.feat,
1463 			   kvm_s390_available_cpu_feat,
1464 			   KVM_S390_VM_CPU_FEAT_NR_BITS))
1465 		return -EINVAL;
1466 
1467 	mutex_lock(&kvm->lock);
1468 	if (kvm->created_vcpus) {
1469 		mutex_unlock(&kvm->lock);
1470 		return -EBUSY;
1471 	}
1472 	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1473 	mutex_unlock(&kvm->lock);
1474 	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1475 			 data.feat[0],
1476 			 data.feat[1],
1477 			 data.feat[2]);
1478 	return 0;
1479 }
1480 
kvm_s390_set_processor_subfunc(struct kvm * kvm,struct kvm_device_attr * attr)1481 static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1482 					  struct kvm_device_attr *attr)
1483 {
1484 	mutex_lock(&kvm->lock);
1485 	if (kvm->created_vcpus) {
1486 		mutex_unlock(&kvm->lock);
1487 		return -EBUSY;
1488 	}
1489 
1490 	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1491 			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1492 		mutex_unlock(&kvm->lock);
1493 		return -EFAULT;
1494 	}
1495 	mutex_unlock(&kvm->lock);
1496 
1497 	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1498 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1499 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1500 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1501 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1502 	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1503 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1504 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1505 	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1506 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1507 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1508 	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1509 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1510 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1511 	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1512 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1513 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1514 	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1515 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1516 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1517 	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1518 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1519 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1520 	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1521 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1522 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1523 	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1524 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1525 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1526 	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1527 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1528 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1529 	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1530 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1531 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1532 	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1533 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1534 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1535 	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1536 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1537 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1538 	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1539 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1540 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1541 	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1542 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1543 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1544 	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1545 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1546 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1547 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1548 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1549 	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1550 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1551 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1552 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1553 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1554 	VM_EVENT(kvm, 3, "GET: guest PFCR   subfunc 0x%16.16lx.%16.16lx",
1555 		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1556 		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
1557 
1558 	return 0;
1559 }
1560 
/*
 * Bit mask (the .feat word of struct kvm_s390_vm_cpu_uv_feat) of UV
 * features userspace may enable for a guest: AP and AP-interrupt
 * interpretation.  Built via a compound literal so the mask tracks the
 * struct's bitfield layout automatically.
 */
#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK	\
(						\
	((struct kvm_s390_vm_cpu_uv_feat){	\
		.ap = 1,			\
		.ap_intr = 1,			\
	})					\
	.feat					\
)
1569 
kvm_s390_set_uv_feat(struct kvm * kvm,struct kvm_device_attr * attr)1570 static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1571 {
1572 	struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
1573 	unsigned long data, filter;
1574 
1575 	filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1576 	if (get_user(data, &ptr->feat))
1577 		return -EFAULT;
1578 	if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
1579 		return -EINVAL;
1580 
1581 	mutex_lock(&kvm->lock);
1582 	if (kvm->created_vcpus) {
1583 		mutex_unlock(&kvm->lock);
1584 		return -EBUSY;
1585 	}
1586 	kvm->arch.model.uv_feat_guest.feat = data;
1587 	mutex_unlock(&kvm->lock);
1588 
1589 	VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);
1590 
1591 	return 0;
1592 }
1593 
kvm_s390_set_cpu_model(struct kvm * kvm,struct kvm_device_attr * attr)1594 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1595 {
1596 	int ret = -ENXIO;
1597 
1598 	switch (attr->attr) {
1599 	case KVM_S390_VM_CPU_PROCESSOR:
1600 		ret = kvm_s390_set_processor(kvm, attr);
1601 		break;
1602 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1603 		ret = kvm_s390_set_processor_feat(kvm, attr);
1604 		break;
1605 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1606 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
1607 		break;
1608 	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1609 		ret = kvm_s390_set_uv_feat(kvm, attr);
1610 		break;
1611 	}
1612 	return ret;
1613 }
1614 
kvm_s390_get_processor(struct kvm * kvm,struct kvm_device_attr * attr)1615 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1616 {
1617 	struct kvm_s390_vm_cpu_processor *proc;
1618 	int ret = 0;
1619 
1620 	proc = kzalloc_obj(*proc, GFP_KERNEL_ACCOUNT);
1621 	if (!proc) {
1622 		ret = -ENOMEM;
1623 		goto out;
1624 	}
1625 	proc->cpuid = kvm->arch.model.cpuid;
1626 	proc->ibc = kvm->arch.model.ibc;
1627 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1628 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1629 	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1630 		 kvm->arch.model.ibc,
1631 		 kvm->arch.model.cpuid);
1632 	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1633 		 kvm->arch.model.fac_list[0],
1634 		 kvm->arch.model.fac_list[1],
1635 		 kvm->arch.model.fac_list[2]);
1636 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1637 		ret = -EFAULT;
1638 	kfree(proc);
1639 out:
1640 	return ret;
1641 }
1642 
kvm_s390_get_machine(struct kvm * kvm,struct kvm_device_attr * attr)1643 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1644 {
1645 	struct kvm_s390_vm_cpu_machine *mach;
1646 	int ret = 0;
1647 
1648 	mach = kzalloc_obj(*mach, GFP_KERNEL_ACCOUNT);
1649 	if (!mach) {
1650 		ret = -ENOMEM;
1651 		goto out;
1652 	}
1653 	get_cpu_id((struct cpuid *) &mach->cpuid);
1654 	mach->ibc = sclp.ibc;
1655 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1656 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1657 	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
1658 	       sizeof(stfle_fac_list));
1659 	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1660 		 kvm->arch.model.ibc,
1661 		 kvm->arch.model.cpuid);
1662 	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1663 		 mach->fac_mask[0],
1664 		 mach->fac_mask[1],
1665 		 mach->fac_mask[2]);
1666 	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1667 		 mach->fac_list[0],
1668 		 mach->fac_list[1],
1669 		 mach->fac_list[2]);
1670 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1671 		ret = -EFAULT;
1672 	kfree(mach);
1673 out:
1674 	return ret;
1675 }
1676 
kvm_s390_get_processor_feat(struct kvm * kvm,struct kvm_device_attr * attr)1677 static int kvm_s390_get_processor_feat(struct kvm *kvm,
1678 				       struct kvm_device_attr *attr)
1679 {
1680 	struct kvm_s390_vm_cpu_feat data;
1681 
1682 	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1683 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1684 		return -EFAULT;
1685 	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1686 			 data.feat[0],
1687 			 data.feat[1],
1688 			 data.feat[2]);
1689 	return 0;
1690 }
1691 
kvm_s390_get_machine_feat(struct kvm * kvm,struct kvm_device_attr * attr)1692 static int kvm_s390_get_machine_feat(struct kvm *kvm,
1693 				     struct kvm_device_attr *attr)
1694 {
1695 	struct kvm_s390_vm_cpu_feat data;
1696 
1697 	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1698 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1699 		return -EFAULT;
1700 	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
1701 			 data.feat[0],
1702 			 data.feat[1],
1703 			 data.feat[2]);
1704 	return 0;
1705 }
1706 
kvm_s390_get_processor_subfunc(struct kvm * kvm,struct kvm_device_attr * attr)1707 static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1708 					  struct kvm_device_attr *attr)
1709 {
1710 	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1711 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1712 		return -EFAULT;
1713 
1714 	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1715 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1716 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1717 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1718 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1719 	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1720 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1721 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1722 	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1723 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1724 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1725 	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1726 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1727 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1728 	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1729 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1730 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1731 	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1732 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1733 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1734 	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1735 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1736 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1737 	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1738 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1739 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1740 	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1741 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1742 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1743 	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1744 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1745 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1746 	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1747 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1748 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1749 	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1750 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1751 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1752 	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1753 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1754 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1755 	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1756 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1757 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1758 	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1759 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1760 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1761 	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1762 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1763 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1764 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1765 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1766 	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1767 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1768 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1769 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1770 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1771 	VM_EVENT(kvm, 3, "GET: guest PFCR   subfunc 0x%16.16lx.%16.16lx",
1772 		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1773 		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
1774 
1775 	return 0;
1776 }
1777 
/*
 * Copy the host (machine) instruction subfunction availability masks to
 * user space and trace each mask at VM event level 3.
 *
 * Returns 0 on success, -EFAULT if the copy to user space fails.
 */
static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	/* Each mask is dumped as 64-bit chunks; PLO/SORTL/DFLTCC are 256 bits wide. */
	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
	VM_EVENT(kvm, 3, "GET: host  PFCR   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);

	return 0;
}
1848 
kvm_s390_get_processor_uv_feat(struct kvm * kvm,struct kvm_device_attr * attr)1849 static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1850 {
1851 	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1852 	unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
1853 
1854 	if (put_user(feat, &dst->feat))
1855 		return -EFAULT;
1856 	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
1857 
1858 	return 0;
1859 }
1860 
kvm_s390_get_machine_uv_feat(struct kvm * kvm,struct kvm_device_attr * attr)1861 static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1862 {
1863 	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1864 	unsigned long feat;
1865 
1866 	BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));
1867 
1868 	feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1869 	if (put_user(feat, &dst->feat))
1870 		return -EFAULT;
1871 	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
1872 
1873 	return 0;
1874 }
1875 
kvm_s390_get_cpu_model(struct kvm * kvm,struct kvm_device_attr * attr)1876 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1877 {
1878 	int ret = -ENXIO;
1879 
1880 	switch (attr->attr) {
1881 	case KVM_S390_VM_CPU_PROCESSOR:
1882 		ret = kvm_s390_get_processor(kvm, attr);
1883 		break;
1884 	case KVM_S390_VM_CPU_MACHINE:
1885 		ret = kvm_s390_get_machine(kvm, attr);
1886 		break;
1887 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1888 		ret = kvm_s390_get_processor_feat(kvm, attr);
1889 		break;
1890 	case KVM_S390_VM_CPU_MACHINE_FEAT:
1891 		ret = kvm_s390_get_machine_feat(kvm, attr);
1892 		break;
1893 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1894 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
1895 		break;
1896 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1897 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
1898 		break;
1899 	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1900 		ret = kvm_s390_get_processor_uv_feat(kvm, attr);
1901 		break;
1902 	case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
1903 		ret = kvm_s390_get_machine_uv_feat(kvm, attr);
1904 		break;
1905 	}
1906 	return ret;
1907 }
1908 
1909 /**
1910  * kvm_s390_update_topology_change_report - update CPU topology change report
1911  * @kvm: guest KVM description
1912  * @val: set or clear the MTCR bit
1913  *
1914  * Updates the Multiprocessor Topology-Change-Report bit to signal
1915  * the guest with a topology change.
1916  * This is only relevant if the topology facility is present.
1917  */
static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
{
	union sca_utility new, old;
	struct esca_block *sca;

	sca = kvm->arch.sca;
	/*
	 * Atomically replace only the MTCR bit while preserving the other
	 * utility bits: re-read, modify, and retry until the compare-and-swap
	 * of the whole utility word succeeds.
	 */
	old = READ_ONCE(sca->utility);
	do {
		new = old;
		new.mtcr = val;
	} while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
}
1930 
kvm_s390_set_topo_change_indication(struct kvm * kvm,struct kvm_device_attr * attr)1931 static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
1932 					       struct kvm_device_attr *attr)
1933 {
1934 	if (!test_kvm_facility(kvm, 11))
1935 		return -ENXIO;
1936 
1937 	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1938 	return 0;
1939 }
1940 
kvm_s390_get_topo_change_indication(struct kvm * kvm,struct kvm_device_attr * attr)1941 static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
1942 					       struct kvm_device_attr *attr)
1943 {
1944 	u8 topo;
1945 
1946 	if (!test_kvm_facility(kvm, 11))
1947 		return -ENXIO;
1948 
1949 	topo = kvm->arch.sca->utility.mtcr;
1950 
1951 	return put_user(topo, (u8 __user *)attr->addr);
1952 }
1953 
kvm_s390_vm_set_attr(struct kvm * kvm,struct kvm_device_attr * attr)1954 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1955 {
1956 	int ret;
1957 
1958 	switch (attr->group) {
1959 	case KVM_S390_VM_MEM_CTRL:
1960 		ret = kvm_s390_set_mem_control(kvm, attr);
1961 		break;
1962 	case KVM_S390_VM_TOD:
1963 		ret = kvm_s390_set_tod(kvm, attr);
1964 		break;
1965 	case KVM_S390_VM_CPU_MODEL:
1966 		ret = kvm_s390_set_cpu_model(kvm, attr);
1967 		break;
1968 	case KVM_S390_VM_CRYPTO:
1969 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1970 		break;
1971 	case KVM_S390_VM_MIGRATION:
1972 		ret = kvm_s390_vm_set_migration(kvm, attr);
1973 		break;
1974 	case KVM_S390_VM_CPU_TOPOLOGY:
1975 		ret = kvm_s390_set_topo_change_indication(kvm, attr);
1976 		break;
1977 	default:
1978 		ret = -ENXIO;
1979 		break;
1980 	}
1981 
1982 	return ret;
1983 }
1984 
kvm_s390_vm_get_attr(struct kvm * kvm,struct kvm_device_attr * attr)1985 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1986 {
1987 	int ret;
1988 
1989 	switch (attr->group) {
1990 	case KVM_S390_VM_MEM_CTRL:
1991 		ret = kvm_s390_get_mem_control(kvm, attr);
1992 		break;
1993 	case KVM_S390_VM_TOD:
1994 		ret = kvm_s390_get_tod(kvm, attr);
1995 		break;
1996 	case KVM_S390_VM_CPU_MODEL:
1997 		ret = kvm_s390_get_cpu_model(kvm, attr);
1998 		break;
1999 	case KVM_S390_VM_MIGRATION:
2000 		ret = kvm_s390_vm_get_migration(kvm, attr);
2001 		break;
2002 	case KVM_S390_VM_CPU_TOPOLOGY:
2003 		ret = kvm_s390_get_topo_change_indication(kvm, attr);
2004 		break;
2005 	default:
2006 		ret = -ENXIO;
2007 		break;
2008 	}
2009 
2010 	return ret;
2011 }
2012 
kvm_s390_vm_has_attr(struct kvm * kvm,struct kvm_device_attr * attr)2013 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
2014 {
2015 	int ret;
2016 
2017 	switch (attr->group) {
2018 	case KVM_S390_VM_MEM_CTRL:
2019 		switch (attr->attr) {
2020 		case KVM_S390_VM_MEM_ENABLE_CMMA:
2021 		case KVM_S390_VM_MEM_CLR_CMMA:
2022 			ret = sclp.has_cmma ? 0 : -ENXIO;
2023 			break;
2024 		case KVM_S390_VM_MEM_LIMIT_SIZE:
2025 			ret = 0;
2026 			break;
2027 		default:
2028 			ret = -ENXIO;
2029 			break;
2030 		}
2031 		break;
2032 	case KVM_S390_VM_TOD:
2033 		switch (attr->attr) {
2034 		case KVM_S390_VM_TOD_LOW:
2035 		case KVM_S390_VM_TOD_HIGH:
2036 			ret = 0;
2037 			break;
2038 		default:
2039 			ret = -ENXIO;
2040 			break;
2041 		}
2042 		break;
2043 	case KVM_S390_VM_CPU_MODEL:
2044 		switch (attr->attr) {
2045 		case KVM_S390_VM_CPU_PROCESSOR:
2046 		case KVM_S390_VM_CPU_MACHINE:
2047 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
2048 		case KVM_S390_VM_CPU_MACHINE_FEAT:
2049 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
2050 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
2051 		case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
2052 		case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
2053 			ret = 0;
2054 			break;
2055 		default:
2056 			ret = -ENXIO;
2057 			break;
2058 		}
2059 		break;
2060 	case KVM_S390_VM_CRYPTO:
2061 		switch (attr->attr) {
2062 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
2063 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
2064 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
2065 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
2066 			ret = 0;
2067 			break;
2068 		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
2069 		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
2070 			ret = ap_instructions_available() ? 0 : -ENXIO;
2071 			break;
2072 		default:
2073 			ret = -ENXIO;
2074 			break;
2075 		}
2076 		break;
2077 	case KVM_S390_VM_MIGRATION:
2078 		ret = 0;
2079 		break;
2080 	case KVM_S390_VM_CPU_TOPOLOGY:
2081 		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
2082 		break;
2083 	default:
2084 		ret = -ENXIO;
2085 		break;
2086 	}
2087 
2088 	return ret;
2089 }
2090 
/*
 * Read the storage keys for args->count guest frames starting at
 * args->start_gfn into the user buffer at args->skeydata_addr
 * (KVM_S390_GET_SKEYS ioctl).
 *
 * Returns 0 on success, KVM_S390_GET_SKEYS_NONE if the guest does not
 * use storage keys, or a negative error code.
 */
static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	union skey *keys;
	int i, r = 0;

	/* No flags are defined for this ioctl. */
	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!uses_skeys(kvm->arch.gmap))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(*keys), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	/* Walk the guest DAT tables under the mmu read lock. */
	scoped_guard(read_lock, &kvm->mmu_lock) {
		for (i = 0; i < args->count; i++) {
			r = dat_get_storage_key(kvm->arch.gmap->asce,
						args->start_gfn + i, keys + i);
			if (r)
				break;
		}
	}

	/* Only copy the keys out if the whole range was read successfully. */
	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
2130 
/*
 * Set the storage keys for args->count guest frames starting at
 * args->start_gfn from the user buffer at args->skeydata_addr
 * (KVM_S390_SET_SKEYS ioctl).  Enables storage key handling for the
 * guest if it was not enabled before.
 */
static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	struct kvm_s390_mmu_cache *mc;
	union skey *keys;
	int i, r = 0;

	/* No flags are defined for this ioctl. */
	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(*keys), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = gmap_enable_skeys(kvm->arch.gmap);
	if (r)
		goto out;

	/* Validate all keys up front so the request fails before any is set. */
	r = -EINVAL;
	for (i = 0; i < args->count; i++) {
		/* Lowest order bit is reserved */
		if (keys[i].zero)
			goto out;
	}

	mc = kvm_s390_new_mmu_cache();
	if (!mc) {
		r = -ENOMEM;
		goto out;
	}

	/*
	 * Retry loop: the mmu cache is topped up outside the mmu lock; if
	 * dat_set_storage_key() runs out of preallocated pages (-ENOMEM)
	 * the whole range is retried from i = 0 with a refilled cache.
	 * NOTE(review): already-set keys are written again on retry -
	 * presumably idempotent; confirm against dat_set_storage_key().
	 */
	r = 0;
	do {
		r = kvm_s390_mmu_cache_topup(mc);
		if (r == -ENOMEM)
			break;
		scoped_guard(read_lock, &kvm->mmu_lock) {
			for (i = 0 ; i < args->count; i++) {
				r = dat_set_storage_key(mc, kvm->arch.gmap->asce,
							args->start_gfn + i, keys[i], 0);
				if (r)
					break;
			}
		}
	} while (r == -ENOMEM);
	kvm_s390_free_mmu_cache(mc);
out:
	kvfree(keys);
	return r;
}
2192 
2193 /*
2194  * This function searches for the next page with dirty CMMA attributes, and
2195  * saves the attributes in the buffer up to either the end of the buffer or
2196  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2197  * no trailing clean bytes are saved.
2198  * In case no dirty bits were found, or if CMMA was not enabled or used, the
2199  * output buffer will indicate 0 as length.
2200  */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	int peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	args->count = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!args->count || !uses_cmm(kvm->arch.gmap)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(args->count);
	if (!values)
		return -ENOMEM;

	/*
	 * NOTE(review): dat_get_cmma() also advances args->start_gfn (passed
	 * by reference) and updates the dirty-page counter; dat_peek_cmma()
	 * reads at a fixed start_gfn and leaves the dirty state untouched.
	 */
	scoped_guard(read_lock, &kvm->mmu_lock) {
		if (peek)
			ret = dat_peek_cmma(args->start_gfn, kvm->arch.gmap->asce, &args->count,
					    values);
		else
			ret = dat_get_cmma(kvm->arch.gmap->asce, &args->start_gfn, &args->count,
					   values, &kvm->arch.cmma_dirty_pages);
	}

	/* Report how many dirty pages remain, but only during migration. */
	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}
2252 
2253 /*
2254  * This function sets the CMMA attributes for the given pages. If the input
2255  * buffer has zero length, no action is taken, otherwise the attributes are
2256  * set and the mm->context.uses_cmm flag is set.
2257  */
kvm_s390_set_cmma_bits(struct kvm * kvm,const struct kvm_s390_cmma_log * args)2258 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2259 				  const struct kvm_s390_cmma_log *args)
2260 {
2261 	struct kvm_s390_mmu_cache *mc;
2262 	u8 *bits = NULL;
2263 	int r = 0;
2264 
2265 	if (!kvm->arch.use_cmma)
2266 		return -ENXIO;
2267 	/* invalid/unsupported flags */
2268 	if (args->flags != 0)
2269 		return -EINVAL;
2270 	/* Enforce sane limit on memory allocation */
2271 	if (args->count > KVM_S390_CMMA_SIZE_MAX)
2272 		return -EINVAL;
2273 	/* Nothing to do */
2274 	if (args->count == 0)
2275 		return 0;
2276 
2277 	mc = kvm_s390_new_mmu_cache();
2278 	if (!mc)
2279 		return -ENOMEM;
2280 	bits = vmalloc(array_size(sizeof(*bits), args->count));
2281 	if (!bits)
2282 		goto out;
2283 
2284 	r = copy_from_user(bits, (void __user *)args->values, args->count);
2285 	if (r) {
2286 		r = -EFAULT;
2287 		goto out;
2288 	}
2289 
2290 	do {
2291 		r = kvm_s390_mmu_cache_topup(mc);
2292 		if (r)
2293 			break;
2294 		scoped_guard(read_lock, &kvm->mmu_lock) {
2295 			r = dat_set_cmma_bits(mc, kvm->arch.gmap->asce, args->start_gfn,
2296 					      args->count, args->mask, bits);
2297 		}
2298 	} while (r == -ENOMEM);
2299 
2300 	set_bit(GMAP_FLAG_USES_CMM, &kvm->arch.gmap->flags);
2301 out:
2302 	kvm_s390_free_mmu_cache(mc);
2303 	vfree(bits);
2304 	return r;
2305 }
2306 
2307 /**
2308  * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2309  * non protected.
2310  * @kvm: the VM whose protected vCPUs are to be converted
2311  * @rc: return value for the RC field of the UVC (in case of error)
2312  * @rrc: return value for the RRC field of the UVC (in case of error)
2313  *
2314  * Does not stop in case of error, tries to convert as many
2315  * CPUs as possible. In case of error, the RC and RRC of the last error are
2316  * returned.
2317  *
2318  * Return: 0 in case of success, otherwise -EIO
2319  */
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	u16 _rc, _rrc;
	int ret = 0;

	/*
	 * We ignore failures and try to destroy as many CPUs as possible.
	 * At the same time we must not free the assigned resources when
	 * this fails, as the ultravisor has still access to that memory.
	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
	 * behind.
	 * We want to return the first failure rc and rrc, though.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		/* The !ret check keeps only the first failure's rc/rrc. */
		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
			*rc = _rc;
			*rrc = _rrc;
			ret = -EIO;
		}
		mutex_unlock(&vcpu->mutex);
	}
	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
	if (use_gisa)
		kvm_s390_gisa_enable(kvm);
	return ret;
}
2349 
2350 /**
2351  * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2352  * to protected.
2353  * @kvm: the VM whose protected vCPUs are to be converted
2354  * @rc: return value for the RC field of the UVC (in case of error)
2355  * @rrc: return value for the RRC field of the UVC (in case of error)
2356  *
2357  * Tries to undo the conversion in case of error.
2358  *
2359  * Return: 0 in case of success, otherwise -EIO
2360  */
static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct kvm_vcpu *vcpu;
	unsigned long idx;
	u16 unused;
	int ret = 0;

	/* Disable the GISA if the ultravisor does not support AIV. */
	if (!uv_has_feature(BIT_UV_FEAT_AIV))
		kvm_s390_gisa_disable(kvm);

	/* Convert each vCPU under its own mutex; stop at the first failure. */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		ret = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
		mutex_unlock(&vcpu->mutex);
		if (ret)
			break;
	}

	/* On failure, roll the already-converted vCPUs back to non-protected. */
	if (ret)
		kvm_s390_cpus_from_pv(kvm, &unused, &unused);
	return ret;
}
2384 
2385 /*
2386  * Here we provide user space with a direct interface to query UV
2387  * related data like UV maxima and available features as well as
2388  * feature specific data.
2389  *
2390  * To facilitate future extension of the data structures we'll try to
2391  * write data up to the maximum requested length.
2392  */
static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
{
	ssize_t len_min;

	switch (info->header.id) {
	case KVM_PV_INFO_VM: {
		/* Minimum buffer: header plus the VM info block. */
		len_min =  sizeof(info->header) + sizeof(info->vm);

		if (info->header.len_max < len_min)
			return -EINVAL;

		memcpy(info->vm.inst_calls_list,
		       uv_info.inst_calls_list,
		       sizeof(uv_info.inst_calls_list));

		/* It's max cpuid not max cpus, so it's off by one */
		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
		info->vm.max_guests = uv_info.max_num_sec_conf;
		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
		info->vm.feature_indication = uv_info.uv_feature_indications;

		/* Returning the written length lets user space detect extensions. */
		return len_min;
	}
	case KVM_PV_INFO_DUMP: {
		/* Minimum buffer: header plus the dump info block. */
		len_min =  sizeof(info->header) + sizeof(info->dump);

		if (info->header.len_max < len_min)
			return -EINVAL;

		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
		return len_min;
	}
	default:
		return -EINVAL;
	}
}
2431 
/*
 * Handle the KVM_PV_DUMP sub-commands: start a dump session, dump
 * configuration storage state, and finalize the dump.  rc/rrc of the
 * underlying UVCs are reported through @cmd.
 */
static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
			   struct kvm_s390_pv_dmp dmp)
{
	int r = -EINVAL;
	void __user *result_buff = (void __user *)dmp.buff_addr;

	switch (dmp.subcmd) {
	case KVM_PV_DUMP_INIT: {
		/* Only one dump session may be active at a time. */
		if (kvm->arch.pv.dumping)
			break;

		/*
		 * Block SIE entry as concurrent dump UVCs could lead
		 * to validities.
		 */
		kvm_s390_vcpu_block_all(kvm);

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		if (!r) {
			kvm->arch.pv.dumping = true;
		} else {
			/* vCPUs stay blocked only while dumping is active. */
			kvm_s390_vcpu_unblock_all(kvm);
			r = -EINVAL;
		}
		break;
	}
	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
		/* Requires a dump session started via KVM_PV_DUMP_INIT. */
		if (!kvm->arch.pv.dumping)
			break;

		/*
		 * gaddr is an output parameter since we might stop
		 * early. As dmp will be copied back in our caller, we
		 * don't need to do it ourselves.
		 */
		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
						&cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_DUMP_COMPLETE: {
		/* Requires a dump session started via KVM_PV_DUMP_INIT. */
		if (!kvm->arch.pv.dumping)
			break;

		/* The completion data must fit into the user buffer. */
		r = -EINVAL;
		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
			break;

		r = kvm_s390_pv_dump_complete(kvm, result_buff,
					      &cmd->rc, &cmd->rrc);
		break;
	}
	default:
		r = -ENOTTY;
		break;
	}

	return r;
}
2493 
/*
 * Handle the KVM_S390_PV_COMMAND ioctl: convert a VM to/from protected
 * mode and run protected-VM sub-commands (unpack, verify, dump, info...).
 * All commands except KVM_PV_ASYNC_CLEANUP_PERFORM run under kvm->lock.
 */
static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
	void __user *argp = (void __user *)cmd->data;
	int r = 0;
	u16 dummy;

	if (need_lock)
		mutex_lock(&kvm->lock);

	switch (cmd->cmd) {
	case KVM_PV_ENABLE: {
		/* Refuse to protect a VM that already is protected. */
		r = -EINVAL;
		if (kvm_s390_pv_is_protected(kvm))
			break;

		mmap_write_lock(kvm->mm);
		/*
		 * Disable creation of new THPs. Existing THPs can stay, they
		 * will be split when any part of them gets imported.
		 */
		mm_flags_clear(MMF_DISABLE_THP_EXCEPT_ADVISED, kvm->mm);
		mm_flags_set(MMF_DISABLE_THP_COMPLETELY, kvm->mm);
		set_bit(GMAP_FLAG_EXPORT_ON_UNMAP, &kvm->arch.gmap->flags);
		r = gmap_helper_disable_cow_sharing();
		mmap_write_unlock(kvm->mm);
		if (r)
			break;

		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			break;

		/* Roll back the VM creation if the vCPUs cannot be converted. */
		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);

		/* we need to block service interrupts from now on */
		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_ASYNC_CLEANUP_PREPARE:
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	case KVM_PV_ASYNC_CLEANUP_PERFORM:
		r = -EINVAL;
		if (!async_destroy)
			break;
		/* kvm->lock must not be held; this is asserted inside the function. */
		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
		break;
	case KVM_PV_DISABLE: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_SET_SEC_PARMS: {
		struct kvm_s390_pv_sec_parm parms = {};
		void *hdr;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&parms, argp, sizeof(parms)))
			break;

		/* Currently restricted to 1MiB */
		r = -EINVAL;
		if (parms.length > SZ_1M)
			break;

		r = -ENOMEM;
		hdr = vmalloc(parms.length);
		if (!hdr)
			break;

		r = -EFAULT;
		if (!copy_from_user(hdr, (void __user *)parms.origin,
				    parms.length))
			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
						      &cmd->rc, &cmd->rrc);

		vfree(hdr);
		break;
	}
	case KVM_PV_UNPACK: {
		struct kvm_s390_pv_unp unp = {};

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
			break;

		r = -EFAULT;
		if (copy_from_user(&unp, argp, sizeof(unp)))
			break;

		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
				       &cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_VERIFY: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
			     cmd->rrc);
		break;
	}
	case KVM_PV_PREP_RESET: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	case KVM_PV_UNSHARE_ALL: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	case KVM_PV_INFO: {
		struct kvm_s390_pv_info info = {};
		ssize_t data_len;

		/*
		 * No need to check the VM protection here.
		 *
		 * Maybe user space wants to query some of the data
		 * when the VM is still unprotected. If we see the
		 * need to fence a new data command we can still
		 * return an error in the info handler.
		 */

		r = -EFAULT;
		if (copy_from_user(&info, argp, sizeof(info.header)))
			break;

		r = -EINVAL;
		if (info.header.len_max < sizeof(info.header))
			break;

		data_len = kvm_s390_handle_pv_info(&info);
		if (data_len < 0) {
			r = data_len;
			break;
		}
		/*
		 * If a data command struct is extended (multiple
		 * times) this can be used to determine how much of it
		 * is valid.
		 */
		info.header.len_written = data_len;

		r = -EFAULT;
		if (copy_to_user(argp, &info, data_len))
			break;

		r = 0;
		break;
	}
	case KVM_PV_DUMP: {
		struct kvm_s390_pv_dmp dmp;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&dmp, argp, sizeof(dmp)))
			break;

		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
		if (r)
			break;

		/* dmp.gaddr may have been updated; report it back. */
		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
			r = -EFAULT;
			break;
		}

		break;
	}
	default:
		r = -ENOTTY;
	}
	if (need_lock)
		mutex_unlock(&kvm->lock);

	return r;
}
2728 
/*
 * Validate the fields shared by all KVM_S390_MEM_OP variants.
 * Rejects flags outside @supported_flags, zero or oversized transfers,
 * and out-of-range storage keys; clears mop->key when key protection
 * is not requested.  Returns 0 if the request is well-formed.
 */
static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
{
	if (!mop->size || (mop->flags & ~supported_flags))
		return -EINVAL;
	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;
	if (!(mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)) {
		/* The key is meaningless without skey protection. */
		mop->key = 0;
		return 0;
	}
	/* Storage keys are 4 bits wide. */
	return mop->key > 0xf ? -EINVAL : 0;
}
2743 
/*
 * Perform a read, write, or access check on guest absolute memory for
 * the vm-scoped KVM_S390_MEM_OP ioctl.  Data is staged through a bounce
 * buffer which is freed automatically on return (__free(kvfree)).
 */
static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf __free(kvfree) = NULL;
	enum gacc_mode acc_mode;
	int r;

	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
					KVM_S390_MEMOP_F_CHECK_ONLY);
	if (r)
		return r;

	/* CHECK_ONLY transfers no data, so no bounce buffer is needed. */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;

	/* Guest memory accesses must happen inside an srcu read section. */
	scoped_guard(srcu, &kvm->srcu) {
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
			return check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);

		if (acc_mode == GACC_STORE && copy_from_user(tmpbuf, uaddr, mop->size))
			return -EFAULT;
		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
					      mop->size, acc_mode, mop->key);
		if (r)
			return r;
		if (acc_mode != GACC_STORE && copy_to_user(uaddr, tmpbuf, mop->size))
			return -EFAULT;
	}
	return 0;
}
2779 
/*
 * Handle KVM_S390_MEMOP_ABSOLUTE_CMPXCHG: atomically compare-and-exchange
 * up to 16 bytes of absolute guest memory.
 *
 * If the exchange does not succeed, the value actually found in guest
 * memory is copied back to user space so the caller can retry.
 *
 * Return: 0 on success (whether or not the exchange took place),
 * negative error code otherwise.
 */
static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void __user *old_addr = (void __user *)mop->old_addr;
	union kvm_s390_quad old = { .sixteen = 0 };
	union kvm_s390_quad new = { .sixteen = 0 };
	bool success = false;
	int r;

	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
	if (r)
		return r;
	/*
	 * This validates off_in_quad. Checking that size is a power
	 * of two is not necessary, as cmpxchg_guest_abs_with_key
	 * takes care of that
	 */
	if (mop->size > sizeof(new))
		return -EINVAL;
	if (copy_from_user(&new, uaddr, mop->size))
		return -EFAULT;
	if (copy_from_user(&old, old_addr, mop->size))
		return -EFAULT;

	/* hold the srcu read lock while guest memory is accessed */
	scoped_guard(srcu, &kvm->srcu) {
		r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old, new,
					       mop->key, &success);

		/* on failure report the value actually found in guest memory */
		if (!success && copy_to_user(old_addr, &old, mop->size))
			return -EFAULT;
	}
	return r;
}
2813 
kvm_s390_vm_mem_op(struct kvm * kvm,struct kvm_s390_mem_op * mop)2814 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2815 {
2816 	/*
2817 	 * This is technically a heuristic only, if the kvm->lock is not
2818 	 * taken, it is not guaranteed that the vm is/remains non-protected.
2819 	 * This is ok from a kernel perspective, wrongdoing is detected
2820 	 * on the access, -EFAULT is returned and the vm may crash the
2821 	 * next time it accesses the memory in question.
2822 	 * There is no sane usecase to do switching and a memop on two
2823 	 * different CPUs at the same time.
2824 	 */
2825 	if (kvm_s390_pv_get_handle(kvm))
2826 		return -EINVAL;
2827 
2828 	switch (mop->op) {
2829 	case KVM_S390_MEMOP_ABSOLUTE_READ:
2830 	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
2831 		return kvm_s390_vm_mem_op_abs(kvm, mop);
2832 	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
2833 		return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
2834 	default:
2835 		return -EINVAL;
2836 	}
2837 }
2838 
/*
 * kvm_arch_vm_ioctl - dispatch the s390 specific VM ioctls
 *
 * Copies each ioctl's payload in from user space, delegates to the
 * matching handler and, where the handler produces output, copies the
 * result back out.
 *
 * Return: 0 or a negative error code; -ENOTTY for unknown ioctls.
 */
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		/* only succeeds when the flic-based irqchip is in use */
		r = -EINVAL;
		if (kvm->arch.use_irqchip)
			r = 0;
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		/* memslots must not change while the CMMA log is read */
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	case KVM_S390_PV_COMMAND: {
		struct kvm_pv_cmd args;

		/* protvirt means user cpu state */
		kvm_s390_set_user_cpu_state_ctrl(kvm);
		r = 0;
		if (!is_prot_virt_host()) {
			r = -EINVAL;
			break;
		}
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		if (args.flags) {
			r = -EINVAL;
			break;
		}
		/* must be called without kvm->lock */
		r = kvm_s390_handle_pv(kvm, &args);
		/* args carries rc/rrc back to user space even on error */
		if (copy_to_user(argp, &args, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vm_mem_op(kvm, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_KEYOP: {
		struct kvm_s390_mmu_cache *mc;
		struct kvm_s390_keyop kop;
		union skey skey;

		if (copy_from_user(&kop, argp, sizeof(kop))) {
			r = -EFAULT;
			break;
		}
		skey.skey = kop.key;

		mc = kvm_s390_new_mmu_cache();
		if (!mc)
			return -ENOMEM;

		/* a non-negative result is the key value to return */
		r = kvm_s390_keyop(mc, kvm, kop.operation, kop.guest_addr, skey);
		kvm_s390_free_mmu_cache(mc);
		if (r < 0)
			break;

		kop.key = r;
		r = 0;
		if (copy_to_user(argp, &kop, sizeof(kop)))
			r = -EFAULT;
		break;
	}
	case KVM_S390_ZPCI_OP: {
		struct kvm_s390_zpci_op args;

		r = -EINVAL;
		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
			break;
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		r = kvm_s390_pci_zpci_op(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
3010 
kvm_s390_apxa_installed(void)3011 static int kvm_s390_apxa_installed(void)
3012 {
3013 	struct ap_config_info info;
3014 
3015 	if (ap_instructions_available()) {
3016 		if (ap_qci(&info) == 0)
3017 			return info.apxa;
3018 	}
3019 
3020 	return 0;
3021 }
3022 
3023 /*
3024  * The format of the crypto control block (CRYCB) is specified in the 3 low
3025  * order bits of the CRYCB designation (CRYCBD) field as follows:
3026  * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3027  *	     AP extended addressing (APXA) facility are installed.
3028  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3029  * Format 2: Both the APXA and MSAX3 facilities are installed
3030  */
kvm_s390_set_crycb_format(struct kvm * kvm)3031 static void kvm_s390_set_crycb_format(struct kvm *kvm)
3032 {
3033 	kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb);
3034 
3035 	/* Clear the CRYCB format bits - i.e., set format 0 by default */
3036 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3037 
3038 	/* Check whether MSAX3 is installed */
3039 	if (!test_kvm_facility(kvm, 76))
3040 		return;
3041 
3042 	if (kvm_s390_apxa_installed())
3043 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3044 	else
3045 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3046 }
3047 
3048 /*
3049  * kvm_arch_crypto_set_masks
3050  *
3051  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3052  *	 to be set.
3053  * @apm: the mask identifying the accessible AP adapters
3054  * @aqm: the mask identifying the accessible AP domains
3055  * @adm: the mask identifying the accessible AP control domains
3056  *
3057  * Set the masks that identify the adapters, domains and control domains to
3058  * which the KVM guest is granted access.
3059  *
3060  * Note: The kvm->lock mutex must be locked by the caller before invoking this
3061  *	 function.
3062  */
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	/* keep all vcpus out of SIE while the CRYCB is rewritten */
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 use 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through both use APCB0 */
		/* APCB0: 64-bit adapter mask, 16-bit domain masks */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default:	/* Can not happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
3100 
3101 /*
3102  * kvm_arch_crypto_clear_masks
3103  *
3104  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3105  *	 to be cleared.
3106  *
3107  * Clear the masks that identify the adapters, domains and control domains to
3108  * which the KVM guest is granted access.
3109  *
3110  * Note: The kvm->lock mutex must be locked by the caller before invoking this
3111  *	 function.
3112  */
void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	/* keep all vcpus out of SIE while the CRYCB is rewritten */
	kvm_s390_vcpu_block_all(kvm);

	/* clear both possible mask layouts, independent of CRYCB format */
	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
3128 
/*
 * Build the CPU id presented to guests: the host CPU id with the
 * version byte forced to 0xff.
 */
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	/* struct cpuid is exactly 8 bytes, reinterpret it as a u64 */
	return *((u64 *) &cpuid);
}
3137 
/*
 * Set up the VM's crypto control block (CRYCB) at VM creation time.
 * When the MSAX3 facility (stfle bit 76) is available, AES/DEA protected
 * key handling is enabled by default with freshly generated random
 * wrapping key masks.
 */
static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);
	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);

	if (!test_kvm_facility(kvm, 76))
		return;

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}
3155 
/* Free the system control area (SCA) and clear the stale pointer. */
static void sca_dispose(struct kvm *kvm)
{
	free_pages_exact(kvm->arch.sca, sizeof(*kvm->arch.sca));
	kvm->arch.sca = NULL;
}
3161 
/* Release the zPCI device list before the generic kvm struct is freed. */
void kvm_arch_free_vm(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_clear_list(kvm);

	__kvm_arch_free_vm(kvm);
}
3169 
/*
 * kvm_arch_init_vm - arch specific part of VM creation
 * @kvm:  the new VM
 * @type: creation flags; only KVM_VM_S390_UCONTROL is meaningful here
 *
 * Allocates the SCA, the debug feature area and the shared sie_page2,
 * computes the facility and subfunction model, initializes crypto,
 * interrupt and gmap state, and distinguishes regular from user
 * controlled (ucontrol) VMs.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
	char debug_name[16];
	int i, rc;

	mutex_init(&kvm->arch.pv.import_lock);

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* ucontrol VMs are restricted to CAP_SYS_ADMIN */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif
	rc = -ENOMEM;

	/* without the 64-bit SCA origin facility the SCA must be below 2G */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	mutex_lock(&kvm_lock);

	kvm->arch.sca = alloc_pages_exact(sizeof(*kvm->arch.sca), alloc_flags);
	mutex_unlock(&kvm_lock);
	if (!kvm->arch.sca)
		goto out_err;

	snprintf(debug_name, sizeof(debug_name), "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	kvm->arch.sie_page2->kvm = kvm;
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;

	/* mask = what may be enabled; list = what is enabled by default */
	for (i = 0; i < kvm_s390_fac_size(); i++) {
		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
					      (kvm_s390_fac_base[i] |
					       kvm_s390_fac_ext[i]);
		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
					      kvm_s390_fac_base[i];
	}
	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;

	/* we are always in czam mode - even on pre z14 machines */
	set_kvm_facility(kvm->arch.model.fac_mask, 138);
	set_kvm_facility(kvm->arch.model.fac_list, 138);
	/* we emulate STHYI in kvm */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);
	if (machine_has_tlb_guest()) {
		set_kvm_facility(kvm->arch.model.fac_mask, 147);
		set_kvm_facility(kvm->arch.model.fac_list, 147);
	}

	if (css_general_characteristics.aiv && test_facility(65))
		set_kvm_facility(kvm->arch.model.fac_mask, 65);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm->arch.model.uv_feat_guest.feat = 0;

	kvm_s390_crypto_init(kvm);

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		mutex_lock(&kvm->lock);
		kvm_s390_pci_init_list(kvm);
		kvm_s390_vcpu_pci_enable_interp(kvm);
		mutex_unlock(&kvm->lock);
	}

	mutex_init(&kvm->arch.float_int.ais_lock);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	/* ucontrol VMs manage their own memory layout, hence no limit */
	kvm->arch.mem_limit = type & KVM_VM_S390_UCONTROL ? KVM_S390_NO_MEM_LIMIT : sclp.hamax + 1;
	kvm->arch.gmap = gmap_new(kvm, gpa_to_gfn(kvm->arch.mem_limit));
	if (!kvm->arch.gmap)
		goto out_err;
	clear_bit(GMAP_FLAG_PFAULT_ENABLED, &kvm->arch.gmap->flags);

	if (type & KVM_VM_S390_UCONTROL) {
		struct kvm_userspace_memory_region2 fake_memslot = {
			.slot = KVM_S390_UCONTROL_MEMSLOT,
			.guest_phys_addr = 0,
			.userspace_addr = 0,
			.memory_size = ALIGN_DOWN(TASK_SIZE, _SEGMENT_SIZE),
			.flags = 0,
		};

		/* one flat fake memslot covering the whole address-space */
		mutex_lock(&kvm->slots_lock);
		KVM_BUG_ON(kvm_set_internal_memslot(kvm, &fake_memslot), kvm);
		mutex_unlock(&kvm->slots_lock);
		set_bit(GMAP_FLAG_IS_UCONTROL, &kvm->arch.gmap->flags);
	} else {
		struct crst_table *table = dereference_asce(kvm->arch.gmap->asce);

		crst_table_init((void *)table, _CRSTE_HOLE(table->crstes[0].h.tt).val);
	}

	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.use_skf = sclp.has_skey;
	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	if (use_gisa)
		kvm_s390_gisa_init(kvm);
	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
	kvm->arch.pv.set_aside = NULL;
	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);

	return 0;
out_err:
	/* all of these tolerate being called on a NULL/unset resource */
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
3305 
/*
 * Tear down a vcpu: drop pending irqs and async pf work, detach it from
 * the SCA (or its ucontrol gmap), release CMMA and PV state, and free
 * the SIE control block and mmu cache.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	u16 rc, rrc;

	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);
	kvm_s390_update_topology_change_report(vcpu->kvm, 1);

	/* ucontrol vcpus own a per-vcpu child gmap instead of an SCA slot */
	if (kvm_is_ucontrol(vcpu->kvm)) {
		scoped_guard(spinlock, &vcpu->kvm->arch.gmap->children_lock)
			gmap_remove_child(vcpu->arch.gmap);
		vcpu->arch.gmap = gmap_put(vcpu->arch.gmap);
	}

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	/* We can not hold the vcpu mutex here, we are already dying */
	if (kvm_s390_pv_cpu_get_handle(vcpu))
		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_s390_free_mmu_cache(vcpu->arch.mc);
}
3332 
/* Arch specific VM teardown; runs after all file descriptors are closed. */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	u16 rc, rrc;

	kvm_destroy_vcpus(kvm);
	sca_dispose(kvm);
	kvm_s390_gisa_destroy(kvm);
	/*
	 * We are already at the end of life and kvm->lock is not taken.
	 * This is ok as the file descriptor is closed by now and nobody
	 * can mess with the pv state.
	 */
	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
	/*
	 * Remove the mmu notifier only when the whole KVM VM is torn down,
	 * and only if one was registered to begin with. If the VM is
	 * currently not protected, but has previously been protected,
	 * then it's possible that the notifier is still registered.
	 */
	if (kvm->arch.pv.mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);

	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	kvm->arch.gmap = gmap_put(kvm->arch.gmap);
	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
}
3363 
3364 /* Section: vcpu related */
/* Remove a vcpu's entry from the extended system control area (ESCA). */
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	struct esca_block *sca = vcpu->kvm->arch.sca;

	if (!kvm_s390_use_sca_entries())
		return;

	/* clear the cpu's bit in the mask and its SIE block address */
	clear_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
	sca->cpu[vcpu->vcpu_id].sda = 0;
}
3375 
/* Wire a vcpu into the extended system control area (ESCA). */
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	struct esca_block *sca = vcpu->kvm->arch.sca;
	phys_addr_t sca_phys = virt_to_phys(sca);

	/* we still need the sca header for the ipte control */
	vcpu->arch.sie_block->scaoh = sca_phys >> 32;
	vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
	vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;

	if (!kvm_s390_use_sca_entries())
		return;

	/* publish the cpu in the mask and record its SIE block address */
	set_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
	sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
}
3392 
sca_can_add_vcpu(struct kvm * kvm,unsigned int id)3393 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3394 {
3395 	if (!kvm_s390_use_sca_entries())
3396 		return id < KVM_MAX_VCPUS;
3397 
3398 	return id < KVM_S390_ESCA_CPU_SLOTS;
3399 }
3400 
3401 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/* Record the TOD clock value at which cpu timer accounting begins. */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	/* must not already be accounting */
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
3409 
3410 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/* Fold the elapsed TOD time into the guest cpu timer and stop accounting. */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	/* accounting must currently be running */
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	/* the cpu timer counts down while the vcpu runs */
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
3419 
3420 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/* Turn cpu timer accounting on and start the current accounting period. */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}
3427 
3428 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/* Close the current accounting period and turn cpu timer accounting off. */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}
3435 
/* Preemption-safe wrapper around __enable_cpu_timer_accounting(). */
static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
3442 
/* Preemption-safe wrapper around __disable_cpu_timer_accounting(). */
static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
3449 
3450 /* set the cpu timer - may only be called from the VCPU thread itself */
/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	/* restart the accounting period so the new value decays from now */
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}
3461 
3462 /* update and get the cpu timer - can also be called from other VCPU threads */
/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	/* when accounting is off the stored value is already up to date */
	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
3487 
/* Called when the vcpu is scheduled in on a physical cpu. */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
	/* resume cpu timer accounting unless the vcpu is just idling */
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}
3496 
/* Called when the vcpu is scheduled out of a physical cpu. */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	/* pause cpu timer accounting unless the vcpu is just idling */
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);

}
3505 
/*
 * Late vcpu initialization after the generic vcpu creation finished:
 * copy the VM-wide TOD epoch, attach the vcpu to the VM gmap/SCA
 * (non-ucontrol only) and enable operation exception forwarding.
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	/* preemption off: the epoch pair must be read consistently */
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	/* intercept operation exceptions for STHYI emulation/user handling */
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
}
3521 
kvm_has_pckmo_subfunc(struct kvm * kvm,unsigned long nr)3522 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3523 {
3524 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3525 	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3526 		return true;
3527 	return false;
3528 }
3529 
kvm_has_pckmo_ecc(struct kvm * kvm)3530 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3531 {
3532 	/* At least one ECC subfunction must be present */
3533 	return kvm_has_pckmo_subfunc(kvm, 32) ||
3534 	       kvm_has_pckmo_subfunc(kvm, 33) ||
3535 	       kvm_has_pckmo_subfunc(kvm, 34) ||
3536 	       kvm_has_pckmo_subfunc(kvm, 40) ||
3537 	       kvm_has_pckmo_subfunc(kvm, 41);
3538 
3539 }
3540 
kvm_has_pckmo_hmac(struct kvm * kvm)3541 static bool kvm_has_pckmo_hmac(struct kvm *kvm)
3542 {
3543 	/* At least one HMAC subfunction must be present */
3544 	return kvm_has_pckmo_subfunc(kvm, 118) ||
3545 	       kvm_has_pckmo_subfunc(kvm, 122);
3546 }
3547 
/*
 * Propagate the VM-wide crypto configuration (AP interpretation and
 * protected key settings) into this vcpu's SIE control block.
 */
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/*
	 * If the AP instructions are not being interpreted and the MSAX3
	 * facility is not configured for the guest, there is nothing to set up.
	 */
	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
		return;

	/* start from a clean slate, then re-apply the current config */
	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
	vcpu->arch.sie_block->eca &= ~ECA_APIE;
	vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC);

	if (vcpu->kvm->arch.crypto.apie)
		vcpu->arch.sie_block->eca |= ECA_APIE;

	/* Set up protected key support */
	if (vcpu->kvm->arch.crypto.aes_kw) {
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
		/* ecc/hmac is also wrapped with AES key */
		if (kvm_has_pckmo_ecc(vcpu->kvm))
			vcpu->arch.sie_block->ecd |= ECD_ECC;
		if (kvm_has_pckmo_hmac(vcpu->kvm))
			vcpu->arch.sie_block->ecd |= ECD_HMAC;
	}

	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}
3578 
/* Free the vcpu's collaborative-memory-management (CBRL) origin page. */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
	vcpu->arch.sie_block->cbrlo = 0;
}
3584 
kvm_s390_vcpu_setup_cmma(struct kvm_vcpu * vcpu)3585 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3586 {
3587 	void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3588 
3589 	if (!cbrlo_page)
3590 		return -ENOMEM;
3591 
3592 	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3593 	return 0;
3594 }
3595 
/* Apply the VM's cpu model (IBC and facility list) to this vcpu. */
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	/* facility 7: stfle is available, point SIE at the guest fac list */
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
}
3604 
/*
 * Configure a freshly created vcpu's SIE control block: cpu flags,
 * execution controls derived from facilities and SCLP capabilities,
 * CMMA, crypto, PCI interpretation and — for protected VMs — the
 * ultravisor cpu handle.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	u16 uvrc, uvrrc;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	/* guest external detection: prefer the format-2 variant */
	if (test_kvm_facility(vcpu->kvm, 78))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
	else if (test_kvm_facility(vcpu->kvm, 8))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !machine_has_esop() */
	if (machine_has_esop())
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 11))
		vcpu->arch.sie_block->ecb |= ECB_PTF;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.sie_block->ecb |= ECB_SPECI;

	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (kvm_s390_use_sca_entries())
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	if (test_kvm_facility(vcpu->kvm, 139))
		vcpu->arch.sie_block->ecd |= ECD_MEF;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
	/* gd is set when the VM uses a GISA for interrupt delivery */
	if (vcpu->arch.sie_block->gd) {
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
	}
	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);

	/* without keyless subset support, intercept the skey instructions */
	if (sclp.has_kss)
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_setup(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);

	vcpu->arch.sie_block->hpid = HPID_KVM;

	kvm_s390_vcpu_crypto_setup(vcpu);

	kvm_s390_vcpu_pci_setup(vcpu);

	/* hot-plugged vcpus of a protected VM need their own UV cpu handle */
	mutex_lock(&vcpu->kvm->lock);
	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
		if (rc)
			kvm_s390_vcpu_unsetup_cmma(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);

	return rc;
}
3691 
kvm_arch_vcpu_precreate(struct kvm * kvm,unsigned int id)3692 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3693 {
3694 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3695 		return -EINVAL;
3696 	return 0;
3697 }
3698 
/*
 * Allocate and initialize the architecture-specific parts of a vCPU:
 * the MMU cache, the SIE control block (one zeroed page), the set of
 * registers synchronized via the kvm_run area, and - for ucontrol VMs -
 * a per-vCPU child gmap. Returns 0 on success or a negative error code.
 */
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct sie_page *sie_page;
	int rc;

	/* the SIE block must occupy exactly one page */
	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	vcpu->arch.mc = kvm_s390_new_mmu_cache();
	if (!vcpu->arch.mc)
		return -ENOMEM;
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sie_page) {
		kvm_s390_free_mmu_cache(vcpu->arch.mc);
		vcpu->arch.mc = NULL;
		return -ENOMEM;
	}

	vcpu->arch.sie_block = &sie_page->sie_block;
	/* intercept TDB sits in the same page as the SIE block */
	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
	seqcount_init(&vcpu->arch.cputm_seqcount);

	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* base set of registers mirrored into vcpu->run->s.regs */
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT |
				    KVM_SYNC_DIAG318;
	vcpu->arch.acrs_loaded = false;
	kvm_s390_set_prefix(vcpu, 0);
	/* additional sync regs, gated by the corresponding facilities */
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format.
	 */
	if (cpu_has_vx())
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm)) {
		rc = -ENOMEM;
		vcpu->arch.gmap = gmap_new_child(vcpu->kvm->arch.gmap, -1UL);
		if (!vcpu->arch.gmap)
			goto out_free_sie_block;
	}

	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p",
		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);

	rc = kvm_s390_vcpu_setup(vcpu);
	if (rc)
		goto out_ucontrol_uninit;

	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
	return 0;

out_ucontrol_uninit:
	if (kvm_is_ucontrol(vcpu->kvm)) {
		gmap_remove_child(vcpu->arch.gmap);
		vcpu->arch.gmap = gmap_put(vcpu->arch.gmap);
	}
out_free_sie_block:
	/*
	 * NOTE(review): vcpu->arch.mc allocated above is not freed on this
	 * error path - verify whether kvm_arch_vcpu_destroy() still runs
	 * after a create failure, otherwise this leaks the MMU cache.
	 */
	free_page((unsigned long)(vcpu->arch.sie_block));
	return rc;
}
3781 
/*
 * Tell generic KVM whether this vCPU has work: clear its GISA "kicked"
 * bit first, then report whether any interrupt is pending.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
3787 
kvm_arch_vcpu_in_kernel(struct kvm_vcpu * vcpu)3788 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3789 {
3790 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3791 }
3792 
/*
 * Mark the vCPU so it cannot (re)enter SIE, then kick it out of SIE
 * and wait until it has actually left (see exit_sie()).
 */
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
3798 
/* Allow the vCPU to enter SIE again by dropping the block bit. */
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
3803 
/*
 * Flag a synchronous request in prog20 and force the vCPU out of SIE
 * so the request is noticed before the next guest entry.
 */
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
3809 
kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu * vcpu)3810 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3811 {
3812 	return atomic_read(&vcpu->arch.sie_block->prog20) &
3813 	       (PROG_BLOCK_SIE | PROG_REQUEST);
3814 }
3815 
/* Acknowledge a synchronous request by clearing its prog20 bit. */
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
3820 
3821 /*
3822  * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
3823  * If the CPU is not running (e.g. waiting as idle) the function will
3824  * return immediately. */
exit_sie(struct kvm_vcpu * vcpu)3825 void exit_sie(struct kvm_vcpu *vcpu)
3826 {
3827 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
3828 	kvm_s390_vsie_kick(vcpu);
3829 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3830 		cpu_relax();
3831 }
3832 
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	/* raise the generic KVM request, then force a SIE exit */
	__kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
3839 
/*
 * Suppress halt polling when this CPU is being heavily stolen from.
 * Returns true (and bumps the stat) when the average steal time exceeds
 * the halt_poll_max_steal percentage threshold.
 */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	/* do not poll with more than halt_poll_max_steal percent of steal time */
	if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >=
	    READ_ONCE(halt_poll_max_steal)) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
}
3850 
/*
 * Unused on s390: vCPU kicks go through the SIE exit mechanism instead.
 * Hitting this is a hard bug.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
3857 
/*
 * KVM_GET_ONE_REG: copy the requested s390 register to the user buffer
 * at reg->addr. Returns 0 on success, -EFAULT from put_user on a bad
 * buffer, or -EINVAL for an unknown register id.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
3906 
/*
 * KVM_SET_ONE_REG: read the requested s390 register value from the user
 * buffer at reg->addr and install it. Returns 0 on success, -EFAULT from
 * get_user on a bad buffer, or -EINVAL for an unknown register id.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		/* the CPU timer needs the dedicated setter, not a raw store */
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* an invalid token cancels all outstanding async page faults */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
3959 
/*
 * Perform a "normal" CPU reset: drop the RI PSW bit, invalidate the
 * pfault token and cancel queued async page faults, clear the RI control
 * block and all local interrupts. Unless userspace controls the CPU
 * state, the vCPU is also stopped.
 */
static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));

	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
3971 
kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu * vcpu)3972 static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3973 {
3974 	/* Initial reset is a superset of the normal reset */
3975 	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3976 
3977 	/*
3978 	 * This equals initial cpu reset in pop, but we don't switch to ESA.
3979 	 * We do not only reset the internal data, but also ...
3980 	 */
3981 	vcpu->arch.sie_block->gpsw.mask = 0;
3982 	vcpu->arch.sie_block->gpsw.addr = 0;
3983 	kvm_s390_set_prefix(vcpu, 0);
3984 	kvm_s390_set_cpu_timer(vcpu, 0);
3985 	vcpu->arch.sie_block->ckc = 0;
3986 	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3987 	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3988 	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
3989 
3990 	/* ... the data in sync regs */
3991 	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3992 	vcpu->run->s.regs.ckc = 0;
3993 	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
3994 	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
3995 	vcpu->run->psw_addr = 0;
3996 	vcpu->run->psw_mask = 0;
3997 	vcpu->run->s.regs.todpr = 0;
3998 	vcpu->run->s.regs.cputm = 0;
3999 	vcpu->run->s.regs.ckc = 0;
4000 	vcpu->run->s.regs.pp = 0;
4001 	vcpu->run->s.regs.gbea = 1;
4002 	vcpu->run->s.regs.fpc = 0;
4003 	/*
4004 	 * Do not reset these registers in the protected case, as some of
4005 	 * them are overlaid and they are not accessible in this case
4006 	 * anyway.
4007 	 */
4008 	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4009 		vcpu->arch.sie_block->gbea = 1;
4010 		vcpu->arch.sie_block->pp = 0;
4011 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4012 		vcpu->arch.sie_block->todpr = 0;
4013 	}
4014 }
4015 
kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu * vcpu)4016 static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4017 {
4018 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4019 
4020 	/* Clear reset is a superset of the initial reset */
4021 	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4022 
4023 	memset(&regs->gprs, 0, sizeof(regs->gprs));
4024 	memset(&regs->vrs, 0, sizeof(regs->vrs));
4025 	memset(&regs->acrs, 0, sizeof(regs->acrs));
4026 	memset(&regs->gscb, 0, sizeof(regs->gscb));
4027 
4028 	regs->etoken = 0;
4029 	regs->etoken_extension = 0;
4030 }
4031 
/* KVM_SET_REGS: copy the general purpose registers into the sync area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}
4039 
/* KVM_GET_REGS: copy the general purpose registers out of the sync area. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}
4047 
/*
 * KVM_SET_SREGS: install the access registers (sync area) and the
 * control registers (SIE block).
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}
4059 
/*
 * KVM_GET_SREGS: read back the access registers (sync area) and the
 * control registers (SIE block).
 */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}
4071 
/*
 * KVM_SET_FPU: install the FP control and the FP registers. With the
 * vector facility the FPRs are stored into the (overlaying) VR array,
 * otherwise they are copied verbatim.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	vcpu->run->s.regs.fpc = fpu->fpc;
	if (cpu_has_vx())
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

	vcpu_put(vcpu);
	return 0;
}
4086 
/*
 * KVM_GET_FPU: read back the FP registers and FP control, extracting
 * the FPRs from the VR array when the vector facility is in use.
 */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	if (cpu_has_vx())
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}
4101 
/*
 * Install an initial PSW into the run structure. Only allowed while the
 * vCPU is stopped; returns -EBUSY otherwise.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	vcpu->run->psw_mask = psw.mask;
	vcpu->run->psw_addr = psw.addr;
	return 0;
}
4114 
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
4120 
/* debug flag combinations accepted by KVM_SET_GUEST_DEBUG on s390 */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG: enable or disable guest debugging. Any previous
 * debug state is always cleared first; enabling forces guest PER via
 * CPUSTAT_P and optionally imports hardware breakpoints. Requires the
 * guest-PER SCLP feature. On any failure the debug state is fully
 * reverted. Returns 0 or a negative error code.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* start from a clean slate regardless of the requested control */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	/* importing breakpoints failed - undo everything */
	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}
4166 
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu * vcpu,struct kvm_mp_state * mp_state)4167 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4168 				    struct kvm_mp_state *mp_state)
4169 {
4170 	int ret;
4171 
4172 	vcpu_load(vcpu);
4173 
4174 	/* CHECK_STOP and LOAD are not supported yet */
4175 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4176 				      KVM_MP_STATE_OPERATING;
4177 
4178 	vcpu_put(vcpu);
4179 	return ret;
4180 }
4181 
/*
 * KVM_SET_MP_STATE: move the vCPU to the requested state. Using this
 * interface transfers CPU-state control to userspace. STOPPED and
 * OPERATING map to vcpu stop/start; LOAD is only valid for protected
 * vCPUs; CHECK_STOP is not supported. Returns 0 or a negative error.
 */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		rc = kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		rc = kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
			rc = -ENXIO;
			break;
		}
		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
		break;
	case KVM_MP_STATE_CHECK_STOP:
		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}
4215 
/* Is the IBS (interruption-blocking state) cpuflag set for this vCPU? */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}
4220 
/*
 * For ucontrol VMs, translate *gaddr through the per-vCPU gmap in place.
 * A failed translation (-EREMOTE) is converted into a
 * KVM_EXIT_S390_UCONTROL exit so userspace can resolve the mapping.
 * For regular VMs this is a no-op returning 0.
 */
static int vcpu_ucontrol_translate(struct kvm_vcpu *vcpu, gpa_t *gaddr)
{
	int rc;

	if (kvm_is_ucontrol(vcpu->kvm)) {
		rc = gmap_ucas_translate(vcpu->arch.mc, vcpu->arch.gmap, gaddr);
		if (rc == -EREMOTE) {
			/* hand the unresolved address to userspace */
			vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
			vcpu->run->s390_ucontrol.trans_exc_code = *gaddr;
			vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION;
		}
		return rc;
	}
	return 0;
}
4236 
/*
 * Make the two pages of the guest prefix area present and writable, then
 * re-arm the prefix notification bit so future unmaps of these pages are
 * noticed. Returns 0 on success or a negative error / -EREMOTE when a
 * ucontrol translation triggered a userspace exit.
 */
static int kvm_s390_fixup_prefix(struct kvm_vcpu *vcpu)
{
	gpa_t gaddr = kvm_s390_get_prefix(vcpu);
	gfn_t gfn;
	int rc;

	if (vcpu_ucontrol_translate(vcpu, &gaddr))
		return -EREMOTE;
	gfn = gpa_to_gfn(gaddr);

	/* the prefix area spans two consecutive pages */
	rc = kvm_s390_faultin_gfn_simple(vcpu, NULL, gfn, true);
	if (rc)
		return rc;
	rc = kvm_s390_faultin_gfn_simple(vcpu, NULL, gfn + 1, true);
	if (rc)
		return rc;

	scoped_guard(write_lock, &vcpu->kvm->mmu_lock)
		rc = dat_set_prefix_notif_bit(vcpu->kvm->arch.gmap->asce, gfn);
	return rc;
}
4258 
/*
 * Process all pending KVM requests for this vCPU before entering SIE.
 * Each handled request restarts the loop so that requests raised while
 * handling one are also processed. Returns 0 when no requests remain,
 * or a negative error (the failed request is re-queued).
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * If the guest prefix changed, re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
		int rc;

		rc = kvm_s390_fixup_prefix(vcpu);
		if (rc) {
			/* re-queue so the request is retried on the next entry */
			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* force a full TLB purge on the next SIE entry */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		/* intercept operation exceptions for instruction emulation */
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if (vcpu->kvm->arch.use_cmma && uses_cmm(vcpu->arch.gmap))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}
4334 
/*
 * Set the guest TOD clock by recomputing the VM-wide epoch (difference
 * between the requested guest TOD and the current host TOD) and pushing
 * it into every vCPU's SIE block. All vCPUs are blocked out of SIE while
 * the values are updated. Caller must hold kvm->lock.
 */
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	union tod_clock clk;
	unsigned long i;

	preempt_disable();

	store_tod_clock_ext(&clk);

	kvm->arch.epoch = gtod->tod - clk.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		/* multiple-epoch facility: maintain the epoch index, too */
		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
		/* borrow from the epoch index when the subtraction wrapped */
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
}
4362 
kvm_s390_try_set_tod_clock(struct kvm * kvm,const struct kvm_s390_vm_tod_clock * gtod)4363 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4364 {
4365 	if (!mutex_trylock(&kvm->lock))
4366 		return 0;
4367 	__kvm_s390_set_tod_clock(kvm, gtod);
4368 	mutex_unlock(&kvm->lock);
4369 	return 1;
4370 }
4371 
/*
 * Inject a pfault notification for the given token: an INIT interrupt
 * targeted at the vCPU when the fault starts, or a DONE interrupt at VM
 * scope when the page became available.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
4388 
/* Async-pf hook: deliver the PFAULT_INIT interrupt for a starting fault. */
bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);

	return true;
}
4397 
/* Async-pf hook: deliver the PFAULT_DONE interrupt for a resolved fault. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
4404 
/* Async-pf hook: nothing to do, injection happens in page_present. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
4410 
/* Async-pf hook: always allow dequeueing so completions get cleaned up. */
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
4419 
/*
 * Decide whether the current guest fault may be handled asynchronously
 * (pfault) and, if so, queue the async work. Requires a valid pfault
 * token, a PSW matching the guest's pfault select/compare criteria,
 * enabled external interrupts with the service-signal submask, no other
 * pending interrupt, and pfault enabled on the gmap. Returns true when
 * async handling was set up.
 */
bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return false;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return false;
	if (psw_extint_disabled(vcpu))
		return false;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return false;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return false;
	if (!pfault_enabled(vcpu->arch.gmap))
		return false;

	hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr);
	/* snapshot the guest's pfault token for the completion interrupt */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return false;

	return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch);
}
4445 
/*
 * Prepare the vCPU for a SIE entry: reap async-pf completions, stash
 * gprs 14/15 into the SIE block, deliver pending interrupts (non-ucontrol
 * only), process KVM requests, and arm guest-debug PER shadowing.
 * Returns 0 when SIE may be entered, nonzero to bail back to userspace.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc || guestdbg_exit_pending(vcpu))
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		/* save the guest's PER regs and install our debug setup */
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);

	/* reset per-entry exit bookkeeping */
	vcpu->arch.sie_block->icptcode = 0;
	current->thread.gmap_int_code = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
4485 
/*
 * Inject a PGM_ADDRESSING program interrupt after a fault in SIE.
 * The PSW is forwarded past the faulting instruction first, because an
 * addressing exception is suppressing/terminating while the DAT fault
 * that brought us here was nullifying.
 */
static int vcpu_post_run_addressing_exception(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
4521 
/*
 * Guest faults handled here must come from the primary address space;
 * anything else indicates a KVM bug, so flag the VM as buggy.
 */
static void kvm_s390_assert_primary_as(struct kvm_vcpu *vcpu)
{
	KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
		"Unexpected program interrupt 0x%x, TEID 0x%016lx",
		current->thread.gmap_int_code, current->thread.gmap_teid.val);
}
4528 
/*
 * Resolve a guest DAT fault at gaddr by faulting the page in (with
 * write intent when wr is set, and allowing pfault when enabled on the
 * gmap). Ucontrol addresses are translated first and may turn into a
 * userspace exit (-EREMOTE). PGM_ADDRESSING from the fault-in path is
 * converted into an injected addressing exception; any other positive
 * code is a KVM bug.
 */
static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, gpa_t gaddr, bool wr)
{
	struct guest_fault f = {
		.write_attempt = wr,
		.attempt_pfault = pfault_enabled(vcpu->arch.gmap),
	};
	int rc;

	if (vcpu_ucontrol_translate(vcpu, &gaddr))
		return -EREMOTE;
	f.gfn = gpa_to_gfn(gaddr);

	rc = kvm_s390_faultin_gfn(vcpu, NULL, &f);
	if (rc <= 0)
		return rc;
	if (rc == PGM_ADDRESSING)
		return vcpu_post_run_addressing_exception(vcpu);
	KVM_BUG_ON(rc, vcpu->kvm);
	return -EINVAL;
}
4549 
/*
 * Handle the host program interrupt that caused the SIE exit, based on
 * the interruption code and TEID saved in current->thread: secure
 * storage faults of protected guests, import of not-yet-secure pages,
 * and ordinary DAT translation faults. Unknown codes are a KVM bug.
 * Returns 0 or the result of the DAT fault handler.
 */
static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
{
	unsigned int foll = 0;
	unsigned long gaddr;
	int rc;

	gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
	if (kvm_s390_cur_gmap_fault_is_write())
		foll = FOLL_WRITE;

	switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
	case 0:
		/* SIE exit without a program interrupt - nothing to resolve */
		vcpu->stat.exit_null++;
		break;
	case PGM_SECURE_STORAGE_ACCESS:
	case PGM_SECURE_STORAGE_VIOLATION:
		kvm_s390_assert_primary_as(vcpu);
		/*
		 * This can happen after a reboot with asynchronous teardown;
		 * the new guest (normal or protected) will run on top of the
		 * previous protected guest. The old pages need to be destroyed
		 * so the new guest can use them.
		 */
		if (kvm_s390_pv_destroy_page(vcpu->kvm, gaddr)) {
			/*
			 * Either KVM messed up the secure guest mapping or the
			 * same page is mapped into multiple secure guests.
			 *
			 * This exception is only triggered when a guest 2 is
			 * running and can therefore never occur in kernel
			 * context.
			 */
			pr_warn_ratelimited("Secure storage violation (%x) in task: %s, pid %d\n",
					    current->thread.gmap_int_code, current->comm,
					    current->pid);
			send_sig(SIGSEGV, current, 0);
		}
		break;
	case PGM_NON_SECURE_STORAGE_ACCESS:
		kvm_s390_assert_primary_as(vcpu);
		/*
		 * This is normal operation; a page belonging to a protected
		 * guest has not been imported yet. Try to import the page into
		 * the protected guest.
		 */
		rc = kvm_s390_pv_convert_to_secure(vcpu->kvm, gaddr);
		if (rc == -EINVAL)
			send_sig(SIGSEGV, current, 0);
		if (rc != -ENXIO)
			break;
		/* -ENXIO: page not mapped yet - fall through to the DAT path */
		foll = FOLL_WRITE;
		fallthrough;
	case PGM_PROTECTION:
	case PGM_SEGMENT_TRANSLATION:
	case PGM_PAGE_TRANSLATION:
	case PGM_ASCE_TYPE:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
		kvm_s390_assert_primary_as(vcpu);
		return vcpu_dat_fault_handler(vcpu, gaddr, foll);
	default:
		KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
			current->thread.gmap_int_code, current->thread.gmap_teid.val);
		send_sig(SIGSEGV, current, 0);
		break;
	}
	return 0;
}
4619 
/*
 * Post-process a SIE exit: trace the intercept, restore guest-debug PER
 * registers, sync the shadowed gg14/gg15 back into the GPR array, and
 * dispatch to the intercept or host-fault handlers.
 *
 * Returns 0 on success, a negative error code, or -EREMOTE when the
 * intercept must be handled by userspace (kvm_run is prepared for that).
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;
	int rc;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	/* r14/r15 are shadowed in the SIE block; copy them back */
	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		/* Exit caused by a machine check; reinject it into the guest */
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		/* In-kernel handling is not possible; hand over to userspace */
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	}

	return vcpu_post_run_handle_fault(vcpu);
}
4659 
/*
 * Enter SIE via sie64a() with the required guest-state bracketing.
 *
 * noinstr: no instrumentation may run between the guest_state enter/exit
 * markers, hence this thin, annotation-free wrapper.
 */
int noinstr kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
				    u64 *gprs, unsigned long gasce)
{
	int ret;

	guest_state_enter_irqoff();

	/*
	 * The guest_state_{enter,exit}_irqoff() functions inform lockdep and
	 * tracing that entry to the guest will enable host IRQs, and exit from
	 * the guest will disable host IRQs.
	 */
	ret = sie64a(scb, gprs, gasce);

	guest_state_exit_irqoff();

	return ret;
}
4678 
4679 #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
/*
 * Outer vCPU run loop: prepare the vCPU, enter SIE, and post-process each
 * exit until an error, a userspace exit, or pending guest debug breaks
 * the loop. Returns the final rc from vcpu_pre_run()/vcpu_post_run().
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;
	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	kvm_vcpu_srcu_read_lock(vcpu);

	while (true) {
		rc = vcpu_pre_run(vcpu);
		kvm_vcpu_srcu_read_unlock(vcpu);
		if (rc || guestdbg_exit_pending(vcpu))
			break;

		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_timing_enter_irqoff and guest_timing_exit_irqoff
		 * should be no uaccess.
		 */
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			/* PV guests run on the separate pv_grregs save area */
			memcpy(sie_page->pv_grregs,
			       vcpu->run->s.regs.gprs,
			       sizeof(sie_page->pv_grregs));
		}

xfer_to_guest_mode_check:
		local_irq_disable();
		xfer_to_guest_mode_prepare();
		if (xfer_to_guest_mode_work_pending()) {
			/* Handle pending work with IRQs enabled, then re-check */
			local_irq_enable();
			rc = kvm_xfer_to_guest_mode_handle_work(vcpu);
			if (rc)
				break;
			goto xfer_to_guest_mode_check;
		}

		guest_timing_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);

		exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
						      vcpu->run->s.regs.gprs,
						      vcpu->arch.gmap->asce.val);

		__enable_cpu_timer_accounting(vcpu);
		guest_timing_exit_irqoff();
		local_irq_enable();

		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(vcpu->run->s.regs.gprs,
			       sie_page->pv_grregs,
			       sizeof(sie_page->pv_grregs));
			/*
			 * We're not allowed to inject interrupts on intercepts
			 * that leave the guest state in an "in-between" state
			 * where the next SIE entry will do a continuation.
			 * Fence interrupts in our "internal" PSW.
			 */
			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
			}
		}
		kvm_vcpu_srcu_read_lock(vcpu);

		rc = vcpu_post_run(vcpu, exit_reason);
		if (rc || guestdbg_exit_pending(vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			break;
		}
	}

	return rc;
}
4756 
/*
 * Sync the dirty fmt2 (non-protected guest) state from kvm_run into the
 * SIE block, and eagerly enable facilities (RI, GS, BPBC) that userspace
 * has populated, e.g. after migration.
 */
static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* An invalid token disables async page faults entirely */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	if (cpu_has_gs()) {
		/* Save the host GS control block and load the guest's one */
		preempt_disable();
		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */
}
4829 
/*
 * Load the dirty register state from kvm_run into the vCPU before
 * entering SIE, then clear kvm_dirty_regs. fmt2 state is only synced
 * for non-protected guests; protected guests only accept the CC bits
 * of the PSW mask from userspace.
 */
static void sync_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
	}
	/* Save host access registers and load the guest's ones */
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	vcpu->arch.acrs_loaded = true;
	kvm_s390_fpu_load(vcpu->run);
	/* Sync fmt2 only data */
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
		sync_regs_fmt2(vcpu);
	} else {
		/*
		 * In several places we have to modify our internal view to
		 * not do things that are disallowed by the ultravisor. For
		 * example we must not inject interrupts after specific exits
		 * (e.g. 112 prefix page not secure). We do this by turning
		 * off the machine check, external and I/O interrupt bits
		 * of our PSW copy. To avoid getting validity intercepts, we
		 * do only accept the condition code from userspace.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
						   PSW_MASK_CC;
	}

	kvm_run->kvm_dirty_regs = 0;
}
4869 
/*
 * Store the fmt2 (non-protected guest) state from the SIE block back
 * into kvm_run and restore the host guarded-storage control block.
 * Counterpart of sync_regs_fmt2().
 */
static void store_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
	if (cpu_has_gs()) {
		preempt_disable();
		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		/* No host GS cb: GS can be disabled again for this CPU */
		if (!vcpu->arch.host_gscb)
			local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
		vcpu->arch.host_gscb = NULL;
		preempt_enable();
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}
4893 
/*
 * Store the vCPU register state back into kvm_run after running the
 * guest and restore the host access registers and FPU state.
 * Counterpart of sync_regs().
 */
static void store_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	/* Save the guest access registers and reload the host's ones */
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.acrs_loaded = false;
	kvm_s390_fpu_store(vcpu->run);
	/* Store fmt2 only data */
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
		store_regs_fmt2(vcpu);
}
4914 
/*
 * The KVM_RUN ioctl: validate the request, sync register state, run the
 * vCPU loop and write the results back to kvm_run.
 *
 * Returns 0 (userspace exit prepared in kvm_run), -EINTR on pending
 * signals, or a negative error code.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	DECLARE_KERNEL_FPU_ONSTACK32(fpu);
	int rc;

	/*
	 * Running a VM while dumping always has the potential to
	 * produce inconsistent dump data. But for PV vcpus a SIE
	 * entry while dumping could also lead to a fatal validity
	 * intercept which we absolutely want to avoid.
	 */
	if (vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (!vcpu->wants_to_run)
		return -EINTR;

	/* Reject sync-reg bits that this architecture does not define */
	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	/*
	 * no need to check the return value of vcpu_start as it can only have
	 * an error for protvirt, but protvirt means user cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	/* Protect the host FPU/vector state while guest state is loaded */
	kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
	sync_regs(vcpu);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		vcpu->stat.signal_exits++;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc)  {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu);
	kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}
4994 
4995 /*
4996  * store status at address
 * we have two special cases:
4998  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4999  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5000  */
/*
 * Write the vCPU's architected store-status save area to guest memory at
 * @gpa (see the comment above for the NOADDR/PREFIXED special cases).
 * The register state is taken from kvm_run / the SIE block, so callers
 * must have stored any lazily-loaded state first.
 *
 * Returns 0 on success, -EFAULT if any guest write fails.
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		/* Bias gpa so the __LC_* lowcore offsets below line up */
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (cpu_has_vx()) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
5052 
/*
 * Store status for a loaded vCPU: first flush the lazily-held FPU and
 * access register state back into kvm_run, then write the save area.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * it into the save area
	 */
	kvm_s390_fpu_store(vcpu->run);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
5065 
/* Cancel any pending ENABLE_IBS request and synchronously disable IBS. */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}
5071 
__disable_ibs_on_all_vcpus(struct kvm * kvm)5072 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
5073 {
5074 	unsigned long i;
5075 	struct kvm_vcpu *vcpu;
5076 
5077 	kvm_for_each_vcpu(i, vcpu, kvm) {
5078 		__disable_ibs_on_vcpu(vcpu);
5079 	}
5080 }
5081 
/*
 * Cancel any pending DISABLE_IBS request and synchronously enable IBS,
 * provided the SCLP reports the IBS facility at all.
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
5089 
/*
 * Move a stopped vCPU into the operating state, managing the IBS
 * (interlock-and-broadcast-suppression) facility depending on how many
 * vCPUs are already running. For protected guests the ultravisor is
 * informed of the state change first.
 *
 * Returns 0 on success (or if already started), or the UV error code.
 */
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the operating state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	/* Count the vCPUs that are already running */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * The real PSW might have changed due to a RESTART interpreted by the
	 * ultravisor. We block all interrupts and let the next sie exit
	 * refresh our view.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}
5144 
/*
 * Move a running vCPU into the stopped state; if exactly one vCPU
 * remains running afterwards, re-enable IBS for it. For protected
 * guests the ultravisor is informed of the state change first.
 *
 * Returns 0 on success (or if already stopped), or the UV error code.
 */
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the stopped state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	/*
	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
	 * have been fully processed. This will ensure that the VCPU
	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
	 */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	kvm_s390_clear_stop_irq(vcpu);

	__disable_ibs_on_vcpu(vcpu);

	/* Find out how many vCPUs are still running, and which one */
	for (i = 0; i < online_vcpus; i++) {
		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);

		if (!is_vcpu_stopped(tmp)) {
			started_vcpus++;
			started_vcpu = tmp;
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}
5198 
kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu * vcpu,struct kvm_enable_cap * cap)5199 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5200 				     struct kvm_enable_cap *cap)
5201 {
5202 	int r;
5203 
5204 	if (cap->flags)
5205 		return -EINVAL;
5206 
5207 	switch (cap->cap) {
5208 	case KVM_CAP_S390_CSS_SUPPORT:
5209 		if (!vcpu->kvm->arch.css_support) {
5210 			vcpu->kvm->arch.css_support = 1;
5211 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5212 			trace_kvm_s390_enable_css(vcpu->kvm);
5213 		}
5214 		r = 0;
5215 		break;
5216 	default:
5217 		r = -EINVAL;
5218 		break;
5219 	}
5220 	return r;
5221 }
5222 
/*
 * KVM_S390_MEM_OP on the SIDA (secure instruction data area) of a
 * protected vCPU: copy @mop->size bytes between userspace and the SIDA
 * at @mop->sida_offset. Only valid for protected vCPUs, without flags.
 *
 * Returns 0, -EINVAL/-E2BIG on bad arguments, -EFAULT on copy failure.
 */
static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *sida_addr;
	int r = 0;

	if (mop->flags || !mop->size)
		return -EINVAL;
	/* Reject size + offset overflow */
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;
	if (!kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, sida_addr, mop->size))
			r = -EFAULT;

		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user(sida_addr, uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;
}
5254 
/*
 * KVM_S390_MEM_OP logical read/write on a non-protected vCPU: access
 * guest memory via a bounce buffer with optional key-controlled
 * protection, or just check accessibility for F_CHECK_ONLY.
 *
 * Returns 0 on success, a negative error code, or a positive program
 * interruption code (optionally injected for F_INJECT_EXCEPTION).
 */
static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	/* __free(kvfree): tmpbuf is released automatically on every return */
	void *tmpbuf __free(kvfree) = NULL;
	enum gacc_mode acc_mode;
	int r;

	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
					KVM_S390_MEMOP_F_CHECK_ONLY |
					KVM_S390_MEMOP_F_SKEY_PROTECTION);
	if (r)
		return r;
	if (mop->ar >= NUM_ACRS)
		return -EINVAL;
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
				    acc_mode, mop->key);
	} else if (acc_mode == GACC_FETCH) {
		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					mop->size, mop->key);
		if (!r && copy_to_user(uaddr, tmpbuf, mop->size))
			return -EFAULT;
	} else {
		if (copy_from_user(tmpbuf, uaddr, mop->size))
			return -EFAULT;
		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					 mop->size, mop->key);
	}

	/* r > 0 is a program interruption code from the guest access */
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	return r;
}
5299 
kvm_s390_vcpu_memsida_op(struct kvm_vcpu * vcpu,struct kvm_s390_mem_op * mop)5300 static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
5301 				     struct kvm_s390_mem_op *mop)
5302 {
5303 	int r, srcu_idx;
5304 
5305 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5306 
5307 	switch (mop->op) {
5308 	case KVM_S390_MEMOP_LOGICAL_READ:
5309 	case KVM_S390_MEMOP_LOGICAL_WRITE:
5310 		r = kvm_s390_vcpu_mem_op(vcpu, mop);
5311 		break;
5312 	case KVM_S390_MEMOP_SIDA_READ:
5313 	case KVM_S390_MEMOP_SIDA_WRITE:
5314 		/* we are locked against sida going away by the vcpu->mutex */
5315 		r = kvm_s390_vcpu_sida_op(vcpu, mop);
5316 		break;
5317 	default:
5318 		r = -EINVAL;
5319 	}
5320 
5321 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
5322 	return r;
5323 }
5324 
/*
 * vCPU ioctls that may be issued without taking the vcpu mutex:
 * interrupt injection via KVM_S390_IRQ / KVM_S390_INTERRUPT.
 *
 * Returns 0, a negative error code, or -ENOIOCTLCMD for everything else.
 */
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
				  unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int rc;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		/* Legacy interface: convert to a kvm_s390_irq first */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	/*
	 * To simplify single stepping of userspace-emulated instructions,
	 * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
	 * should_handle_per_ifetch()). However, if userspace emulation injects
	 * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
	 * after (and not before) the interrupt delivery.
	 */
	if (!rc)
		vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;

	return rc;
}
5369 
/*
 * KVM_PV_DUMP for a single vCPU (subcmd KVM_PV_DUMP_CPU): let the
 * ultravisor dump the vCPU state into a kernel buffer and copy it to
 * the userspace buffer described by cmd->data.
 *
 * Returns 0 on success; -EINVAL on bad arguments or UV failure,
 * -EFAULT/-ENOMEM on copy/allocation errors. cmd->rc/rrc carry the
 * UV return codes either way.
 */
static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
					struct kvm_pv_cmd *cmd)
{
	struct kvm_s390_pv_dmp dmp;
	void *data;
	int ret;

	/* Dump initialization is a prerequisite */
	if (!vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
		return -EFAULT;

	/* We only handle this subcmd right now */
	if (dmp.subcmd != KVM_PV_DUMP_CPU)
		return -EINVAL;

	/* CPU dump length is the same as create cpu storage donation. */
	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
		return -EINVAL;

	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);

	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
		   vcpu->vcpu_id, cmd->rc, cmd->rrc);

	if (ret)
		ret = -EINVAL;

	/* On success copy over the dump data */
	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
		ret = -EFAULT;

	kvfree(data);
	return ret;
}
5411 
/*
 * Main per-vCPU ioctl dispatcher. Runs with the vcpu loaded
 * (vcpu_load/vcpu_put); individual cases take kvm->srcu where guest
 * memory is accessed.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		/* For protected vCPUs the UV performs the actual reset */
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		/* Register access is not possible for protected vCPUs */
		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucas;

		r = -EFAULT;
		if (copy_from_user(&ucas, argp, sizeof(ucas)))
			break;

		r = -EINVAL;
		if (!kvm_is_ucontrol(vcpu->kvm))
			break;
		/* Addresses and length must be segment (1M) aligned */
		if (!IS_ALIGNED(ucas.user_addr | ucas.vcpu_addr | ucas.length, _SEGMENT_SIZE))
			break;

		r = gmap_ucas_map(vcpu->arch.gmap, gpa_to_gfn(ucas.user_addr),
				  gpa_to_gfn(ucas.vcpu_addr),
				  ucas.length >> _SEGMENT_SHIFT);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucas;

		r = -EFAULT;
		if (copy_from_user(&ucas, argp, sizeof(ucas)))
			break;

		r = -EINVAL;
		if (!kvm_is_ucontrol(vcpu->kvm))
			break;
		if (!IS_ALIGNED(ucas.vcpu_addr | ucas.length, _SEGMENT_SIZE))
			break;

		gmap_ucas_unmap(vcpu->arch.gmap, gpa_to_gfn(ucas.vcpu_addr),
				ucas.length >> _SEGMENT_SHIFT);
		r = 0;
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = vcpu_dat_fault_handler(vcpu, arg, 0);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		/* Length must be a non-zero multiple of the irq struct size */
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *)  irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_PV_CPU_COMMAND: {
		struct kvm_pv_cmd cmd;

		r = -EINVAL;
		if (!is_prot_virt_host())
			break;

		r = -EFAULT;
		if (copy_from_user(&cmd, argp, sizeof(cmd)))
			break;

		r = -EINVAL;
		if (cmd.flags)
			break;

		/* We only handle this cmd right now */
		if (cmd.cmd != KVM_PV_DUMP)
			break;

		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);

		/* Always copy over UV rc / rrc data */
		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
			r = -EFAULT;
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}
5614 
kvm_arch_vcpu_fault(struct kvm_vcpu * vcpu,struct vm_fault * vmf)5615 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5616 {
5617 #ifdef CONFIG_KVM_S390_UCONTROL
5618 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
5619 		 && (kvm_is_ucontrol(vcpu->kvm))) {
5620 		vmf->page = virt_to_page(vcpu->arch.sie_block);
5621 		get_page(vmf->page);
5622 		return 0;
5623 	}
5624 #endif
5625 	return VM_FAULT_SIGBUS;
5626 }
5627 
/* On s390 the interrupt controller is always emulated in the kernel. */
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return true;
}
5632 
5633 /* Section: memory related */
kvm_arch_prepare_memory_region(struct kvm * kvm,const struct kvm_memory_slot * old,struct kvm_memory_slot * new,enum kvm_mr_change change)5634 int kvm_arch_prepare_memory_region(struct kvm *kvm,
5635 				   const struct kvm_memory_slot *old,
5636 				   struct kvm_memory_slot *new,
5637 				   enum kvm_mr_change change)
5638 {
5639 	gpa_t size;
5640 
5641 	if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS)
5642 		return -EINVAL;
5643 
5644 	/* When we are protected, we should not change the memory slots */
5645 	if (kvm_s390_pv_get_handle(kvm))
5646 		return -EINVAL;
5647 
5648 	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
5649 		/*
5650 		 * A few sanity checks. We can have memory slots which have to be
5651 		 * located/ended at a segment boundary (1MB). The memory in userland is
5652 		 * ok to be fragmented into various different vmas. It is okay to mmap()
5653 		 * and munmap() stuff in this slot after doing this call at any time
5654 		 */
5655 
5656 		if (new->userspace_addr & 0xffffful)
5657 			return -EINVAL;
5658 
5659 		size = new->npages * PAGE_SIZE;
5660 		if (size & 0xffffful)
5661 			return -EINVAL;
5662 
5663 		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5664 			return -EINVAL;
5665 	}
5666 
5667 	if (!kvm->arch.migration_mode)
5668 		return 0;
5669 
5670 	/*
5671 	 * Turn off migration mode when:
5672 	 * - userspace creates a new memslot with dirty logging off,
5673 	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
5674 	 *   dirty logging is turned off.
5675 	 * Migration mode expects dirty page logging being enabled to store
5676 	 * its dirty bitmap.
5677 	 */
5678 	if (change != KVM_MR_DELETE &&
5679 	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
5680 		WARN(kvm_s390_vm_stop_migration(kvm),
5681 		     "Failed to stop migration mode");
5682 
5683 	return 0;
5684 }
5685 
/*
 * Make the gmap DAT tables follow a memslot change that was already
 * accepted by kvm_arch_prepare_memory_region().  This hook cannot report
 * errors to the caller, so failures are only logged.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	struct kvm_s390_mmu_cache *mc = NULL;
	int rc = 0;

	/* Flag-only changes do not touch the DAT tables. */
	if (change == KVM_MR_FLAGS_ONLY)
		return;

	mc = kvm_s390_new_mmu_cache();
	if (!mc) {
		rc = -ENOMEM;
		goto out;
	}

	scoped_guard(write_lock, &kvm->mmu_lock) {
		switch (change) {
		case KVM_MR_DELETE:
			rc = dat_delete_slot(mc, kvm->arch.gmap->asce, old->base_gfn, old->npages);
			break;
		case KVM_MR_MOVE:
			/* A move is a delete of the old range plus a create of the new. */
			rc = dat_delete_slot(mc, kvm->arch.gmap->asce, old->base_gfn, old->npages);
			if (rc)
				break;
			fallthrough;
		case KVM_MR_CREATE:
			rc = dat_create_slot(mc, kvm->arch.gmap->asce, new->base_gfn, new->npages);
			break;
		case KVM_MR_FLAGS_ONLY:
			/* Filtered out by the early return above; keeps the switch exhaustive. */
			break;
		default:
			WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
		}
	}
out:
	if (rc)
		pr_warn("failed to commit memory region\n");
	/* mc may still be NULL here (allocation-failure path). */
	kvm_s390_free_mmu_cache(mc);
	return;
}
5728 
5729 /**
5730  * kvm_test_age_gfn() - test young
5731  * @kvm: the kvm instance
 * @range: the range of guest addresses to test for the young status
 *
 * Context: called by KVM common code without holding the kvm mmu lock
 * Return: true if any page in the given range is young, otherwise false.
5736  */
kvm_test_age_gfn(struct kvm * kvm,struct kvm_gfn_range * range)5737 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
5738 {
5739 	scoped_guard(read_lock, &kvm->mmu_lock)
5740 		return dat_test_age_gfn(kvm->arch.gmap->asce, range->start, range->end);
5741 }
5742 
5743 /**
5744  * kvm_age_gfn() - clear young
5745  * @kvm: the kvm instance
5746  * @range: the range of guest addresses whose young status needs to be cleared
5747  *
5748  * Context: called by KVM common code without holding the kvm mmu lock
 * Return: true if any page in the given range was young, otherwise false.
5750  */
kvm_age_gfn(struct kvm * kvm,struct kvm_gfn_range * range)5751 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
5752 {
5753 	scoped_guard(read_lock, &kvm->mmu_lock)
5754 		return gmap_age_gfn(kvm->arch.gmap, range->start, range->end);
5755 }
5756 
5757 /**
5758  * kvm_unmap_gfn_range() - Unmap a range of guest addresses
5759  * @kvm: the kvm instance
5760  * @range: the range of guest page frames to invalidate
5761  *
5762  * This function always returns false because every DAT table modification
5763  * has to use the appropriate DAT table manipulation instructions, which will
5764  * keep the TLB coherent, hence no additional TLB flush is ever required.
5765  *
5766  * Context: called by KVM common code with the kvm mmu write lock held
5767  * Return: false
5768  */
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	/* See the kernel-doc above: the gmap helper keeps the TLB coherent. */
	return gmap_unmap_gfn_range(kvm->arch.gmap, range->slot, range->start, range->end);
}
5773 
/*
 * Build the mask of facility bits usable by guests for facility word @i.
 *
 * sclp.hmfai packs one 2-bit indication per facility word; a value v for
 * word @i shrinks the exposed mask by v * 16 bits (v == 0 keeps the low
 * 48 bits).  NOTE(review): exact hmfai semantics come from the SCLP
 * architecture - confirm against the SCLP documentation.
 */
static inline unsigned long nonhyp_mask(int i)
{
	/* Extract the 2-bit indication for word @i (top bits after the shift). */
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
5780 
kvm_s390_init(void)5781 static int __init kvm_s390_init(void)
5782 {
5783 	int i, r;
5784 
5785 	if (!sclp.has_sief2) {
5786 		pr_info("SIE is not available\n");
5787 		return -ENODEV;
5788 	}
5789 
5790 	for (i = 0; i < 16; i++)
5791 		kvm_s390_fac_base[i] |=
5792 			stfle_fac_list[i] & nonhyp_mask(i);
5793 
5794 	r = __kvm_s390_init();
5795 	if (r)
5796 		return r;
5797 
5798 	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5799 	if (r) {
5800 		__kvm_s390_exit();
5801 		return r;
5802 	}
5803 	return 0;
5804 }
5805 
/* Module exit: unregister from the KVM core, then tear down arch state. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();

	__kvm_s390_exit();
}
5812 
/* Module entry and exit points. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
5824