// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-s390: " fmt

#include <linux/compiler.h>
#include <linux/entry-virt.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cpufeature.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/machine.h>
#include <asm/stp.h>
#include <asm/gmap_helpers.h>
#include <asm/nmi.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/asm.h>
#include <asm/fpu.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "gmap.h"
#include "faultin.h"
#include "pci.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward),
	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
	STATS_DESC_COUNTER(VM, gmap_shadow_create),
	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
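
/*
 * A minimal sketch (deliberately kept out of the build) of how a binary
 * stats reader interprets the header above: the id string, the descriptor
 * array and the data area are all located via offsets relative to the
 * start of the stats file, in that order.  The function and variable
 * names here are illustrative only, not part of this file.
 */
#if 0
static void stats_layout_example(const struct kvm_stats_header *hdr,
				 const char *file_start)
{
	const char *id   = file_start + hdr->id_offset;	  /* VM/VCPU id string */
	const char *desc = file_start + hdr->desc_offset; /* num_desc descriptors */
	const char *data = file_start + hdr->data_offset; /* counter values */

	(void)id; (void)desc; (void)data;
}
#endif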

const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync),
	STATS_DESC_COUNTER(VCPU, signal_exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, it will require code changes, but the external uapi can stay the same.
 */
#define SIZE_INTERNAL 16

/*
 * Base facility mask that defines the default mask for facilities. Consists
 * of the defines in FACILITIES_KVM and the non-hypervisor-managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended facility mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
/* forward declarations */
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
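
/*
 * A sketch (not compiled) of the multi-word arithmetic above: the guest
 * epoch is kept as the pair epdx:epoch, so adding a signed 64-bit delta
 * means sign-extending the delta into the high byte and propagating a
 * carry when the low 64-bit addition wraps.  Names are illustrative.
 */
#if 0
static void epoch_add_example(u8 *epdx, u64 *epoch, u64 delta)
{
	u8 delta_idx = (s64)delta < 0 ? 0xff : 0x00;	/* sign extension */

	*epoch += delta;
	*epdx += delta_idx;
	if (*epoch < delta)		/* unsigned wraparound => carry */
		*epdx += 1;
}
#endif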

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [function] "d" (function)
		: CC_CLOBBER_LIST("0"));
	return CC_TRANSFORM(cc) == 0;
}
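
/*
 * Sketch (not compiled): kvm_s390_cpu_feat_init() below records each
 * positive plo_test_bit() result in a mask that is numbered MSB-first,
 * i.e. function 0 lands in bit 7 of byte 0.  The indexing it uses is
 * equivalent to this helper; the name is illustrative only.
 */
#if 0
static void plo_mask_set_example(u8 *mask, unsigned int i)
{
	mask[i >> 3] |= 0x80 >> (i & 7);	/* byte i/8, MSB-first bit i%8 */
}
#endif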

static __always_inline void pfcr_query(u8 (*query)[16])
{
	asm volatile(
		"	lghi	0,0\n"
		"	.insn   rsy,0xeb0000000016,0,0,%[query]"
		: [query] "=QS" (*query)
		:
		: "cc", "0");
}

static __always_inline void __sortl_query(u8 (*query)[32])
{
	asm volatile(
		"	lghi	0,0\n"
		"	la	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rre,0xb9380000,2,4"
		: [query] "=R" (*query)
		:
		: "cc", "0", "1");
}

static __always_inline void __dfltcc_query(u8 (*query)[32])
{
	asm volatile(
		"	lghi	0,0\n"
		"	la	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,0xb9390000,2,4,6,0"
		: [query] "=R" (*query)
		:
		: "cc", "0", "1");
}

static void __init kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__sortl_query(&kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__dfltcc_query(&kvm_s390_available_subfunc.dfltcc);

	if (test_facility(201))	/* PFCR */
		pfcr_query(&kvm_s390_available_subfunc.pfcr);

	if (machine_has_esop())
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !machine_has_esop() || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);

	return 0;

err_gib:
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

static void __kvm_s390_exit(void)
{
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);

	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

static int kvm_s390_keyop(struct kvm_s390_mmu_cache *mc, struct kvm *kvm, int op,
			  unsigned long addr, union skey skey)
{
	union asce asce = kvm->arch.gmap->asce;
	gfn_t gfn = gpa_to_gfn(addr);
	int r;

	guard(read_lock)(&kvm->mmu_lock);

	switch (op) {
	case KVM_S390_KEYOP_SSKE:
		r = dat_cond_set_storage_key(mc, asce, gfn, skey, &skey, 0, 0, 0);
		if (r >= 0)
			return skey.skey;
		break;
	case KVM_S390_KEYOP_ISKE:
		r = dat_get_storage_key(asce, gfn, &skey);
		if (!r)
			return skey.skey;
		break;
	case KVM_S390_KEYOP_RRBE:
		r = dat_reset_reference_bit(asce, gfn);
		if (r > 0)
			return r << 1;
		break;
	default:
		return -EINVAL;
	}
	return r;
}
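
/*
 * Sketch (not compiled) of a caller of the helper above, using the
 * KVM_S390_KEYOP_* uapi constants.  It assumes, as the switch above
 * suggests, that the mmu cache is only consumed on the SSKE path, so the
 * ISKE read passes NULL and a zero key.  The wrapper name is illustrative
 * and hypothetical; the return value is the storage key byte or a
 * negative error.
 */
#if 0
static int read_guest_storage_key_example(struct kvm *kvm, unsigned long gaddr)
{
	union skey unused = { .skey = 0 };

	return kvm_s390_keyop(NULL, kvm, KVM_S390_KEYOP_ISKE, gaddr, unused);
}
#endif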

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return 0;
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
	case KVM_CAP_S390_USER_OPEREXEC:
	case KVM_CAP_S390_KEYOP:
	case KVM_CAP_S390_VSIE_ESAMODE:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !(kvm && kvm_is_ucontrol(kvm)))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		/*
		 * Return the same value for KVM_CAP_MAX_VCPUS and
		 * KVM_CAP_MAX_VCPU_ID to conform with the KVM API.
		 */
		r = KVM_S390_ESCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = machine_has_esop();
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = test_facility(129);
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}
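
/*
 * Userspace sketch (not compiled): the capabilities reported above are
 * queried with the standard KVM_CHECK_EXTENSION ioctl; a return value
 * greater than zero means the capability is available.  For example,
 * KVM_CAP_S390_MEM_OP reports the maximum transfer size.  The function
 * name is illustrative only.
 */
#if 0
static int mem_op_max_size_example(int vm_fd)
{
	/* expected to return MEM_OP_MAX_SIZE (65536) on this host */
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
}
#endif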

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	gfn_t last_gfn = memslot->base_gfn + memslot->npages;

	scoped_guard(read_lock, &kvm->mmu_lock)
		gmap_sync_dirty_log(kvm->arch.gmap, memslot->base_gfn, last_gfn);
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (cpu_has_vx()) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			if (test_facility(198)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 198);
				set_kvm_facility(kvm->arch.model.fac_list, 198);
			}
			if (test_facility(199)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 199);
				set_kvm_facility(kvm->arch.model.fac_list, 199);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			set_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &kvm->arch.gmap->flags);
			/*
			 * We might have to create fake 4k page
			 * tables. To prevent the hardware from
			 * working on stale PGSTEs, we emulate these
			 * instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_OPEREXEC:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_OPEREXEC");
		kvm->arch.user_operexec = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_VSIE_ESAMODE:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_VSIE_ESAMODE");
		kvm->arch.allow_vsie_esamode = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA: {
		gfn_t start_gfn = 0;

		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		do {
			start_gfn = dat_reset_cmma(kvm->arch.gmap->asce, start_gfn);
			cond_resched();
		} while (start_gfn);
		ret = 0;
		break;
	}
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		ret = -EBUSY;
		if (!kvm->created_vcpus)
			ret = gmap_set_limit(kvm->arch.gmap, gpa_to_gfn(new_limit));
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%p",
			 (void *)kvm->arch.gmap->asce.val);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}

void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	if (!kvm_s390_pci_interp_allowed())
		return;

	/*
	 * If the host is configured for PCI and the necessary facilities
	 * are available, turn on interpretation for the life of this guest.
	 */
	kvm->arch.use_zpci_interp = 1;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_pci_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		ram_pages += ms->npages;
	}
	/* mark all the pages as dirty */
	gmap_set_cmma_all_dirty(kvm->arch.gmap);
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}
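
/*
 * Userspace sketch (not compiled): migration mode is driven through the
 * KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR vm ioctls with the
 * KVM_S390_VM_MIGRATION attribute group handled above.  The function
 * name is illustrative only.
 */
#if 0
static int query_migration_mode_example(int vm_fd, __u64 *mode)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MIGRATION,
		.attr = KVM_S390_VM_MIGRATION_STATUS,
		.addr = (__u64)(unsigned long)mode,
	};

	return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
}
#endif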

static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	__kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	__kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so trying
	 * to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}
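
/*
 * Sketch (not compiled) of the carry handling above: the guest TOD is the
 * host TOD plus the (possibly negative, two's complement) epoch, and the
 * carry out of the 64-bit addition - detectable as the sum being smaller
 * than the host TOD - must be added to the epoch index, whose stored value
 * is assumed to already include the sign extension of the epoch.
 */
#if 0
static u64 guest_tod_example(u64 host_tod, u64 epoch, u8 *epoch_idx)
{
	u64 guest_tod = host_tod + epoch;

	if (guest_tod < host_tod)	/* 64-bit carry out */
		*epoch_idx += 1;
	return guest_tod;
}
#endif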

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc_obj(*proc, GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
	VM_EVENT(kvm, 3, "SET: guest PFCR   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pfcr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pfcr)[1]);

	return 0;
}

#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK	\
(						\
	((struct kvm_s390_vm_cpu_uv_feat){	\
		.ap = 1,			\
		.ap_intr = 1,			\
	})					\
	.feat					\
)
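
/*
 * Sketch (not compiled): the macro above uses a C99 compound literal to
 * derive a bitmask from named bitfields, which is equivalent to OR-ing
 * the individual feature bits by hand but stays in sync with the uapi
 * structure layout automatically.  The function name is illustrative.
 */
#if 0
static unsigned long uv_feat_guest_mask_example(void)
{
	struct kvm_s390_vm_cpu_uv_feat allowed = { .ap = 1, .ap_intr = 1 };

	return allowed.feat;	/* raw word with only the ap/ap_intr bits set */
}
#endif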
1575 
kvm_s390_set_uv_feat(struct kvm * kvm,struct kvm_device_attr * attr)1576 static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1577 {
1578 	struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
1579 	unsigned long data, filter;
1580 
1581 	filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1582 	if (get_user(data, &ptr->feat))
1583 		return -EFAULT;
1584 	if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
1585 		return -EINVAL;
1586 
1587 	mutex_lock(&kvm->lock);
1588 	if (kvm->created_vcpus) {
1589 		mutex_unlock(&kvm->lock);
1590 		return -EBUSY;
1591 	}
1592 	kvm->arch.model.uv_feat_guest.feat = data;
1593 	mutex_unlock(&kvm->lock);
1594 
1595 	VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);
1596 
1597 	return 0;
1598 }
1599 
kvm_s390_set_cpu_model(struct kvm * kvm,struct kvm_device_attr * attr)1600 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1601 {
1602 	int ret = -ENXIO;
1603 
1604 	switch (attr->attr) {
1605 	case KVM_S390_VM_CPU_PROCESSOR:
1606 		ret = kvm_s390_set_processor(kvm, attr);
1607 		break;
1608 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1609 		ret = kvm_s390_set_processor_feat(kvm, attr);
1610 		break;
1611 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1612 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
1613 		break;
1614 	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1615 		ret = kvm_s390_set_uv_feat(kvm, attr);
1616 		break;
1617 	}
1618 	return ret;
1619 }
1620 
kvm_s390_get_processor(struct kvm * kvm,struct kvm_device_attr * attr)1621 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1622 {
1623 	struct kvm_s390_vm_cpu_processor *proc;
1624 	int ret = 0;
1625 
1626 	proc = kzalloc_obj(*proc, GFP_KERNEL_ACCOUNT);
1627 	if (!proc) {
1628 		ret = -ENOMEM;
1629 		goto out;
1630 	}
1631 	proc->cpuid = kvm->arch.model.cpuid;
1632 	proc->ibc = kvm->arch.model.ibc;
1633 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1634 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1635 	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1636 		 kvm->arch.model.ibc,
1637 		 kvm->arch.model.cpuid);
1638 	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1639 		 kvm->arch.model.fac_list[0],
1640 		 kvm->arch.model.fac_list[1],
1641 		 kvm->arch.model.fac_list[2]);
1642 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1643 		ret = -EFAULT;
1644 	kfree(proc);
1645 out:
1646 	return ret;
1647 }
1648 
kvm_s390_get_machine(struct kvm * kvm,struct kvm_device_attr * attr)1649 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1650 {
1651 	struct kvm_s390_vm_cpu_machine *mach;
1652 	int ret = 0;
1653 
1654 	mach = kzalloc_obj(*mach, GFP_KERNEL_ACCOUNT);
1655 	if (!mach) {
1656 		ret = -ENOMEM;
1657 		goto out;
1658 	}
1659 	get_cpu_id((struct cpuid *) &mach->cpuid);
1660 	mach->ibc = sclp.ibc;
1661 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1662 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1663 	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
1664 	       sizeof(stfle_fac_list));
1665 	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1666 		 mach->ibc,
1667 		 mach->cpuid);
1668 	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1669 		 mach->fac_mask[0],
1670 		 mach->fac_mask[1],
1671 		 mach->fac_mask[2]);
1672 	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1673 		 mach->fac_list[0],
1674 		 mach->fac_list[1],
1675 		 mach->fac_list[2]);
1676 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1677 		ret = -EFAULT;
1678 	kfree(mach);
1679 out:
1680 	return ret;
1681 }
1682 
1683 static int kvm_s390_get_processor_feat(struct kvm *kvm,
1684 				       struct kvm_device_attr *attr)
1685 {
1686 	struct kvm_s390_vm_cpu_feat data;
1687 
1688 	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1689 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1690 		return -EFAULT;
1691 	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1692 			 data.feat[0],
1693 			 data.feat[1],
1694 			 data.feat[2]);
1695 	return 0;
1696 }
1697 
1698 static int kvm_s390_get_machine_feat(struct kvm *kvm,
1699 				     struct kvm_device_attr *attr)
1700 {
1701 	struct kvm_s390_vm_cpu_feat data;
1702 
1703 	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1704 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1705 		return -EFAULT;
1706 	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
1707 			 data.feat[0],
1708 			 data.feat[1],
1709 			 data.feat[2]);
1710 	return 0;
1711 }
1712 
1713 static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1714 					  struct kvm_device_attr *attr)
1715 {
1716 	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1717 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1718 		return -EFAULT;
1719 
1720 	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1721 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1722 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1723 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1724 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1725 	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1726 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1727 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1728 	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1729 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1730 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1731 	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1732 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1733 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1734 	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1735 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1736 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1737 	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1738 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1739 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1740 	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1741 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1742 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1743 	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1744 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1745 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1746 	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1747 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1748 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1749 	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1750 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1751 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1752 	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1753 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1754 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1755 	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1756 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1757 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1758 	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1759 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1760 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1761 	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1762 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1763 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1764 	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1765 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1766 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1767 	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1768 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1769 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1770 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1771 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1772 	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1773 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1774 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1775 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1776 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1777 	VM_EVENT(kvm, 3, "GET: guest PFCR   subfunc 0x%16.16lx.%16.16lx",
1778 		 ((unsigned long *) &kvm->arch.model.subfuncs.pfcr)[0],
1779 		 ((unsigned long *) &kvm->arch.model.subfuncs.pfcr)[1]);
1780 
1781 	return 0;
1782 }
1783 
1784 static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1785 					struct kvm_device_attr *attr)
1786 {
1787 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1788 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1789 		return -EFAULT;
1790 
1791 	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1792 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1793 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1794 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1795 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1796 	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
1797 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1798 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1799 	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
1800 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1801 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1802 	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
1803 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1804 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1805 	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
1806 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1807 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1808 	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
1809 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1810 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1811 	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
1812 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1813 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1814 	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
1815 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1816 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1817 	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
1818 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1819 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1820 	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
1821 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1822 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1823 	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
1824 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1825 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1826 	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
1827 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1828 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1829 	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
1830 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1831 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1832 	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
1833 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1834 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
1835 	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
1836 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1837 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1838 	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1839 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1840 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1841 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1842 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
1843 	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1844 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1845 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1846 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1847 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
1848 	VM_EVENT(kvm, 3, "GET: host  PFCR   subfunc 0x%16.16lx.%16.16lx",
1849 		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1850 		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
1851 
1852 	return 0;
1853 }
1854 
1855 static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1856 {
1857 	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1858 	unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
1859 
1860 	if (put_user(feat, &dst->feat))
1861 		return -EFAULT;
1862 	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
1863 
1864 	return 0;
1865 }
1866 
1867 static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1868 {
1869 	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1870 	unsigned long feat;
1871 
1872 	BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));
1873 
1874 	feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1875 	if (put_user(feat, &dst->feat))
1876 		return -EFAULT;
1877 	VM_EVENT(kvm, 3, "GET: host UV-feat: 0x%16.16lx", feat);
1878 
1879 	return 0;
1880 }
1881 
1882 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1883 {
1884 	int ret = -ENXIO;
1885 
1886 	switch (attr->attr) {
1887 	case KVM_S390_VM_CPU_PROCESSOR:
1888 		ret = kvm_s390_get_processor(kvm, attr);
1889 		break;
1890 	case KVM_S390_VM_CPU_MACHINE:
1891 		ret = kvm_s390_get_machine(kvm, attr);
1892 		break;
1893 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1894 		ret = kvm_s390_get_processor_feat(kvm, attr);
1895 		break;
1896 	case KVM_S390_VM_CPU_MACHINE_FEAT:
1897 		ret = kvm_s390_get_machine_feat(kvm, attr);
1898 		break;
1899 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1900 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
1901 		break;
1902 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1903 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
1904 		break;
1905 	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1906 		ret = kvm_s390_get_processor_uv_feat(kvm, attr);
1907 		break;
1908 	case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
1909 		ret = kvm_s390_get_machine_uv_feat(kvm, attr);
1910 		break;
1911 	}
1912 	return ret;
1913 }
1914 
1915 /**
1916  * kvm_s390_update_topology_change_report - update CPU topology change report
1917  * @kvm: guest KVM description
1918  * @val: set or clear the MTCR bit
1919  *
1920  * Updates the Multiprocessor Topology-Change-Report bit to signal
1921  * a topology change to the guest.
1922  * This is only relevant if the topology facility is present.
1923  */
1924 static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
1925 {
1926 	union sca_utility new, old;
1927 	struct esca_block *sca;
1928 
1929 	sca = kvm->arch.sca;
1930 	old = READ_ONCE(sca->utility);
1931 	do {
1932 		new = old;
1933 		new.mtcr = val;
1934 	} while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
1935 }
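
/*
 * This is the usual lock-free read-modify-write pattern: try_cmpxchg()
 * replaces sca->utility only if it still holds the value previously read
 * into "old", and on failure updates "old" with the current contents
 * before the loop retries. Concurrent changes to the other utility bits
 * are therefore never lost.
 */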
1936 
1937 static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
1938 					       struct kvm_device_attr *attr)
1939 {
1940 	if (!test_kvm_facility(kvm, 11))
1941 		return -ENXIO;
1942 
1943 	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1944 	return 0;
1945 }
1946 
1947 static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
1948 					       struct kvm_device_attr *attr)
1949 {
1950 	u8 topo;
1951 
1952 	if (!test_kvm_facility(kvm, 11))
1953 		return -ENXIO;
1954 
1955 	topo = kvm->arch.sca->utility.mtcr;
1956 
1957 	return put_user(topo, (u8 __user *)attr->addr);
1958 }
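
/*
 * A minimal sketch of reading the MTCR bit back from userspace
 * (illustrative only; vm_fd setup and error handling are assumed):
 *
 *	__u8 mtcr;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_TOPOLOGY,
 *		.addr  = (__u64)(unsigned long)&mtcr,
 *	};
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 * Both directions fail with -ENXIO unless facility 11 (configuration
 * topology) is enabled for the guest.
 */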
1959 
1960 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1961 {
1962 	int ret;
1963 
1964 	switch (attr->group) {
1965 	case KVM_S390_VM_MEM_CTRL:
1966 		ret = kvm_s390_set_mem_control(kvm, attr);
1967 		break;
1968 	case KVM_S390_VM_TOD:
1969 		ret = kvm_s390_set_tod(kvm, attr);
1970 		break;
1971 	case KVM_S390_VM_CPU_MODEL:
1972 		ret = kvm_s390_set_cpu_model(kvm, attr);
1973 		break;
1974 	case KVM_S390_VM_CRYPTO:
1975 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1976 		break;
1977 	case KVM_S390_VM_MIGRATION:
1978 		ret = kvm_s390_vm_set_migration(kvm, attr);
1979 		break;
1980 	case KVM_S390_VM_CPU_TOPOLOGY:
1981 		ret = kvm_s390_set_topo_change_indication(kvm, attr);
1982 		break;
1983 	default:
1984 		ret = -ENXIO;
1985 		break;
1986 	}
1987 
1988 	return ret;
1989 }
1990 
1991 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1992 {
1993 	int ret;
1994 
1995 	switch (attr->group) {
1996 	case KVM_S390_VM_MEM_CTRL:
1997 		ret = kvm_s390_get_mem_control(kvm, attr);
1998 		break;
1999 	case KVM_S390_VM_TOD:
2000 		ret = kvm_s390_get_tod(kvm, attr);
2001 		break;
2002 	case KVM_S390_VM_CPU_MODEL:
2003 		ret = kvm_s390_get_cpu_model(kvm, attr);
2004 		break;
2005 	case KVM_S390_VM_MIGRATION:
2006 		ret = kvm_s390_vm_get_migration(kvm, attr);
2007 		break;
2008 	case KVM_S390_VM_CPU_TOPOLOGY:
2009 		ret = kvm_s390_get_topo_change_indication(kvm, attr);
2010 		break;
2011 	default:
2012 		ret = -ENXIO;
2013 		break;
2014 	}
2015 
2016 	return ret;
2017 }
2018 
2019 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
2020 {
2021 	int ret;
2022 
2023 	switch (attr->group) {
2024 	case KVM_S390_VM_MEM_CTRL:
2025 		switch (attr->attr) {
2026 		case KVM_S390_VM_MEM_ENABLE_CMMA:
2027 		case KVM_S390_VM_MEM_CLR_CMMA:
2028 			ret = sclp.has_cmma ? 0 : -ENXIO;
2029 			break;
2030 		case KVM_S390_VM_MEM_LIMIT_SIZE:
2031 			ret = 0;
2032 			break;
2033 		default:
2034 			ret = -ENXIO;
2035 			break;
2036 		}
2037 		break;
2038 	case KVM_S390_VM_TOD:
2039 		switch (attr->attr) {
2040 		case KVM_S390_VM_TOD_LOW:
2041 		case KVM_S390_VM_TOD_HIGH:
2042 			ret = 0;
2043 			break;
2044 		default:
2045 			ret = -ENXIO;
2046 			break;
2047 		}
2048 		break;
2049 	case KVM_S390_VM_CPU_MODEL:
2050 		switch (attr->attr) {
2051 		case KVM_S390_VM_CPU_PROCESSOR:
2052 		case KVM_S390_VM_CPU_MACHINE:
2053 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
2054 		case KVM_S390_VM_CPU_MACHINE_FEAT:
2055 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
2056 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
2057 		case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
2058 		case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
2059 			ret = 0;
2060 			break;
2061 		default:
2062 			ret = -ENXIO;
2063 			break;
2064 		}
2065 		break;
2066 	case KVM_S390_VM_CRYPTO:
2067 		switch (attr->attr) {
2068 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
2069 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
2070 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
2071 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
2072 			ret = 0;
2073 			break;
2074 		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
2075 		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
2076 			ret = ap_instructions_available() ? 0 : -ENXIO;
2077 			break;
2078 		default:
2079 			ret = -ENXIO;
2080 			break;
2081 		}
2082 		break;
2083 	case KVM_S390_VM_MIGRATION:
2084 		ret = 0;
2085 		break;
2086 	case KVM_S390_VM_CPU_TOPOLOGY:
2087 		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
2088 		break;
2089 	default:
2090 		ret = -ENXIO;
2091 		break;
2092 	}
2093 
2094 	return ret;
2095 }
2096 
2097 static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2098 {
2099 	union skey *keys;
2100 	int i, r = 0;
2101 
2102 	if (args->flags != 0)
2103 		return -EINVAL;
2104 
2105 	/* Is this guest using storage keys? */
2106 	if (!uses_skeys(kvm->arch.gmap))
2107 		return KVM_S390_GET_SKEYS_NONE;
2108 
2109 	/* Enforce sane limit on memory allocation */
2110 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2111 		return -EINVAL;
2112 
2113 	keys = kvmalloc_array(args->count, sizeof(*keys), GFP_KERNEL_ACCOUNT);
2114 	if (!keys)
2115 		return -ENOMEM;
2116 
2117 	scoped_guard(read_lock, &kvm->mmu_lock) {
2118 		for (i = 0; i < args->count; i++) {
2119 			r = dat_get_storage_key(kvm->arch.gmap->asce,
2120 						args->start_gfn + i, keys + i);
2121 			if (r)
2122 				break;
2123 		}
2124 	}
2125 
2126 	if (!r) {
2127 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
2128 				 sizeof(uint8_t) * args->count);
2129 		if (r)
2130 			r = -EFAULT;
2131 	}
2132 
2133 	kvfree(keys);
2134 	return r;
2135 }
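
/*
 * A minimal sketch of the matching userspace call (illustrative only;
 * one key byte is returned per guest page):
 *
 *	__u8 keys[128];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn     = 0,
 *		.count	       = 128,
 *		.skeydata_addr = (__u64)(unsigned long)keys,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *
 * A return value of KVM_S390_GET_SKEYS_NONE indicates that the guest
 * never enabled storage keys and nothing was copied.
 */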
2136 
2137 static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2138 {
2139 	struct kvm_s390_mmu_cache *mc;
2140 	union skey *keys;
2141 	int i, r = 0;
2142 
2143 	if (args->flags != 0)
2144 		return -EINVAL;
2145 
2146 	/* Enforce sane limit on memory allocation */
2147 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2148 		return -EINVAL;
2149 
2150 	keys = kvmalloc_array(args->count, sizeof(*keys), GFP_KERNEL_ACCOUNT);
2151 	if (!keys)
2152 		return -ENOMEM;
2153 
2154 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
2155 			   sizeof(uint8_t) * args->count);
2156 	if (r) {
2157 		r = -EFAULT;
2158 		goto out;
2159 	}
2160 
2161 	/* Enable storage key handling for the guest */
2162 	r = gmap_enable_skeys(kvm->arch.gmap);
2163 	if (r)
2164 		goto out;
2165 
2166 	r = -EINVAL;
2167 	for (i = 0; i < args->count; i++) {
2168 		/* Lowest order bit is reserved */
2169 		if (keys[i].zero)
2170 			goto out;
2171 	}
2172 
2173 	mc = kvm_s390_new_mmu_cache();
2174 	if (!mc) {
2175 		r = -ENOMEM;
2176 		goto out;
2177 	}
2178 
2179 	r = 0;
2180 	do {
2181 		r = kvm_s390_mmu_cache_topup(mc);
2182 		if (r == -ENOMEM)
2183 			break;
2184 		scoped_guard(read_lock, &kvm->mmu_lock) {
2185 			for (i = 0; i < args->count; i++) {
2186 				r = dat_set_storage_key(mc, kvm->arch.gmap->asce,
2187 							args->start_gfn + i, keys[i], 0);
2188 				if (r)
2189 					break;
2190 			}
2191 		}
2192 	} while (r == -ENOMEM);
2193 	kvm_s390_free_mmu_cache(mc);
2194 out:
2195 	kvfree(keys);
2196 	return r;
2197 }
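
/*
 * Note the shape of the loop above: the MMU cache is topped up outside
 * the mmu_lock, the keys are set under the read lock, and a -ENOMEM
 * (typically a drained cache) simply restarts the sequence after another
 * topup. kvm_s390_set_cmma_bits() below uses the same pattern.
 */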
2198 
2199 /*
2200  * This function searches for the next page with dirty CMMA attributes, and
2201  * saves the attributes in the buffer up to either the end of the buffer or
2202  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2203  * no trailing clean bytes are saved.
2204  * In case no dirty bits were found, or if CMMA was not enabled or used, the
2205  * output buffer will indicate 0 as length.
2206  */
2207 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2208 				  struct kvm_s390_cmma_log *args)
2209 {
2210 	int peek, ret;
2211 	u8 *values;
2212 
2213 	if (!kvm->arch.use_cmma)
2214 		return -ENXIO;
2215 	/* Invalid/unsupported flags were specified */
2216 	if (args->flags & ~KVM_S390_CMMA_PEEK)
2217 		return -EINVAL;
2218 	/* Migration mode query, and we are not doing a migration */
2219 	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2220 	if (!peek && !kvm->arch.migration_mode)
2221 		return -EINVAL;
2222 	/* CMMA is disabled or was not used, or the buffer has length zero */
2223 	args->count = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2224 	if (!args->count || !uses_cmm(kvm->arch.gmap)) {
2225 		memset(args, 0, sizeof(*args));
2226 		return 0;
2227 	}
2228 	/* We are not peeking, and there are no dirty pages */
2229 	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2230 		memset(args, 0, sizeof(*args));
2231 		return 0;
2232 	}
2233 
2234 	values = vmalloc(args->count);
2235 	if (!values)
2236 		return -ENOMEM;
2237 
2238 	scoped_guard(read_lock, &kvm->mmu_lock) {
2239 		if (peek)
2240 			ret = dat_peek_cmma(args->start_gfn, kvm->arch.gmap->asce, &args->count,
2241 					    values);
2242 		else
2243 			ret = dat_get_cmma(kvm->arch.gmap->asce, &args->start_gfn, &args->count,
2244 					   values, &kvm->arch.cmma_dirty_pages);
2245 	}
2246 
2247 	if (kvm->arch.migration_mode)
2248 		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2249 	else
2250 		args->remaining = 0;
2251 
2252 	if (copy_to_user((void __user *)args->values, values, args->count))
2253 		ret = -EFAULT;
2254 
2255 	vfree(values);
2256 	return ret;
2257 }
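
/*
 * A minimal peek-mode sketch from userspace (illustrative only):
 *
 *	__u8 values[4096];
 *	struct kvm_s390_cmma_log args = {
 *		.start_gfn = 0,
 *		.count	   = sizeof(values),
 *		.flags	   = KVM_S390_CMMA_PEEK,
 *		.values	   = (__u64)(unsigned long)values,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &args);
 *
 * Without KVM_S390_CMMA_PEEK the VM must be in migration mode, and
 * args.start_gfn is advanced to the first dirty page that was found.
 */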
2258 
2259 /*
2260  * This function sets the CMMA attributes for the given pages. If the input
2261  * buffer has zero length, no action is taken, otherwise the attributes are
2262  * set and the mm->context.uses_cmm flag is set.
2263  */
2264 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2265 				  const struct kvm_s390_cmma_log *args)
2266 {
2267 	struct kvm_s390_mmu_cache *mc;
2268 	u8 *bits = NULL;
2269 	int r = 0;
2270 
2271 	if (!kvm->arch.use_cmma)
2272 		return -ENXIO;
2273 	/* invalid/unsupported flags */
2274 	if (args->flags != 0)
2275 		return -EINVAL;
2276 	/* Enforce sane limit on memory allocation */
2277 	if (args->count > KVM_S390_CMMA_SIZE_MAX)
2278 		return -EINVAL;
2279 	/* Nothing to do */
2280 	if (args->count == 0)
2281 		return 0;
2282 
2283 	mc = kvm_s390_new_mmu_cache();
2284 	if (!mc)
2285 		return -ENOMEM;
2286 	bits = vmalloc(array_size(sizeof(*bits), args->count));
2287 	if (!bits)
2288 		goto out;
2289 
2290 	r = copy_from_user(bits, (void __user *)args->values, args->count);
2291 	if (r) {
2292 		r = -EFAULT;
2293 		goto out;
2294 	}
2295 
2296 	do {
2297 		r = kvm_s390_mmu_cache_topup(mc);
2298 		if (r)
2299 			break;
2300 		scoped_guard(read_lock, &kvm->mmu_lock) {
2301 			r = dat_set_cmma_bits(mc, kvm->arch.gmap->asce, args->start_gfn,
2302 					      args->count, args->mask, bits);
2303 		}
2304 	} while (r == -ENOMEM);
2305 
2306 	set_bit(GMAP_FLAG_USES_CMM, &kvm->arch.gmap->flags);
2307 out:
2308 	kvm_s390_free_mmu_cache(mc);
2309 	vfree(bits);
2310 	return r;
2311 }
2312 
2313 /**
2314  * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2315  * non-protected.
2316  * @kvm: the VM whose protected vCPUs are to be converted
2317  * @rc: return value for the RC field of the UVC (in case of error)
2318  * @rrc: return value for the RRC field of the UVC (in case of error)
2319  *
2320  * Does not stop in case of error, tries to convert as many
2321  * CPUs as possible. In case of error, the RC and RRC of the first error are
2322  * returned.
2323  *
2324  * Return: 0 in case of success, otherwise -EIO
2325  */
2326 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2327 {
2328 	struct kvm_vcpu *vcpu;
2329 	unsigned long i;
2330 	u16 _rc, _rrc;
2331 	int ret = 0;
2332 
2333 	/*
2334 	 * We ignore failures and try to destroy as many CPUs as possible.
2335 	 * At the same time we must not free the assigned resources when
2336 	 * this fails, as the ultravisor still has access to that memory.
2337 	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2338 	 * behind.
2339 	 * We want to return the first failure rc and rrc, though.
2340 	 */
2341 	kvm_for_each_vcpu(i, vcpu, kvm) {
2342 		mutex_lock(&vcpu->mutex);
2343 		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2344 			*rc = _rc;
2345 			*rrc = _rrc;
2346 			ret = -EIO;
2347 		}
2348 		mutex_unlock(&vcpu->mutex);
2349 	}
2350 	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2351 	if (use_gisa)
2352 		kvm_s390_gisa_enable(kvm);
2353 	return ret;
2354 }
2355 
2356 /**
2357  * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2358  * to protected.
2359  * @kvm: the VM whose protected vCPUs are to be converted
2360  * @rc: return value for the RC field of the UVC (in case of error)
2361  * @rrc: return value for the RRC field of the UVC (in case of error)
2362  *
2363  * Tries to undo the conversion in case of error.
2364  *
2365  * Return: 0 in case of success, otherwise -EIO
2366  */
2367 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2368 {
2369 	struct kvm_vcpu *vcpu;
2370 	unsigned long i;
2371 	int r = 0;
2372 	u16 dummy;
2374 
2375 	/* Disable the GISA if the ultravisor does not support AIV. */
2376 	if (!uv_has_feature(BIT_UV_FEAT_AIV))
2377 		kvm_s390_gisa_disable(kvm);
2378 
2379 	kvm_for_each_vcpu(i, vcpu, kvm) {
2380 		mutex_lock(&vcpu->mutex);
2381 		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2382 		mutex_unlock(&vcpu->mutex);
2383 		if (r)
2384 			break;
2385 	}
2386 	if (r)
2387 		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2388 	return r;
2389 }
2390 
2391 /*
2392  * Here we provide user space with a direct interface to query
2393  * UV-related data such as UV maxima and available features, as
2394  * well as feature-specific data.
2395  *
2396  * To facilitate future extension of the data structures we'll try to
2397  * write data up to the maximum requested length.
2398  */
2399 static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
2400 {
2401 	ssize_t len_min;
2402 
2403 	switch (info->header.id) {
2404 	case KVM_PV_INFO_VM: {
2405 		len_min = sizeof(info->header) + sizeof(info->vm);
2406 
2407 		if (info->header.len_max < len_min)
2408 			return -EINVAL;
2409 
2410 		memcpy(info->vm.inst_calls_list,
2411 		       uv_info.inst_calls_list,
2412 		       sizeof(uv_info.inst_calls_list));
2413 
2414 		/* It's max cpuid, not max cpus, so it's off by one */
2415 		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
2416 		info->vm.max_guests = uv_info.max_num_sec_conf;
2417 		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
2418 		info->vm.feature_indication = uv_info.uv_feature_indications;
2419 
2420 		return len_min;
2421 	}
2422 	case KVM_PV_INFO_DUMP: {
2423 		len_min = sizeof(info->header) + sizeof(info->dump);
2424 
2425 		if (info->header.len_max < len_min)
2426 			return -EINVAL;
2427 
2428 		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2429 		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2430 		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
2431 		return len_min;
2432 	}
2433 	default:
2434 		return -EINVAL;
2435 	}
2436 }
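
/*
 * A minimal sketch of a KVM_PV_INFO query from userspace, issued through
 * the KVM_S390_PV_COMMAND ioctl handled further below (illustrative
 * only):
 *
 *	struct kvm_s390_pv_info info = {
 *		.header.id	= KVM_PV_INFO_VM,
 *		.header.len_max = sizeof(info.header) + sizeof(info.vm),
 *	};
 *	struct kvm_pv_cmd cmd = {
 *		.cmd  = KVM_PV_INFO,
 *		.data = (__u64)(unsigned long)&info,
 *	};
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
 *
 * On success, info.header.len_written reports how much of the (possibly
 * extended) structure the kernel actually filled in.
 */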
2437 
2438 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
2439 			   struct kvm_s390_pv_dmp dmp)
2440 {
2441 	int r = -EINVAL;
2442 	void __user *result_buff = (void __user *)dmp.buff_addr;
2443 
2444 	switch (dmp.subcmd) {
2445 	case KVM_PV_DUMP_INIT: {
2446 		if (kvm->arch.pv.dumping)
2447 			break;
2448 
2449 		/*
2450 		 * Block SIE entry as concurrent dump UVCs could lead
2451 		 * to validity intercepts.
2452 		 */
2453 		kvm_s390_vcpu_block_all(kvm);
2454 
2455 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2456 				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
2457 		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
2458 			     cmd->rc, cmd->rrc);
2459 		if (!r) {
2460 			kvm->arch.pv.dumping = true;
2461 		} else {
2462 			kvm_s390_vcpu_unblock_all(kvm);
2463 			r = -EINVAL;
2464 		}
2465 		break;
2466 	}
2467 	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
2468 		if (!kvm->arch.pv.dumping)
2469 			break;
2470 
2471 		/*
2472 		 * gaddr is an output parameter since we might stop
2473 		 * early. As dmp will be copied back by our caller, we
2474 		 * don't need to do it ourselves.
2475 		 */
2476 		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
2477 						&cmd->rc, &cmd->rrc);
2478 		break;
2479 	}
2480 	case KVM_PV_DUMP_COMPLETE: {
2481 		if (!kvm->arch.pv.dumping)
2482 			break;
2483 
2484 		r = -EINVAL;
2485 		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
2486 			break;
2487 
2488 		r = kvm_s390_pv_dump_complete(kvm, result_buff,
2489 					      &cmd->rc, &cmd->rrc);
2490 		break;
2491 	}
2492 	default:
2493 		r = -ENOTTY;
2494 		break;
2495 	}
2496 
2497 	return r;
2498 }
2499 
2500 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2501 {
2502 	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
2503 	void __user *argp = (void __user *)cmd->data;
2504 	int r = 0;
2505 	u16 dummy;
2506 
2507 	if (need_lock)
2508 		mutex_lock(&kvm->lock);
2509 
2510 	switch (cmd->cmd) {
2511 	case KVM_PV_ENABLE: {
2512 		r = -EINVAL;
2513 		if (kvm_s390_pv_is_protected(kvm))
2514 			break;
2515 
2516 		mmap_write_lock(kvm->mm);
2517 		/*
2518 		 * Disable creation of new THPs. Existing THPs can stay, they
2519 		 * will be split when any part of them gets imported.
2520 		 */
2521 		mm_flags_clear(MMF_DISABLE_THP_EXCEPT_ADVISED, kvm->mm);
2522 		mm_flags_set(MMF_DISABLE_THP_COMPLETELY, kvm->mm);
2523 		set_bit(GMAP_FLAG_EXPORT_ON_UNMAP, &kvm->arch.gmap->flags);
2524 		r = gmap_helper_disable_cow_sharing();
2525 		mmap_write_unlock(kvm->mm);
2526 		if (r)
2527 			break;
2528 
2529 		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2530 		if (r)
2531 			break;
2532 
2533 		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2534 		if (r)
2535 			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2536 
2537 		/* we need to block service interrupts from now on */
2538 		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2539 		break;
2540 	}
2541 	case KVM_PV_ASYNC_CLEANUP_PREPARE:
2542 		r = -EINVAL;
2543 		if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
2544 			break;
2545 
2546 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2547 		/*
2548 		 * If a CPU could not be destroyed, destroying the VM will
2549 		 * also fail. There is no point in trying; instead return
2550 		 * the rc and rrc of the first CPU that failed to be destroyed.
2551 		 */
2552 		if (r)
2553 			break;
2554 		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2555 
2556 		/* no need to block service interrupts any more */
2557 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2558 		break;
2559 	case KVM_PV_ASYNC_CLEANUP_PERFORM:
2560 		r = -EINVAL;
2561 		if (!async_destroy)
2562 			break;
2563 		/* kvm->lock must not be held; this is asserted inside the function. */
2564 		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2565 		break;
2566 	case KVM_PV_DISABLE: {
2567 		r = -EINVAL;
2568 		if (!kvm_s390_pv_is_protected(kvm))
2569 			break;
2570 
2571 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2572 		/*
2573 		 * If a CPU could not be destroyed, destroying the VM will
2574 		 * also fail. There is no point in trying; instead return
2575 		 * the rc and rrc of the first CPU that failed to be destroyed.
2576 		 */
2577 		if (r)
2578 			break;
2579 		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
2580 
2581 		/* no need to block service interrupts any more */
2582 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2583 		break;
2584 	}
2585 	case KVM_PV_SET_SEC_PARMS: {
2586 		struct kvm_s390_pv_sec_parm parms = {};
2587 		void *hdr;
2588 
2589 		r = -EINVAL;
2590 		if (!kvm_s390_pv_is_protected(kvm))
2591 			break;
2592 
2593 		r = -EFAULT;
2594 		if (copy_from_user(&parms, argp, sizeof(parms)))
2595 			break;
2596 
2597 		/* Currently restricted to 1MiB */
2598 		r = -EINVAL;
2599 		if (parms.length > SZ_1M)
2600 			break;
2601 
2602 		r = -ENOMEM;
2603 		hdr = vmalloc(parms.length);
2604 		if (!hdr)
2605 			break;
2606 
2607 		r = -EFAULT;
2608 		if (!copy_from_user(hdr, (void __user *)parms.origin,
2609 				    parms.length))
2610 			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2611 						      &cmd->rc, &cmd->rrc);
2612 
2613 		vfree(hdr);
2614 		break;
2615 	}
2616 	case KVM_PV_UNPACK: {
2617 		struct kvm_s390_pv_unp unp = {};
2618 
2619 		r = -EINVAL;
2620 		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2621 			break;
2622 
2623 		r = -EFAULT;
2624 		if (copy_from_user(&unp, argp, sizeof(unp)))
2625 			break;
2626 
2627 		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2628 				       &cmd->rc, &cmd->rrc);
2629 		break;
2630 	}
2631 	case KVM_PV_VERIFY: {
2632 		r = -EINVAL;
2633 		if (!kvm_s390_pv_is_protected(kvm))
2634 			break;
2635 
2636 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2637 				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2638 		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2639 			     cmd->rrc);
2640 		break;
2641 	}
2642 	case KVM_PV_PREP_RESET: {
2643 		r = -EINVAL;
2644 		if (!kvm_s390_pv_is_protected(kvm))
2645 			break;
2646 
2647 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2648 				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2649 		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2650 			     cmd->rc, cmd->rrc);
2651 		break;
2652 	}
2653 	case KVM_PV_UNSHARE_ALL: {
2654 		r = -EINVAL;
2655 		if (!kvm_s390_pv_is_protected(kvm))
2656 			break;
2657 
2658 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2659 				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2660 		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2661 			     cmd->rc, cmd->rrc);
2662 		break;
2663 	}
2664 	case KVM_PV_INFO: {
2665 		struct kvm_s390_pv_info info = {};
2666 		ssize_t data_len;
2667 
2668 		/*
2669 		 * No need to check the VM protection here.
2670 		 *
2671 		 * Maybe user space wants to query some of the data
2672 		 * when the VM is still unprotected. If we see the
2673 		 * need to fence a new data command we can still
2674 		 * return an error in the info handler.
2675 		 */
2676 
2677 		r = -EFAULT;
2678 		if (copy_from_user(&info, argp, sizeof(info.header)))
2679 			break;
2680 
2681 		r = -EINVAL;
2682 		if (info.header.len_max < sizeof(info.header))
2683 			break;
2684 
2685 		data_len = kvm_s390_handle_pv_info(&info);
2686 		if (data_len < 0) {
2687 			r = data_len;
2688 			break;
2689 		}
2690 		/*
2691 		 * If a data command struct is extended (multiple
2692 		 * times) this can be used to determine how much of it
2693 		 * is valid.
2694 		 */
2695 		info.header.len_written = data_len;
2696 
2697 		r = -EFAULT;
2698 		if (copy_to_user(argp, &info, data_len))
2699 			break;
2700 
2701 		r = 0;
2702 		break;
2703 	}
2704 	case KVM_PV_DUMP: {
2705 		struct kvm_s390_pv_dmp dmp;
2706 
2707 		r = -EINVAL;
2708 		if (!kvm_s390_pv_is_protected(kvm))
2709 			break;
2710 
2711 		r = -EFAULT;
2712 		if (copy_from_user(&dmp, argp, sizeof(dmp)))
2713 			break;
2714 
2715 		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
2716 		if (r)
2717 			break;
2718 
2719 		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2720 			r = -EFAULT;
2721 			break;
2722 		}
2723 
2724 		break;
2725 	}
2726 	default:
2727 		r = -ENOTTY;
2728 	}
2729 	if (need_lock)
2730 		mutex_unlock(&kvm->lock);
2731 
2732 	return r;
2733 }
2734 
2735 static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
2736 {
2737 	if (mop->flags & ~supported_flags || !mop->size)
2738 		return -EINVAL;
2739 	if (mop->size > MEM_OP_MAX_SIZE)
2740 		return -E2BIG;
2741 	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
2742 		if (mop->key > 0xf)
2743 			return -EINVAL;
2744 	} else {
2745 		mop->key = 0;
2746 	}
2747 	return 0;
2748 }
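
/*
 * The access key is only four bits wide, hence the 0xf limit above; when
 * KVM_S390_MEMOP_F_SKEY_PROTECTION is not requested the key is
 * normalized to 0 so the access routines can use it unconditionally.
 */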
2749 
2750 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2751 {
2752 	void __user *uaddr = (void __user *)mop->buf;
2753 	void *tmpbuf __free(kvfree) = NULL;
2754 	enum gacc_mode acc_mode;
2755 	int r;
2756 
2757 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
2758 					KVM_S390_MEMOP_F_CHECK_ONLY);
2759 	if (r)
2760 		return r;
2761 
2762 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2763 		tmpbuf = vmalloc(mop->size);
2764 		if (!tmpbuf)
2765 			return -ENOMEM;
2766 	}
2767 
2768 	acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
2769 
2770 	scoped_guard(srcu, &kvm->srcu) {
2771 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
2772 			return check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
2773 
2774 		if (acc_mode == GACC_STORE && copy_from_user(tmpbuf, uaddr, mop->size))
2775 			return -EFAULT;
2776 		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2777 					      mop->size, acc_mode, mop->key);
2778 		if (r)
2779 			return r;
2780 		if (acc_mode != GACC_STORE && copy_to_user(uaddr, tmpbuf, mop->size))
2781 			return -EFAULT;
2782 	}
2783 	return 0;
2784 }
2785 
2786 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2787 {
2788 	void __user *uaddr = (void __user *)mop->buf;
2789 	void __user *old_addr = (void __user *)mop->old_addr;
2790 	union kvm_s390_quad old = { .sixteen = 0 };
2791 	union kvm_s390_quad new = { .sixteen = 0 };
2792 	bool success = false;
2793 	int r;
2794 
2795 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
2796 	if (r)
2797 		return r;
2798 	/*
2799 	 * This bounds the offset within the quadword (off_in_quad).
2800 	 * Checking that size is a power of two is not necessary, as
2801 	 * cmpxchg_guest_abs_with_key takes care of that.
2802 	 */
2803 	if (mop->size > sizeof(new))
2804 		return -EINVAL;
2805 	if (copy_from_user(&new, uaddr, mop->size))
2806 		return -EFAULT;
2807 	if (copy_from_user(&old, old_addr, mop->size))
2808 		return -EFAULT;
2809 
2810 	scoped_guard(srcu, &kvm->srcu) {
2811 		r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old, new,
2812 					       mop->key, &success);
2813 
2814 		if (!success && copy_to_user(old_addr, &old, mop->size))
2815 			return -EFAULT;
2816 	}
2817 	return r;
2818 }
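
/*
 * A minimal userspace sketch of an 8-byte compare-and-swap through this
 * interface (illustrative only; "gpa" stands for a suitably aligned
 * guest absolute address):
 *
 *	__u64 old = 0, new = 42;
 *	struct kvm_s390_mem_op mop = {
 *		.op	  = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG,
 *		.gaddr	  = gpa,
 *		.size	  = sizeof(new),
 *		.buf	  = (__u64)(unsigned long)&new,
 *		.old_addr = (__u64)(unsigned long)&old,
 *	};
 *	ioctl(vm_fd, KVM_S390_MEM_OP, &mop);
 *
 * On a failed exchange the actual guest value is copied back into "old",
 * so the caller can recompute and retry.
 */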
2819 
2820 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2821 {
2822 	/*
2823 	 * This is technically a heuristic only: since kvm->lock is not
2824 	 * taken, it is not guaranteed that the VM is/remains non-protected.
2825 	 * This is fine from a kernel perspective: wrongdoing is detected
2826 	 * on access, -EFAULT is returned, and the VM may crash the
2827 	 * next time it accesses the memory in question.
2828 	 * There is no sane use case for doing the switch and a memop on
2829 	 * two different CPUs at the same time.
2830 	 */
2831 	if (kvm_s390_pv_get_handle(kvm))
2832 		return -EINVAL;
2833 
2834 	switch (mop->op) {
2835 	case KVM_S390_MEMOP_ABSOLUTE_READ:
2836 	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
2837 		return kvm_s390_vm_mem_op_abs(kvm, mop);
2838 	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
2839 		return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
2840 	default:
2841 		return -EINVAL;
2842 	}
2843 }
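
/*
 * For comparison, a plain absolute read through the same ioctl
 * (illustrative only):
 *
 *	__u8 buf[256];
 *	struct kvm_s390_mem_op mop = {
 *		.op    = KVM_S390_MEMOP_ABSOLUTE_READ,
 *		.gaddr = gpa,
 *		.size  = sizeof(buf),
 *		.buf   = (__u64)(unsigned long)buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_MEM_OP, &mop);
 *
 * Adding KVM_S390_MEMOP_F_CHECK_ONLY to mop.flags verifies that the
 * range is accessible without transferring any data.
 */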
2844 
2845 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
2846 {
2847 	struct kvm *kvm = filp->private_data;
2848 	void __user *argp = (void __user *)arg;
2849 	struct kvm_device_attr attr;
2850 	int r;
2851 
2852 	switch (ioctl) {
2853 	case KVM_S390_INTERRUPT: {
2854 		struct kvm_s390_interrupt s390int;
2855 
2856 		r = -EFAULT;
2857 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
2858 			break;
2859 		r = kvm_s390_inject_vm(kvm, &s390int);
2860 		break;
2861 	}
2862 	case KVM_CREATE_IRQCHIP: {
2863 		r = -EINVAL;
2864 		if (kvm->arch.use_irqchip)
2865 			r = 0;
2866 		break;
2867 	}
2868 	case KVM_SET_DEVICE_ATTR: {
2869 		r = -EFAULT;
2870 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2871 			break;
2872 		r = kvm_s390_vm_set_attr(kvm, &attr);
2873 		break;
2874 	}
2875 	case KVM_GET_DEVICE_ATTR: {
2876 		r = -EFAULT;
2877 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2878 			break;
2879 		r = kvm_s390_vm_get_attr(kvm, &attr);
2880 		break;
2881 	}
2882 	case KVM_HAS_DEVICE_ATTR: {
2883 		r = -EFAULT;
2884 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2885 			break;
2886 		r = kvm_s390_vm_has_attr(kvm, &attr);
2887 		break;
2888 	}
2889 	case KVM_S390_GET_SKEYS: {
2890 		struct kvm_s390_skeys args;
2891 
2892 		r = -EFAULT;
2893 		if (copy_from_user(&args, argp,
2894 				   sizeof(struct kvm_s390_skeys)))
2895 			break;
2896 		r = kvm_s390_get_skeys(kvm, &args);
2897 		break;
2898 	}
2899 	case KVM_S390_SET_SKEYS: {
2900 		struct kvm_s390_skeys args;
2901 
2902 		r = -EFAULT;
2903 		if (copy_from_user(&args, argp,
2904 				   sizeof(struct kvm_s390_skeys)))
2905 			break;
2906 		r = kvm_s390_set_skeys(kvm, &args);
2907 		break;
2908 	}
2909 	case KVM_S390_GET_CMMA_BITS: {
2910 		struct kvm_s390_cmma_log args;
2911 
2912 		r = -EFAULT;
2913 		if (copy_from_user(&args, argp, sizeof(args)))
2914 			break;
2915 		mutex_lock(&kvm->slots_lock);
2916 		r = kvm_s390_get_cmma_bits(kvm, &args);
2917 		mutex_unlock(&kvm->slots_lock);
2918 		if (!r) {
2919 			r = copy_to_user(argp, &args, sizeof(args));
2920 			if (r)
2921 				r = -EFAULT;
2922 		}
2923 		break;
2924 	}
2925 	case KVM_S390_SET_CMMA_BITS: {
2926 		struct kvm_s390_cmma_log args;
2927 
2928 		r = -EFAULT;
2929 		if (copy_from_user(&args, argp, sizeof(args)))
2930 			break;
2931 		mutex_lock(&kvm->slots_lock);
2932 		r = kvm_s390_set_cmma_bits(kvm, &args);
2933 		mutex_unlock(&kvm->slots_lock);
2934 		break;
2935 	}
2936 	case KVM_S390_PV_COMMAND: {
2937 		struct kvm_pv_cmd args;
2938 
2939 		/* protvirt means user cpu state */
2940 		kvm_s390_set_user_cpu_state_ctrl(kvm);
2941 		r = 0;
2942 		if (!is_prot_virt_host()) {
2943 			r = -EINVAL;
2944 			break;
2945 		}
2946 		if (copy_from_user(&args, argp, sizeof(args))) {
2947 			r = -EFAULT;
2948 			break;
2949 		}
2950 		if (args.flags) {
2951 			r = -EINVAL;
2952 			break;
2953 		}
2954 		/* must be called without kvm->lock */
2955 		r = kvm_s390_handle_pv(kvm, &args);
2956 		if (copy_to_user(argp, &args, sizeof(args))) {
2957 			r = -EFAULT;
2958 			break;
2959 		}
2960 		break;
2961 	}
2962 	case KVM_S390_MEM_OP: {
2963 		struct kvm_s390_mem_op mem_op;
2964 
2965 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2966 			r = kvm_s390_vm_mem_op(kvm, &mem_op);
2967 		else
2968 			r = -EFAULT;
2969 		break;
2970 	}
2971 	case KVM_S390_KEYOP: {
2972 		struct kvm_s390_mmu_cache *mc;
2973 		struct kvm_s390_keyop kop;
2974 		union skey skey;
2975 
2976 		if (copy_from_user(&kop, argp, sizeof(kop))) {
2977 			r = -EFAULT;
2978 			break;
2979 		}
2980 		skey.skey = kop.key;
2981 
2982 		mc = kvm_s390_new_mmu_cache();
2983 		if (!mc)
2984 			return -ENOMEM;
2985 
2986 		r = kvm_s390_keyop(mc, kvm, kop.operation, kop.guest_addr, skey);
2987 		kvm_s390_free_mmu_cache(mc);
2988 		if (r < 0)
2989 			break;
2990 
2991 		kop.key = r;
2992 		r = 0;
2993 		if (copy_to_user(argp, &kop, sizeof(kop)))
2994 			r = -EFAULT;
2995 		break;
2996 	}
2997 	case KVM_S390_ZPCI_OP: {
2998 		struct kvm_s390_zpci_op args;
2999 
3000 		r = -EINVAL;
3001 		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3002 			break;
3003 		if (copy_from_user(&args, argp, sizeof(args))) {
3004 			r = -EFAULT;
3005 			break;
3006 		}
3007 		r = kvm_s390_pci_zpci_op(kvm, &args);
3008 		break;
3009 	}
3010 	default:
3011 		r = -ENOTTY;
3012 	}
3013 
3014 	return r;
3015 }
3016 
3017 static int kvm_s390_apxa_installed(void)
3018 {
3019 	struct ap_config_info info;
3020 
3021 	if (ap_instructions_available()) {
3022 		if (ap_qci(&info) == 0)
3023 			return info.apxa;
3024 	}
3025 
3026 	return 0;
3027 }
3028 
3029 /*
3030  * The format of the crypto control block (CRYCB) is specified in the three
3031  * low-order bits of the CRYCB designation (CRYCBD) field as follows:
3032  * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3033  *	     AP extended addressing (APXA) facility is installed.
3034  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3035  * Format 2: Both the APXA and MSAX3 facilities are installed.
3036  */
3037 static void kvm_s390_set_crycb_format(struct kvm *kvm)
3038 {
3039 	kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb);
3040 
3041 	/* Clear the CRYCB format bits - i.e., set format 0 by default */
3042 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3043 
3044 	/* Check whether MSAX3 is installed */
3045 	if (!test_kvm_facility(kvm, 76))
3046 		return;
3047 
3048 	if (kvm_s390_apxa_installed())
3049 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3050 	else
3051 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3052 }
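
/*
 * The CRYCB designation thus ends up as the physical address of the
 * control block with the format encoded in its low-order bits, e.g.
 * crycbd = virt_to_phys(crycb) | CRYCB_FORMAT2 when both MSAX3 and APXA
 * are available.
 */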
3053 
3054 /*
3055  * kvm_arch_crypto_set_masks
3056  *
3057  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3058  *	 to be set.
3059  * @apm: the mask identifying the accessible AP adapters
3060  * @aqm: the mask identifying the accessible AP domains
3061  * @adm: the mask identifying the accessible AP control domains
3062  *
3063  * Set the masks that identify the adapters, domains and control domains to
3064  * which the KVM guest is granted access.
3065  *
3066  * Note: The kvm->lock mutex must be locked by the caller before invoking this
3067  *	 function.
3068  */
3069 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
3070 			       unsigned long *aqm, unsigned long *adm)
3071 {
3072 	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3073 
3074 	kvm_s390_vcpu_block_all(kvm);
3075 
3076 	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3077 	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
3078 		memcpy(crycb->apcb1.apm, apm, 32);
3079 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
3080 			 apm[0], apm[1], apm[2], apm[3]);
3081 		memcpy(crycb->apcb1.aqm, aqm, 32);
3082 		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
3083 			 aqm[0], aqm[1], aqm[2], aqm[3]);
3084 		memcpy(crycb->apcb1.adm, adm, 32);
3085 		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
3086 			 adm[0], adm[1], adm[2], adm[3]);
3087 		break;
3088 	case CRYCB_FORMAT1:
3089 	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
3090 		memcpy(crycb->apcb0.apm, apm, 8);
3091 		memcpy(crycb->apcb0.aqm, aqm, 2);
3092 		memcpy(crycb->apcb0.adm, adm, 2);
3093 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
3094 			 apm[0], *((unsigned short *)aqm),
3095 			 *((unsigned short *)adm));
3096 		break;
3097 	default:	/* Cannot happen */
3098 		break;
3099 	}
3100 
3101 	/* recreate the shadow crycb for each vcpu */
3102 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3103 	kvm_s390_vcpu_unblock_all(kvm);
3104 }
3105 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
3106 
3107 /*
3108  * kvm_arch_crypto_clear_masks
3109  *
3110  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3111  *	 to be cleared.
3112  *
3113  * Clear the masks that identify the adapters, domains and control domains to
3114  * which the KVM guest is granted access.
3115  *
3116  * Note: The kvm->lock mutex must be locked by the caller before invoking this
3117  *	 function.
3118  */
3119 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
3120 {
3121 	kvm_s390_vcpu_block_all(kvm);
3122 
3123 	memset(&kvm->arch.crypto.crycb->apcb0, 0,
3124 	       sizeof(kvm->arch.crypto.crycb->apcb0));
3125 	memset(&kvm->arch.crypto.crycb->apcb1, 0,
3126 	       sizeof(kvm->arch.crypto.crycb->apcb1));
3127 
3128 	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
3129 	/* recreate the shadow crycb for each vcpu */
3130 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3131 	kvm_s390_vcpu_unblock_all(kvm);
3132 }
3133 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
3134 
3135 static u64 kvm_s390_get_initial_cpuid(void)
3136 {
3137 	struct cpuid cpuid;
3138 
3139 	get_cpu_id(&cpuid);
3140 	cpuid.version = 0xff;
3141 	return *((u64 *) &cpuid);
3142 }
3143 
3144 static void kvm_s390_crypto_init(struct kvm *kvm)
3145 {
3146 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3147 	kvm_s390_set_crycb_format(kvm);
3148 	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3149 
3150 	if (!test_kvm_facility(kvm, 76))
3151 		return;
3152 
3153 	/* Enable AES/DEA protected key functions by default */
3154 	kvm->arch.crypto.aes_kw = 1;
3155 	kvm->arch.crypto.dea_kw = 1;
3156 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3157 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3158 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3159 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3160 }
3161 
3162 static void sca_dispose(struct kvm *kvm)
3163 {
3164 	free_pages_exact(kvm->arch.sca, sizeof(*kvm->arch.sca));
3165 	kvm->arch.sca = NULL;
3166 }
3167 
3168 void kvm_arch_free_vm(struct kvm *kvm)
3169 {
3170 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3171 		kvm_s390_pci_clear_list(kvm);
3172 
3173 	__kvm_arch_free_vm(kvm);
3174 }
3175 
3176 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3177 {
3178 	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
3179 	char debug_name[16];
3180 	int i, rc;
3181 
3182 	mutex_init(&kvm->arch.pv.import_lock);
3183 
3184 	rc = -EINVAL;
3185 #ifdef CONFIG_KVM_S390_UCONTROL
3186 	if (type & ~KVM_VM_S390_UCONTROL)
3187 		goto out_err;
3188 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3189 		goto out_err;
3190 #else
3191 	if (type)
3192 		goto out_err;
3193 #endif
3194 	rc = -ENOMEM;
3195 
3196 	if (!sclp.has_64bscao)
3197 		alloc_flags |= GFP_DMA;
3198 	mutex_lock(&kvm_lock);
3199 
3200 	kvm->arch.sca = alloc_pages_exact(sizeof(*kvm->arch.sca), alloc_flags);
3201 	mutex_unlock(&kvm_lock);
3202 	if (!kvm->arch.sca)
3203 		goto out_err;
3204 
3205 	snprintf(debug_name, sizeof(debug_name), "kvm-%u", current->pid);
3206 
3207 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3208 	if (!kvm->arch.dbf)
3209 		goto out_err;
3210 
3211 	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3212 	kvm->arch.sie_page2 =
3213 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3214 	if (!kvm->arch.sie_page2)
3215 		goto out_err;
3216 
3217 	kvm->arch.sie_page2->kvm = kvm;
3218 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3219 
3220 	for (i = 0; i < kvm_s390_fac_size(); i++) {
3221 		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3222 					      (kvm_s390_fac_base[i] |
3223 					       kvm_s390_fac_ext[i]);
3224 		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3225 					      kvm_s390_fac_base[i];
3226 	}
3227 	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3228 
3229 	/* we are always in czam mode - even on pre z14 machines */
3230 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
3231 	set_kvm_facility(kvm->arch.model.fac_list, 138);
3232 	/* we emulate STHYI in kvm */
3233 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
3234 	set_kvm_facility(kvm->arch.model.fac_list, 74);
3235 	if (machine_has_tlb_guest()) {
3236 		set_kvm_facility(kvm->arch.model.fac_mask, 147);
3237 		set_kvm_facility(kvm->arch.model.fac_list, 147);
3238 	}
3239 
3240 	if (css_general_characteristics.aiv && test_facility(65))
3241 		set_kvm_facility(kvm->arch.model.fac_mask, 65);
3242 
3243 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3244 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3245 
3246 	kvm->arch.model.uv_feat_guest.feat = 0;
3247 
3248 	kvm_s390_crypto_init(kvm);
3249 
3250 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3251 		mutex_lock(&kvm->lock);
3252 		kvm_s390_pci_init_list(kvm);
3253 		kvm_s390_vcpu_pci_enable_interp(kvm);
3254 		mutex_unlock(&kvm->lock);
3255 	}
3256 
3257 	mutex_init(&kvm->arch.float_int.ais_lock);
3258 	spin_lock_init(&kvm->arch.float_int.lock);
3259 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
3260 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3261 	init_waitqueue_head(&kvm->arch.ipte_wq);
3262 	mutex_init(&kvm->arch.ipte_mutex);
3263 
3264 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3265 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
3266 
3267 	kvm->arch.mem_limit = type & KVM_VM_S390_UCONTROL ? KVM_S390_NO_MEM_LIMIT : sclp.hamax + 1;
3268 	kvm->arch.gmap = gmap_new(kvm, gpa_to_gfn(kvm->arch.mem_limit));
3269 	if (!kvm->arch.gmap)
3270 		goto out_err;
3271 	clear_bit(GMAP_FLAG_PFAULT_ENABLED, &kvm->arch.gmap->flags);
3272 
3273 	if (type & KVM_VM_S390_UCONTROL) {
3274 		struct kvm_userspace_memory_region2 fake_memslot = {
3275 			.slot = KVM_S390_UCONTROL_MEMSLOT,
3276 			.guest_phys_addr = 0,
3277 			.userspace_addr = 0,
3278 			.memory_size = ALIGN_DOWN(TASK_SIZE, _SEGMENT_SIZE),
3279 			.flags = 0,
3280 		};
3281 
3282 		/* one flat fake memslot covering the whole address space */
3283 		mutex_lock(&kvm->slots_lock);
3284 		KVM_BUG_ON(kvm_set_internal_memslot(kvm, &fake_memslot), kvm);
3285 		mutex_unlock(&kvm->slots_lock);
3286 		set_bit(GMAP_FLAG_IS_UCONTROL, &kvm->arch.gmap->flags);
3287 	} else {
3288 		struct crst_table *table = dereference_asce(kvm->arch.gmap->asce);
3289 
3290 		crst_table_init((void *)table, _CRSTE_HOLE(table->crstes[0].h.tt).val);
3291 	}
3292 
3293 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
3294 	kvm->arch.use_skf = sclp.has_skey;
3295 	spin_lock_init(&kvm->arch.start_stop_lock);
3296 	kvm_s390_vsie_init(kvm);
3297 	if (use_gisa)
3298 		kvm_s390_gisa_init(kvm);
3299 	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3300 	kvm->arch.pv.set_aside = NULL;
3301 	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
3302 
3303 	return 0;
3304 out_err:
3305 	free_page((unsigned long)kvm->arch.sie_page2);
3306 	debug_unregister(kvm->arch.dbf);
3307 	sca_dispose(kvm);
3308 	KVM_EVENT(3, "creation of vm failed: %d", rc);
3309 	return rc;
3310 }
3311 
3312 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3313 {
3314 	u16 rc, rrc;
3315 
3316 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3317 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
3318 	kvm_s390_clear_local_irqs(vcpu);
3319 	kvm_clear_async_pf_completion_queue(vcpu);
3320 	if (!kvm_is_ucontrol(vcpu->kvm))
3321 		sca_del_vcpu(vcpu);
3322 	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3323 
3324 	if (kvm_is_ucontrol(vcpu->kvm)) {
3325 		scoped_guard(spinlock, &vcpu->kvm->arch.gmap->children_lock)
3326 			gmap_remove_child(vcpu->arch.gmap);
3327 		vcpu->arch.gmap = gmap_put(vcpu->arch.gmap);
3328 	}
3329 
3330 	if (vcpu->kvm->arch.use_cmma)
3331 		kvm_s390_vcpu_unsetup_cmma(vcpu);
3332 	/* We cannot hold the vcpu mutex here; we are already dying */
3333 	if (kvm_s390_pv_cpu_get_handle(vcpu))
3334 		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3335 	free_page((unsigned long)(vcpu->arch.sie_block));
3336 	kvm_s390_free_mmu_cache(vcpu->arch.mc);
3337 }
3338 
3339 void kvm_arch_destroy_vm(struct kvm *kvm)
3340 {
3341 	u16 rc, rrc;
3342 
3343 	kvm_destroy_vcpus(kvm);
3344 	sca_dispose(kvm);
3345 	kvm_s390_gisa_destroy(kvm);
3346 	/*
3347 	 * We are already at the end of life and kvm->lock is not taken.
3348 	 * This is ok as the file descriptor is closed by now and nobody
3349 	 * can mess with the pv state.
3350 	 */
3351 	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
3352 	/*
3353 	 * Remove the mmu notifier only when the whole KVM VM is torn down,
3354 	 * and only if one was registered to begin with. If the VM is
3355 	 * currently not protected, but has previously been protected,
3356 	 * then it's possible that the notifier is still registered.
3357 	 */
3358 	if (kvm->arch.pv.mmu_notifier.ops)
3359 		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3360 
3361 	debug_unregister(kvm->arch.dbf);
3362 	free_page((unsigned long)kvm->arch.sie_page2);
3363 	kvm_s390_destroy_adapters(kvm);
3364 	kvm_s390_clear_float_irqs(kvm);
3365 	kvm_s390_vsie_destroy(kvm);
3366 	kvm->arch.gmap = gmap_put(kvm->arch.gmap);
3367 	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
3368 }
3369 
3370 /* Section: vcpu related */
3371 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3372 {
3373 	struct esca_block *sca = vcpu->kvm->arch.sca;
3374 
3375 	if (!kvm_s390_use_sca_entries())
3376 		return;
3377 
3378 	clear_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
3379 	sca->cpu[vcpu->vcpu_id].sda = 0;
3380 }
3381 
3382 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3383 {
3384 	struct esca_block *sca = vcpu->kvm->arch.sca;
3385 	phys_addr_t sca_phys = virt_to_phys(sca);
3386 
3387 	/* we still need the sca header for the ipte control */
3388 	vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3389 	vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3390 	vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3391 
3392 	if (!kvm_s390_use_sca_entries())
3393 		return;
3394 
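	/* the mcn bitmap uses MSB-0 bit numbering, hence the _inv bitops */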
3395 	set_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
3396 	sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3397 }
3398 
3399 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3400 {
3401 	if (!kvm_s390_use_sca_entries())
3402 		return id < KVM_MAX_VCPUS;
3403 
3404 	return id < KVM_S390_ESCA_CPU_SLOTS;
3405 }
3406 
3407 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3408 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3409 {
3410 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3411 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3412 	vcpu->arch.cputm_start = get_tod_clock_fast();
3413 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3414 }
3415 
3416 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3417 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3418 {
3419 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3420 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
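	/* the guest CPU timer counts down, so subtract the elapsed time */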
3421 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3422 	vcpu->arch.cputm_start = 0;
3423 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3424 }
3425 
3426 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3427 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3428 {
3429 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3430 	vcpu->arch.cputm_enabled = true;
3431 	__start_cpu_timer_accounting(vcpu);
3432 }
3433 
3434 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3435 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3436 {
3437 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3438 	__stop_cpu_timer_accounting(vcpu);
3439 	vcpu->arch.cputm_enabled = false;
3440 }
3441 
3442 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3443 {
3444 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3445 	__enable_cpu_timer_accounting(vcpu);
3446 	preempt_enable();
3447 }
3448 
3449 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3450 {
3451 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3452 	__disable_cpu_timer_accounting(vcpu);
3453 	preempt_enable();
3454 }
3455 
3456 /* set the cpu timer - may only be called from the VCPU thread itself */
3457 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3458 {
3459 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3460 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3461 	if (vcpu->arch.cputm_enabled)
3462 		vcpu->arch.cputm_start = get_tod_clock_fast();
3463 	vcpu->arch.sie_block->cputm = cputm;
3464 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3465 	preempt_enable();
3466 }
3467 
3468 /* update and get the cpu timer - can also be called from other VCPU threads */
3469 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3470 {
3471 	unsigned int seq;
3472 	__u64 value;
3473 
3474 	if (unlikely(!vcpu->arch.cputm_enabled))
3475 		return vcpu->arch.sie_block->cputm;
3476 
3477 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
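	/*
	 * Seqcount reader: an odd count means the vcpu thread is currently
	 * updating the timer; masking out the low bit makes the loop retry
	 * until a consistent snapshot has been read.
	 */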
3478 	do {
3479 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3480 		/*
3481 		 * If the writer would ever execute a read in the critical
3482 		 * section, e.g. in irq context, we have a deadlock.
3483 		 */
3484 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3485 		value = vcpu->arch.sie_block->cputm;
3486 		/* if cputm_start is 0, accounting is being started/stopped */
3487 		if (likely(vcpu->arch.cputm_start))
3488 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3489 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3490 	preempt_enable();
3491 	return value;
3492 }
3493 
3494 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3495 {
3496 
3497 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3498 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3499 		__start_cpu_timer_accounting(vcpu);
3500 	vcpu->cpu = cpu;
3501 }
3502 
3503 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3504 {
3505 	vcpu->cpu = -1;
3506 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3507 		__stop_cpu_timer_accounting(vcpu);
3508 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3509 
3510 }
3511 
3512 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3513 {
3514 	mutex_lock(&vcpu->kvm->lock);
3515 	preempt_disable();
3516 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3517 	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3518 	preempt_enable();
3519 	mutex_unlock(&vcpu->kvm->lock);
3520 	if (!kvm_is_ucontrol(vcpu->kvm)) {
3521 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3522 		sca_add_vcpu(vcpu);
3523 	}
3524 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3525 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3526 }
3527 
3528 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3529 {
3530 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3531 	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3532 		return true;
3533 	return false;
3534 }
3535 
3536 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3537 {
3538 	/* At least one ECC subfunction must be present */
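	/* PCKMO function codes 32-34: ECC-P256/P384/P521, 40/41: Ed25519/Ed448 */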
3539 	return kvm_has_pckmo_subfunc(kvm, 32) ||
3540 	       kvm_has_pckmo_subfunc(kvm, 33) ||
3541 	       kvm_has_pckmo_subfunc(kvm, 34) ||
3542 	       kvm_has_pckmo_subfunc(kvm, 40) ||
3543 	       kvm_has_pckmo_subfunc(kvm, 41);
3544 
3545 }
3546 
3547 static bool kvm_has_pckmo_hmac(struct kvm *kvm)
3548 {
3549 	/* At least one HMAC subfunction must be present */
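	/* PCKMO function codes 118/122: HMAC-512/HMAC-1024 key wrapping */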
3550 	return kvm_has_pckmo_subfunc(kvm, 118) ||
3551 	       kvm_has_pckmo_subfunc(kvm, 122);
3552 }
3553 
3554 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3555 {
3556 	/*
3557 	 * If the AP instructions are not being interpreted and the MSAX3
3558 	 * facility is not configured for the guest, there is nothing to set up.
3559 	 */
3560 	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3561 		return;
3562 
3563 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3564 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3565 	vcpu->arch.sie_block->eca &= ~ECA_APIE;
3566 	vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC);
3567 
3568 	if (vcpu->kvm->arch.crypto.apie)
3569 		vcpu->arch.sie_block->eca |= ECA_APIE;
3570 
3571 	/* Set up protected key support */
3572 	if (vcpu->kvm->arch.crypto.aes_kw) {
3573 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3574 		/* ecc/hmac is also wrapped with AES key */
3575 		if (kvm_has_pckmo_ecc(vcpu->kvm))
3576 			vcpu->arch.sie_block->ecd |= ECD_ECC;
3577 		if (kvm_has_pckmo_hmac(vcpu->kvm))
3578 			vcpu->arch.sie_block->ecd |= ECD_HMAC;
3579 	}
3580 
3581 	if (vcpu->kvm->arch.crypto.dea_kw)
3582 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3583 }
3584 
3585 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3586 {
3587 	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3588 	vcpu->arch.sie_block->cbrlo = 0;
3589 }
3590 
3591 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3592 {
3593 	void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3594 
3595 	if (!cbrlo_page)
3596 		return -ENOMEM;
3597 
3598 	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3599 	return 0;
3600 }
3601 
3602 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3603 {
3604 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3605 
3606 	vcpu->arch.sie_block->ibc = model->ibc;
3607 	if (test_kvm_facility(vcpu->kvm, 7))
3608 		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3609 }
3610 
3611 static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3612 {
3613 	int rc = 0;
3614 	u16 uvrc, uvrrc;
3615 
3616 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3617 						    CPUSTAT_SM |
3618 						    CPUSTAT_STOPPED);
3619 
3620 	if (test_kvm_facility(vcpu->kvm, 78))
3621 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3622 	else if (test_kvm_facility(vcpu->kvm, 8))
3623 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3624 
3625 	kvm_s390_vcpu_setup_model(vcpu);
3626 
3627 	/* pgste_set_pte has special handling for !machine_has_esop() */
3628 	if (machine_has_esop())
3629 		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3630 	if (test_kvm_facility(vcpu->kvm, 9))
3631 		vcpu->arch.sie_block->ecb |= ECB_SRSI;
3632 	if (test_kvm_facility(vcpu->kvm, 11))
3633 		vcpu->arch.sie_block->ecb |= ECB_PTF;
3634 	if (test_kvm_facility(vcpu->kvm, 73))
3635 		vcpu->arch.sie_block->ecb |= ECB_TE;
3636 	if (!kvm_is_ucontrol(vcpu->kvm))
3637 		vcpu->arch.sie_block->ecb |= ECB_SPECI;
3638 
3639 	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3640 		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3641 	if (test_kvm_facility(vcpu->kvm, 130))
3642 		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3643 	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3644 	if (sclp.has_cei)
3645 		vcpu->arch.sie_block->eca |= ECA_CEI;
3646 	if (sclp.has_ib)
3647 		vcpu->arch.sie_block->eca |= ECA_IB;
3648 	if (sclp.has_siif)
3649 		vcpu->arch.sie_block->eca |= ECA_SII;
3650 	if (kvm_s390_use_sca_entries())
3651 		vcpu->arch.sie_block->eca |= ECA_SIGPI;
3652 	if (test_kvm_facility(vcpu->kvm, 129)) {
3653 		vcpu->arch.sie_block->eca |= ECA_VX;
3654 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3655 	}
3656 	if (test_kvm_facility(vcpu->kvm, 139))
3657 		vcpu->arch.sie_block->ecd |= ECD_MEF;
3658 	if (test_kvm_facility(vcpu->kvm, 156))
3659 		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3660 	if (vcpu->arch.sie_block->gd) {
3661 		vcpu->arch.sie_block->eca |= ECA_AIV;
3662 		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3663 			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3664 	}
3665 	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3666 	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3667 
3668 	if (sclp.has_kss)
3669 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3670 	else
3671 		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3672 
3673 	if (vcpu->kvm->arch.use_cmma) {
3674 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
3675 		if (rc)
3676 			return rc;
3677 	}
3678 	hrtimer_setup(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup, CLOCK_MONOTONIC,
3679 		      HRTIMER_MODE_REL);
3680 
3681 	vcpu->arch.sie_block->hpid = HPID_KVM;
3682 
3683 	kvm_s390_vcpu_crypto_setup(vcpu);
3684 
3685 	kvm_s390_vcpu_pci_setup(vcpu);
3686 
3687 	mutex_lock(&vcpu->kvm->lock);
3688 	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3689 		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3690 		if (rc)
3691 			kvm_s390_vcpu_unsetup_cmma(vcpu);
3692 	}
3693 	mutex_unlock(&vcpu->kvm->lock);
3694 
3695 	return rc;
3696 }
3697 
3698 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3699 {
3700 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3701 		return -EINVAL;
3702 	return 0;
3703 }
3704 
3705 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3706 {
3707 	struct sie_page *sie_page;
3708 	int rc;
3709 
3710 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3711 	vcpu->arch.mc = kvm_s390_new_mmu_cache();
3712 	if (!vcpu->arch.mc)
3713 		return -ENOMEM;
3714 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
3715 	if (!sie_page) {
3716 		kvm_s390_free_mmu_cache(vcpu->arch.mc);
3717 		vcpu->arch.mc = NULL;
3718 		return -ENOMEM;
3719 	}
3720 
3721 	vcpu->arch.sie_block = &sie_page->sie_block;
3722 	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3723 
3724 	/* the real guest size will always be smaller than msl */
3725 	vcpu->arch.sie_block->mso = 0;
3726 	vcpu->arch.sie_block->msl = sclp.hamax;
3727 
3728 	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3729 	spin_lock_init(&vcpu->arch.local_int.lock);
3730 	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3731 	seqcount_init(&vcpu->arch.cputm_seqcount);
3732 
3733 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3734 	kvm_clear_async_pf_completion_queue(vcpu);
3735 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3736 				    KVM_SYNC_GPRS |
3737 				    KVM_SYNC_ACRS |
3738 				    KVM_SYNC_CRS |
3739 				    KVM_SYNC_ARCH0 |
3740 				    KVM_SYNC_PFAULT |
3741 				    KVM_SYNC_DIAG318;
3742 	vcpu->arch.acrs_loaded = false;
3743 	kvm_s390_set_prefix(vcpu, 0);
3744 	if (test_kvm_facility(vcpu->kvm, 64))
3745 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3746 	if (test_kvm_facility(vcpu->kvm, 82))
3747 		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3748 	if (test_kvm_facility(vcpu->kvm, 133))
3749 		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3750 	if (test_kvm_facility(vcpu->kvm, 156))
3751 		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3752 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
3753 	 * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format.
3754 	 */
3755 	if (cpu_has_vx())
3756 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3757 	else
3758 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3759 
3760 	if (kvm_is_ucontrol(vcpu->kvm)) {
3761 		rc = -ENOMEM;
3762 		vcpu->arch.gmap = gmap_new_child(vcpu->kvm->arch.gmap, -1UL);
3763 		if (!vcpu->arch.gmap)
3764 			goto out_free_sie_block;
3765 	}
3766 
3767 	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p",
3768 		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3769 	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3770 
3771 	rc = kvm_s390_vcpu_setup(vcpu);
3772 	if (rc)
3773 		goto out_ucontrol_uninit;
3774 
3775 	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3776 	return 0;
3777 
3778 out_ucontrol_uninit:
3779 	if (kvm_is_ucontrol(vcpu->kvm)) {
3780 		gmap_remove_child(vcpu->arch.gmap);
3781 		vcpu->arch.gmap = gmap_put(vcpu->arch.gmap);
3782 	}
3783 out_free_sie_block:
3784 	free_page((unsigned long)(vcpu->arch.sie_block));
3785 	return rc;
3786 }
3787 
3788 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3789 {
3790 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
3791 	return kvm_s390_vcpu_has_irq(vcpu, 0);
3792 }
3793 
3794 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3795 {
3796 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3797 }
3798 
3799 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
3800 {
3801 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3802 	exit_sie(vcpu);
3803 }
3804 
3805 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
3806 {
3807 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3808 }
3809 
3810 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3811 {
3812 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3813 	exit_sie(vcpu);
3814 }
3815 
3816 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3817 {
3818 	return atomic_read(&vcpu->arch.sie_block->prog20) &
3819 	       (PROG_BLOCK_SIE | PROG_REQUEST);
3820 }
3821 
3822 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3823 {
3824 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3825 }
3826 
3827 /*
3828  * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
3829  * If the CPU is not running (e.g. waiting as idle) the function will
3830  * return immediately. */
3831 void exit_sie(struct kvm_vcpu *vcpu)
3832 {
3833 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
3834 	kvm_s390_vsie_kick(vcpu);
3835 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3836 		cpu_relax();
3837 }
3838 
3839 /* Kick a guest cpu out of SIE to process a request synchronously */
3840 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
3841 {
3842 	__kvm_make_request(req, vcpu);
3843 	kvm_s390_vcpu_request(vcpu);
3844 }
3845 
3846 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3847 {
3848 	/* do not poll with more than halt_poll_max_steal percent of steal time */
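	/*
	 * avg_steal_timer is in CPU-timer units (4096 per microsecond), so
	 * TICK_USEC << 12 is one timer tick and the quotient is the stolen
	 * share of a tick in percent.
	 */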
3849 	if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >=
3850 	    READ_ONCE(halt_poll_max_steal)) {
3851 		vcpu->stat.halt_no_poll_steal++;
3852 		return true;
3853 	}
3854 	return false;
3855 }
3856 
3857 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3858 {
3859 	/* kvm common code refers to this, but never calls it */
3860 	BUG();
3861 	return 0;
3862 }
3863 
3864 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3865 					   struct kvm_one_reg *reg)
3866 {
3867 	int r = -EINVAL;
3868 
3869 	switch (reg->id) {
3870 	case KVM_REG_S390_TODPR:
3871 		r = put_user(vcpu->arch.sie_block->todpr,
3872 			     (u32 __user *)reg->addr);
3873 		break;
3874 	case KVM_REG_S390_EPOCHDIFF:
3875 		r = put_user(vcpu->arch.sie_block->epoch,
3876 			     (u64 __user *)reg->addr);
3877 		break;
3878 	case KVM_REG_S390_CPU_TIMER:
3879 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
3880 			     (u64 __user *)reg->addr);
3881 		break;
3882 	case KVM_REG_S390_CLOCK_COMP:
3883 		r = put_user(vcpu->arch.sie_block->ckc,
3884 			     (u64 __user *)reg->addr);
3885 		break;
3886 	case KVM_REG_S390_PFTOKEN:
3887 		r = put_user(vcpu->arch.pfault_token,
3888 			     (u64 __user *)reg->addr);
3889 		break;
3890 	case KVM_REG_S390_PFCOMPARE:
3891 		r = put_user(vcpu->arch.pfault_compare,
3892 			     (u64 __user *)reg->addr);
3893 		break;
3894 	case KVM_REG_S390_PFSELECT:
3895 		r = put_user(vcpu->arch.pfault_select,
3896 			     (u64 __user *)reg->addr);
3897 		break;
3898 	case KVM_REG_S390_PP:
3899 		r = put_user(vcpu->arch.sie_block->pp,
3900 			     (u64 __user *)reg->addr);
3901 		break;
3902 	case KVM_REG_S390_GBEA:
3903 		r = put_user(vcpu->arch.sie_block->gbea,
3904 			     (u64 __user *)reg->addr);
3905 		break;
3906 	default:
3907 		break;
3908 	}
3909 
3910 	return r;
3911 }
3912 
3913 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3914 					   struct kvm_one_reg *reg)
3915 {
3916 	int r = -EINVAL;
3917 	__u64 val;
3918 
3919 	switch (reg->id) {
3920 	case KVM_REG_S390_TODPR:
3921 		r = get_user(vcpu->arch.sie_block->todpr,
3922 			     (u32 __user *)reg->addr);
3923 		break;
3924 	case KVM_REG_S390_EPOCHDIFF:
3925 		r = get_user(vcpu->arch.sie_block->epoch,
3926 			     (u64 __user *)reg->addr);
3927 		break;
3928 	case KVM_REG_S390_CPU_TIMER:
3929 		r = get_user(val, (u64 __user *)reg->addr);
3930 		if (!r)
3931 			kvm_s390_set_cpu_timer(vcpu, val);
3932 		break;
3933 	case KVM_REG_S390_CLOCK_COMP:
3934 		r = get_user(vcpu->arch.sie_block->ckc,
3935 			     (u64 __user *)reg->addr);
3936 		break;
3937 	case KVM_REG_S390_PFTOKEN:
3938 		r = get_user(vcpu->arch.pfault_token,
3939 			     (u64 __user *)reg->addr);
3940 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3941 			kvm_clear_async_pf_completion_queue(vcpu);
3942 		break;
3943 	case KVM_REG_S390_PFCOMPARE:
3944 		r = get_user(vcpu->arch.pfault_compare,
3945 			     (u64 __user *)reg->addr);
3946 		break;
3947 	case KVM_REG_S390_PFSELECT:
3948 		r = get_user(vcpu->arch.pfault_select,
3949 			     (u64 __user *)reg->addr);
3950 		break;
3951 	case KVM_REG_S390_PP:
3952 		r = get_user(vcpu->arch.sie_block->pp,
3953 			     (u64 __user *)reg->addr);
3954 		break;
3955 	case KVM_REG_S390_GBEA:
3956 		r = get_user(vcpu->arch.sie_block->gbea,
3957 			     (u64 __user *)reg->addr);
3958 		break;
3959 	default:
3960 		break;
3961 	}
3962 
3963 	return r;
3964 }
3965 
3966 static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
3967 {
3968 	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3969 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3970 	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3971 
3972 	kvm_clear_async_pf_completion_queue(vcpu);
3973 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3974 		kvm_s390_vcpu_stop(vcpu);
3975 	kvm_s390_clear_local_irqs(vcpu);
3976 }
3977 
3978 static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3979 {
3980 	/* Initial reset is a superset of the normal reset */
3981 	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3982 
3983 	/*
3984 	 * This equals initial cpu reset in pop, but we don't switch to ESA.
3985 	 * We do not only reset the internal data, but also ...
3986 	 */
3987 	vcpu->arch.sie_block->gpsw.mask = 0;
3988 	vcpu->arch.sie_block->gpsw.addr = 0;
3989 	kvm_s390_set_prefix(vcpu, 0);
3990 	kvm_s390_set_cpu_timer(vcpu, 0);
3991 	vcpu->arch.sie_block->ckc = 0;
3992 	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3993 	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3994 	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
3995 
3996 	/* ... the data in sync regs */
3997 	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3998 	vcpu->run->s.regs.ckc = 0;
3999 	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4000 	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4001 	vcpu->run->psw_addr = 0;
4002 	vcpu->run->psw_mask = 0;
4003 	vcpu->run->s.regs.todpr = 0;
4004 	vcpu->run->s.regs.cputm = 0;
4005 	vcpu->run->s.regs.ckc = 0;
4006 	vcpu->run->s.regs.pp = 0;
4007 	vcpu->run->s.regs.gbea = 1;
4008 	vcpu->run->s.regs.fpc = 0;
4009 	/*
4010 	 * Do not reset these registers in the protected case, as some of
4011 	 * them are overlaid and they are not accessible in this case
4012 	 * anyway.
4013 	 */
4014 	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4015 		vcpu->arch.sie_block->gbea = 1;
4016 		vcpu->arch.sie_block->pp = 0;
4017 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4018 		vcpu->arch.sie_block->todpr = 0;
4019 	}
4020 }
4021 
4022 static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4023 {
4024 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4025 
4026 	/* Clear reset is a superset of the initial reset */
4027 	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4028 
4029 	memset(&regs->gprs, 0, sizeof(regs->gprs));
4030 	memset(&regs->vrs, 0, sizeof(regs->vrs));
4031 	memset(&regs->acrs, 0, sizeof(regs->acrs));
4032 	memset(&regs->gscb, 0, sizeof(regs->gscb));
4033 
4034 	regs->etoken = 0;
4035 	regs->etoken_extension = 0;
4036 }
4037 
4038 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4039 {
4040 	vcpu_load(vcpu);
4041 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4042 	vcpu_put(vcpu);
4043 	return 0;
4044 }
4045 
4046 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4047 {
4048 	vcpu_load(vcpu);
4049 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
4050 	vcpu_put(vcpu);
4051 	return 0;
4052 }
4053 
4054 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4055 				  struct kvm_sregs *sregs)
4056 {
4057 	vcpu_load(vcpu);
4058 
4059 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4060 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4061 
4062 	vcpu_put(vcpu);
4063 	return 0;
4064 }
4065 
4066 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4067 				  struct kvm_sregs *sregs)
4068 {
4069 	vcpu_load(vcpu);
4070 
4071 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4072 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4073 
4074 	vcpu_put(vcpu);
4075 	return 0;
4076 }
4077 
4078 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4079 {
4080 	vcpu_load(vcpu);
4081 
4082 	vcpu->run->s.regs.fpc = fpu->fpc;
4083 	if (cpu_has_vx())
4084 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4085 				 (freg_t *) fpu->fprs);
4086 	else
4087 		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4088 
4089 	vcpu_put(vcpu);
4090 	return 0;
4091 }
4092 
4093 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4094 {
4095 	vcpu_load(vcpu);
4096 
4097 	if (cpu_has_vx())
4098 		convert_vx_to_fp((freg_t *) fpu->fprs,
4099 				 (__vector128 *) vcpu->run->s.regs.vrs);
4100 	else
4101 		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4102 	fpu->fpc = vcpu->run->s.regs.fpc;
4103 
4104 	vcpu_put(vcpu);
4105 	return 0;
4106 }
4107 
4108 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4109 {
4110 	int rc = 0;
4111 
4112 	if (!is_vcpu_stopped(vcpu))
4113 		rc = -EBUSY;
4114 	else {
4115 		vcpu->run->psw_mask = psw.mask;
4116 		vcpu->run->psw_addr = psw.addr;
4117 	}
4118 	return rc;
4119 }
4120 
4121 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4122 				  struct kvm_translation *tr)
4123 {
4124 	return -EINVAL; /* not implemented yet */
4125 }
4126 
4127 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
4128 			      KVM_GUESTDBG_USE_HW_BP | \
4129 			      KVM_GUESTDBG_ENABLE)
4130 
4131 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4132 					struct kvm_guest_debug *dbg)
4133 {
4134 	int rc = 0;
4135 
4136 	vcpu_load(vcpu);
4137 
4138 	vcpu->guest_debug = 0;
4139 	kvm_s390_clear_bp_data(vcpu);
4140 
4141 	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
4142 		rc = -EINVAL;
4143 		goto out;
4144 	}
4145 	if (!sclp.has_gpere) {
4146 		rc = -EINVAL;
4147 		goto out;
4148 	}
4149 
4150 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
4151 		vcpu->guest_debug = dbg->control;
4152 		/* enforce guest PER */
4153 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
4154 
4155 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
4156 			rc = kvm_s390_import_bp_data(vcpu, dbg);
4157 	} else {
4158 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4159 		vcpu->arch.guestdbg.last_bp = 0;
4160 	}
4161 
4162 	if (rc) {
4163 		vcpu->guest_debug = 0;
4164 		kvm_s390_clear_bp_data(vcpu);
4165 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4166 	}
4167 
4168 out:
4169 	vcpu_put(vcpu);
4170 	return rc;
4171 }
4172 
4173 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4174 				    struct kvm_mp_state *mp_state)
4175 {
4176 	int ret;
4177 
4178 	vcpu_load(vcpu);
4179 
4180 	/* CHECK_STOP and LOAD are not supported yet */
4181 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4182 				      KVM_MP_STATE_OPERATING;
4183 
4184 	vcpu_put(vcpu);
4185 	return ret;
4186 }
4187 
4188 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4189 				    struct kvm_mp_state *mp_state)
4190 {
4191 	int rc = 0;
4192 
4193 	vcpu_load(vcpu);
4194 
4195 	/* user space knows about this interface - let it control the state */
4196 	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
4197 
4198 	switch (mp_state->mp_state) {
4199 	case KVM_MP_STATE_STOPPED:
4200 		rc = kvm_s390_vcpu_stop(vcpu);
4201 		break;
4202 	case KVM_MP_STATE_OPERATING:
4203 		rc = kvm_s390_vcpu_start(vcpu);
4204 		break;
4205 	case KVM_MP_STATE_LOAD:
4206 		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4207 			rc = -ENXIO;
4208 			break;
4209 		}
4210 		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
4211 		break;
4212 	case KVM_MP_STATE_CHECK_STOP:
4213 		fallthrough;	/* CHECK_STOP is not supported yet */
4214 	default:
4215 		rc = -ENXIO;
4216 	}
4217 
4218 	vcpu_put(vcpu);
4219 	return rc;
4220 }
4221 
4222 static bool ibs_enabled(struct kvm_vcpu *vcpu)
4223 {
4224 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
4225 }
4226 
4227 static int vcpu_ucontrol_translate(struct kvm_vcpu *vcpu, gpa_t *gaddr)
4228 {
4229 	int rc;
4230 
4231 	if (kvm_is_ucontrol(vcpu->kvm)) {
4232 		rc = gmap_ucas_translate(vcpu->arch.mc, vcpu->arch.gmap, gaddr);
4233 		if (rc == -EREMOTE) {
4234 			vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4235 			vcpu->run->s390_ucontrol.trans_exc_code = *gaddr;
4236 			vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION;
4237 		}
4238 		return rc;
4239 	}
4240 	return 0;
4241 }
4242 
4243 static int kvm_s390_fixup_prefix(struct kvm_vcpu *vcpu)
4244 {
4245 	gpa_t gaddr = kvm_s390_get_prefix(vcpu);
4246 	gfn_t gfn;
4247 	int rc;
4248 
4249 	if (vcpu_ucontrol_translate(vcpu, &gaddr))
4250 		return -EREMOTE;
4251 	gfn = gpa_to_gfn(gaddr);
4252 
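	/* the prefix area (lowcore) spans two consecutive pages */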
4253 	rc = kvm_s390_faultin_gfn_simple(vcpu, NULL, gfn, true);
4254 	if (rc)
4255 		return rc;
4256 	rc = kvm_s390_faultin_gfn_simple(vcpu, NULL, gfn + 1, true);
4257 	if (rc)
4258 		return rc;
4259 
4260 	scoped_guard(write_lock, &vcpu->kvm->mmu_lock)
4261 		rc = dat_set_prefix_notif_bit(vcpu->kvm->arch.gmap->asce, gfn);
4262 	return rc;
4263 }
4264 
4265 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
4266 {
4267 retry:
4268 	kvm_s390_vcpu_request_handled(vcpu);
4269 	if (!kvm_request_pending(vcpu))
4270 		return 0;
4271 	/*
4272 	 * If the guest prefix changed, re-arm the ipte notifier for the
4273 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
4274 	 * This ensures that the ipte instruction for this request has
4275 	 * already finished. We might race against a second unmapper that
4276 	 * wants to set the blocking bit. Let's just retry the request loop.
4277 	 */
4278 	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
4279 		int rc;
4280 
4281 		rc = kvm_s390_fixup_prefix(vcpu);
4282 		if (rc) {
4283 			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4284 			return rc;
4285 		}
4286 		goto retry;
4287 	}
4288 
4289 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
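		/* invalidate the cached host cpu to force a guest TLB flush on SIE entry */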
4290 		vcpu->arch.sie_block->ihcpu = 0xffff;
4291 		goto retry;
4292 	}
4293 
4294 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
4295 		if (!ibs_enabled(vcpu)) {
4296 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4297 			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
4298 		}
4299 		goto retry;
4300 	}
4301 
4302 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
4303 		if (ibs_enabled(vcpu)) {
4304 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
4305 			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
4306 		}
4307 		goto retry;
4308 	}
4309 
4310 	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
4311 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4312 		goto retry;
4313 	}
4314 
4315 	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4316 		/*
4317 		 * Disable CMM virtualization; we will emulate the ESSA
4318 		 * instruction manually, in order to provide additional
4319 		 * functionalities needed for live migration.
4320 		 */
4321 		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4322 		goto retry;
4323 	}
4324 
4325 	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4326 		/*
4327 		 * Re-enable CMM virtualization if CMMA is available and
4328 		 * CMM has been used.
4329 		 */
4330 		if (vcpu->kvm->arch.use_cmma && uses_cmm(vcpu->arch.gmap))
4331 			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4332 		goto retry;
4333 	}
4334 
4335 	/* we left the vsie handler, nothing to do, just clear the request */
4336 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
4337 
4338 	return 0;
4339 }
4340 
4341 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4342 {
4343 	struct kvm_vcpu *vcpu;
4344 	union tod_clock clk;
4345 	unsigned long i;
4346 
4347 	preempt_disable();
4348 
4349 	store_tod_clock_ext(&clk);
4350 
4351 	kvm->arch.epoch = gtod->tod - clk.tod;
4352 	kvm->arch.epdx = 0;
4353 	if (test_kvm_facility(kvm, 139)) {
4354 		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
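		/* borrow from the epoch index if the epoch subtraction wrapped */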
4355 		if (kvm->arch.epoch > gtod->tod)
4356 			kvm->arch.epdx -= 1;
4357 	}
4358 
4359 	kvm_s390_vcpu_block_all(kvm);
4360 	kvm_for_each_vcpu(i, vcpu, kvm) {
4361 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4362 		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
4363 	}
4364 
4365 	kvm_s390_vcpu_unblock_all(kvm);
4366 	preempt_enable();
4367 }
4368 
4369 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4370 {
4371 	if (!mutex_trylock(&kvm->lock))
4372 		return 0;
4373 	__kvm_s390_set_tod_clock(kvm, gtod);
4374 	mutex_unlock(&kvm->lock);
4375 	return 1;
4376 }
4377 
4378 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
4379 				      unsigned long token)
4380 {
4381 	struct kvm_s390_interrupt inti;
4382 	struct kvm_s390_irq irq;
4383 
4384 	if (start_token) {
4385 		irq.u.ext.ext_params2 = token;
4386 		irq.type = KVM_S390_INT_PFAULT_INIT;
4387 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
4388 	} else {
4389 		inti.type = KVM_S390_INT_PFAULT_DONE;
4390 		inti.parm64 = token;
4391 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
4392 	}
4393 }
4394 
4395 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
4396 				     struct kvm_async_pf *work)
4397 {
4398 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4399 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4400 
4401 	return true;
4402 }
4403 
4404 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
4405 				 struct kvm_async_pf *work)
4406 {
4407 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4408 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4409 }
4410 
4411 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
4412 			       struct kvm_async_pf *work)
4413 {
4414 	/* s390 will always inject the page directly */
4415 }
4416 
4417 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
4418 {
4419 	/*
4420 	 * s390 will always inject the page directly,
4421 	 * but we still want check_async_completion to clean up
4422 	 */
4423 	return true;
4424 }
4425 
4426 bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
4427 {
4428 	hva_t hva;
4429 	struct kvm_arch_async_pf arch;
4430 
4431 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4432 		return false;
4433 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4434 	    vcpu->arch.pfault_compare)
4435 		return false;
4436 	if (psw_extint_disabled(vcpu))
4437 		return false;
4438 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
4439 		return false;
4440 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4441 		return false;
4442 	if (!pfault_enabled(vcpu->arch.gmap))
4443 		return false;
4444 
4445 	hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr);
4446 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4447 		return false;
4448 
4449 	return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch);
4450 }
4451 
4452 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4453 {
4454 	int rc, cpuflags;
4455 
4456 	/*
4457 	 * On s390, notifications for arriving pages will be delivered directly
4458 	 * to the guest, but the housekeeping for completed pfaults is
4459 	 * handled outside the worker.
4460 	 */
4461 	kvm_check_async_pf_completion(vcpu);
4462 
4463 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4464 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4465 
4466 	if (!kvm_is_ucontrol(vcpu->kvm)) {
4467 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
4468 		if (rc || guestdbg_exit_pending(vcpu))
4469 			return rc;
4470 	}
4471 
4472 	rc = kvm_s390_handle_requests(vcpu);
4473 	if (rc)
4474 		return rc;
4475 
4476 	if (guestdbg_enabled(vcpu)) {
4477 		kvm_s390_backup_guest_per_regs(vcpu);
4478 		kvm_s390_patch_guest_per_regs(vcpu);
4479 	}
4480 
4481 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4482 
4483 	vcpu->arch.sie_block->icptcode = 0;
4484 	current->thread.gmap_int_code = 0;
4485 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4486 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4487 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
4488 
4489 	return 0;
4490 }
4491 
4492 static int vcpu_post_run_addressing_exception(struct kvm_vcpu *vcpu)
4493 {
4494 	struct kvm_s390_pgm_info pgm_info = {
4495 		.code = PGM_ADDRESSING,
4496 	};
4497 	u8 opcode, ilen;
4498 	int rc;
4499 
4500 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4501 	trace_kvm_s390_sie_fault(vcpu);
4502 
4503 	/*
4504 	 * We want to inject an addressing exception, which is defined as a
4505 	 * suppressing or terminating exception. However, since we came here
4506 	 * by a DAT access exception, the PSW still points to the faulting
4507 	 * instruction since DAT exceptions are nullifying. So we've got
4508 	 * to look up the current opcode to get the length of the instruction
4509 	 * to be able to forward the PSW.
4510 	 */
4511 	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4512 	ilen = insn_length(opcode);
4513 	if (rc < 0) {
4514 		return rc;
4515 	} else if (rc) {
4516 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
4517 		 * Forward by arbitrary ilc, injection will take care of
4518 		 * nullification if necessary.
4519 		 */
4520 		pgm_info = vcpu->arch.pgm;
4521 		ilen = 4;
4522 	}
4523 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4524 	kvm_s390_forward_psw(vcpu, ilen);
4525 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4526 }
4527 
4528 static void kvm_s390_assert_primary_as(struct kvm_vcpu *vcpu)
4529 {
4530 	KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4531 		"Unexpected program interrupt 0x%x, TEID 0x%016lx",
4532 		current->thread.gmap_int_code, current->thread.gmap_teid.val);
4533 }
4534 
4535 static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, gpa_t gaddr, bool wr)
4536 {
4537 	struct guest_fault f = {
4538 		.write_attempt = wr,
4539 		.attempt_pfault = pfault_enabled(vcpu->arch.gmap),
4540 	};
4541 	int rc;
4542 
4543 	if (vcpu_ucontrol_translate(vcpu, &gaddr))
4544 		return -EREMOTE;
4545 	f.gfn = gpa_to_gfn(gaddr);
4546 
4547 	rc = kvm_s390_faultin_gfn(vcpu, NULL, &f);
4548 	if (rc <= 0)
4549 		return rc;
4550 	if (rc == PGM_ADDRESSING)
4551 		return vcpu_post_run_addressing_exception(vcpu);
4552 	KVM_BUG_ON(rc, vcpu->kvm);
4553 	return -EINVAL;
4554 }
4555 
4556 static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
4557 {
4558 	unsigned int foll = 0;
4559 	unsigned long gaddr;
4560 	int rc;
4561 
4562 	gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
4563 	if (kvm_s390_cur_gmap_fault_is_write())
4564 		foll = FOLL_WRITE;
4565 
4566 	switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
4567 	case 0:
4568 		vcpu->stat.exit_null++;
4569 		break;
4570 	case PGM_SECURE_STORAGE_ACCESS:
4571 	case PGM_SECURE_STORAGE_VIOLATION:
4572 		kvm_s390_assert_primary_as(vcpu);
4573 		/*
4574 		 * This can happen after a reboot with asynchronous teardown;
4575 		 * the new guest (normal or protected) will run on top of the
4576 		 * previous protected guest. The old pages need to be destroyed
4577 		 * so the new guest can use them.
4578 		 */
4579 		if (kvm_s390_pv_destroy_page(vcpu->kvm, gaddr)) {
4580 			/*
4581 			 * Either KVM messed up the secure guest mapping or the
4582 			 * same page is mapped into multiple secure guests.
4583 			 *
4584 			 * This exception is only triggered when a guest 2 is
4585 			 * running and can therefore never occur in kernel
4586 			 * context.
4587 			 */
4588 			pr_warn_ratelimited("Secure storage violation (%x) in task: %s, pid %d\n",
4589 					    current->thread.gmap_int_code, current->comm,
4590 					    current->pid);
4591 			send_sig(SIGSEGV, current, 0);
4592 		}
4593 		break;
4594 	case PGM_NON_SECURE_STORAGE_ACCESS:
4595 		kvm_s390_assert_primary_as(vcpu);
4596 		/*
4597 		 * This is normal operation; a page belonging to a protected
4598 		 * guest has not been imported yet. Try to import the page into
4599 		 * the protected guest.
4600 		 */
4601 		rc = kvm_s390_pv_convert_to_secure(vcpu->kvm, gaddr);
4602 		if (rc == -EINVAL)
4603 			send_sig(SIGSEGV, current, 0);
4604 		if (rc != -ENXIO)
4605 			break;
4606 		foll = FOLL_WRITE;
4607 		fallthrough;
4608 	case PGM_PROTECTION:
4609 	case PGM_SEGMENT_TRANSLATION:
4610 	case PGM_PAGE_TRANSLATION:
4611 	case PGM_ASCE_TYPE:
4612 	case PGM_REGION_FIRST_TRANS:
4613 	case PGM_REGION_SECOND_TRANS:
4614 	case PGM_REGION_THIRD_TRANS:
4615 		kvm_s390_assert_primary_as(vcpu);
4616 		return vcpu_dat_fault_handler(vcpu, gaddr, foll);
4617 	default:
4618 		KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4619 			current->thread.gmap_int_code, current->thread.gmap_teid.val);
4620 		send_sig(SIGSEGV, current, 0);
4621 		break;
4622 	}
4623 	return 0;
4624 }
4625 
4626 static int vcpu_post_run(struct kvm_vcpu *vcpu, int sie_return)
4627 {
4628 	struct mcck_volatile_info *mcck_info;
4629 	struct sie_page *sie_page;
4630 	int rc;
4631 
4632 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4633 		   vcpu->arch.sie_block->icptcode);
4634 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4635 
4636 	if (guestdbg_enabled(vcpu))
4637 		kvm_s390_restore_guest_per_regs(vcpu);
4638 
4639 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4640 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4641 
4642 	if (sie_return == SIE64_RETURN_MCCK) {
4643 		sie_page = container_of(vcpu->arch.sie_block,
4644 					struct sie_page, sie_block);
4645 		mcck_info = &sie_page->mcck_info;
4646 		kvm_s390_reinject_machine_check(vcpu, mcck_info);
4647 		return 0;
4648 	}
4649 	WARN_ON_ONCE(sie_return != SIE64_RETURN_NORMAL);
4650 
4651 	if (vcpu->arch.sie_block->icptcode > 0) {
4652 		rc = kvm_handle_sie_intercept(vcpu);
4653 
4654 		if (rc != -EOPNOTSUPP)
4655 			return rc;
4656 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4657 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4658 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4659 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4660 		return -EREMOTE;
4661 	}
4662 
4663 	return vcpu_post_run_handle_fault(vcpu);
4664 }
4665 
4666 int noinstr kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
4667 				    u64 *gprs, unsigned long gasce)
4668 {
4669 	int ret;
4670 
4671 	guest_state_enter_irqoff();
4672 
4673 	/*
4674 	 * The guest_state_{enter,exit}_irqoff() functions inform lockdep and
4675 	 * tracing that entry to the guest will enable host IRQs, and exit from
4676 	 * the guest will disable host IRQs.
4677 	 */
4678 	ret = sie64a(scb, gprs, gasce);
4679 
4680 	guest_state_exit_irqoff();
4681 
4682 	return ret;
4683 }
4684 
4685 #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
4686 static int __vcpu_run(struct kvm_vcpu *vcpu)
4687 {
4688 	int rc, sie_return;
4689 	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4690 
4691 	/*
4692 	 * We try to hold kvm->srcu during most of vcpu_run (except when
4693 	 * running the guest), so that memslots (and other stuff) are protected
4694 	 */
4695 	kvm_vcpu_srcu_read_lock(vcpu);
4696 
4697 	while (true) {
4698 		rc = vcpu_pre_run(vcpu);
4699 		kvm_vcpu_srcu_read_unlock(vcpu);
4700 		if (rc || guestdbg_exit_pending(vcpu))
4701 			break;
4702 
4703 		/*
4704 		 * As PF_VCPU will be used in fault handler, between
4705 		 * guest_timing_enter_irqoff and guest_timing_exit_irqoff
4706 		 * should be no uaccess.
4707 		 */
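		/*
		 * For protected guests the general registers are exchanged
		 * via the SIE page (pv_grregs), so mirror them from kvm_run
		 * before entry and back again after exit.
		 */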
4708 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4709 			memcpy(sie_page->pv_grregs,
4710 			       vcpu->run->s.regs.gprs,
4711 			       sizeof(sie_page->pv_grregs));
4712 		}
4713 
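		/*
		 * Handle pending entry work (signals, resched, ...) with IRQs
		 * enabled, then re-check with IRQs disabled so that no new
		 * work can slip in between the check and the SIE entry.
		 */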
4714 xfer_to_guest_mode_check:
4715 		local_irq_disable();
4716 		xfer_to_guest_mode_prepare();
4717 		if (xfer_to_guest_mode_work_pending()) {
4718 			local_irq_enable();
4719 			rc = kvm_xfer_to_guest_mode_handle_work(vcpu);
4720 			if (rc)
4721 				break;
4722 			goto xfer_to_guest_mode_check;
4723 		}
4724 
4725 		guest_timing_enter_irqoff();
4726 		__disable_cpu_timer_accounting(vcpu);
4727 
4728 		sie_return = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
4729 						     vcpu->run->s.regs.gprs,
4730 						     vcpu->arch.gmap->asce.val);
4731 
4732 		__enable_cpu_timer_accounting(vcpu);
4733 		guest_timing_exit_irqoff();
4734 		local_irq_enable();
4735 
4736 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4737 			memcpy(vcpu->run->s.regs.gprs,
4738 			       sie_page->pv_grregs,
4739 			       sizeof(sie_page->pv_grregs));
4740 			/*
4741 			 * We're not allowed to inject interrupts on intercepts
4742 			 * that leave the guest state in an "in-between" state
4743 			 * where the next SIE entry will do a continuation.
4744 			 * Fence interrupts in our "internal" PSW.
4745 			 */
4746 			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4747 			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4748 				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4749 			}
4750 		}
4751 		kvm_vcpu_srcu_read_lock(vcpu);
4752 
4753 		rc = vcpu_post_run(vcpu, sie_return);
4754 		if (rc || guestdbg_exit_pending(vcpu)) {
4755 			kvm_vcpu_srcu_read_unlock(vcpu);
4756 			break;
4757 		}
4758 	}
4759 
4760 	return rc;
4761 }
4762 
4763 static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4764 {
4765 	struct kvm_run *kvm_run = vcpu->run;
4766 	struct runtime_instr_cb *riccb;
4767 	struct gs_cb *gscb;
4768 
4769 	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4770 	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4771 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4772 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4773 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4774 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4775 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4776 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4777 	}
4778 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4779 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4780 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4781 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4782 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4783 			kvm_clear_async_pf_completion_queue(vcpu);
4784 	}
4785 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4786 		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4787 		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4788 		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4789 	}
4790 	/*
4791 	 * If userspace sets the riccb (e.g. after migration) to a valid state,
4792 	 * we should enable RI here instead of doing the lazy enablement.
4793 	 */
4794 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4795 	    test_kvm_facility(vcpu->kvm, 64) &&
4796 	    riccb->v &&
4797 	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4798 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
4799 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
4800 	}
4801 	/*
4802 	 * If userspace sets the gscb (e.g. after migration) to non-zero,
4803 	 * we should enable GS here instead of doing the lazy enablement.
4804 	 */
4805 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4806 	    test_kvm_facility(vcpu->kvm, 133) &&
4807 	    gscb->gssm &&
4808 	    !vcpu->arch.gs_enabled) {
4809 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4810 		vcpu->arch.sie_block->ecb |= ECB_GS;
4811 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4812 		vcpu->arch.gs_enabled = 1;
4813 	}
4814 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4815 	    test_kvm_facility(vcpu->kvm, 82)) {
4816 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4817 		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4818 	}
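	/*
	 * Swap in the guest's guarded storage control block; the host's
	 * gscb is saved here and restored again in store_regs_fmt2().
	 */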
4819 	if (cpu_has_gs()) {
4820 		preempt_disable();
4821 		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
4822 		if (current->thread.gs_cb) {
4823 			vcpu->arch.host_gscb = current->thread.gs_cb;
4824 			save_gs_cb(vcpu->arch.host_gscb);
4825 		}
4826 		if (vcpu->arch.gs_enabled) {
4827 			current->thread.gs_cb = (struct gs_cb *)
4828 						&vcpu->run->s.regs.gscb;
4829 			restore_gs_cb(current->thread.gs_cb);
4830 		}
4831 		preempt_enable();
4832 	}
4833 	/* SIE will load etoken directly from SDNX and therefore kvm_run */
4834 }
4835 
4836 static void sync_regs(struct kvm_vcpu *vcpu)
4837 {
4838 	struct kvm_run *kvm_run = vcpu->run;
4839 
4840 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4841 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4842 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4843 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4844 		/* some control register changes require a tlb flush */
4845 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4846 	}
4847 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4848 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4849 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4850 	}
4851 	save_access_regs(vcpu->arch.host_acrs);
4852 	restore_access_regs(vcpu->run->s.regs.acrs);
4853 	vcpu->arch.acrs_loaded = true;
4854 	kvm_s390_fpu_load(vcpu->run);
4855 	/* Sync fmt2 only data */
4856 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
4857 		sync_regs_fmt2(vcpu);
4858 	} else {
4859 		/*
4860 		 * In several places we have to modify our internal view to
4861 		 * not do things that are disallowed by the ultravisor. For
4862 		 * example we must not inject interrupts after specific exits
4863 		 * (e.g. 112 prefix page not secure). We do this by turning
4864 		 * off the machine check, external and I/O interrupt bits
4865 		 * of our PSW copy. To avoid getting validity intercepts, we
4866 		 * only accept the condition code from userspace.
4867 		 */
4868 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4869 		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4870 						   PSW_MASK_CC;
4871 	}
4872 
4873 	kvm_run->kvm_dirty_regs = 0;
4874 }
4875 
4876 static void store_regs_fmt2(struct kvm_vcpu *vcpu)
4877 {
4878 	struct kvm_run *kvm_run = vcpu->run;
4879 
4880 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4881 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4882 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
4883 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
4884 	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
4885 	if (cpu_has_gs()) {
4886 		preempt_disable();
4887 		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
4888 		if (vcpu->arch.gs_enabled)
4889 			save_gs_cb(current->thread.gs_cb);
4890 		current->thread.gs_cb = vcpu->arch.host_gscb;
4891 		restore_gs_cb(vcpu->arch.host_gscb);
4892 		if (!vcpu->arch.host_gscb)
4893 			local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
4894 		vcpu->arch.host_gscb = NULL;
4895 		preempt_enable();
4896 	}
4897 	/* SIE will save etoken directly into SDNX and therefore kvm_run */
4898 }
4899 
4900 static void store_regs(struct kvm_vcpu *vcpu)
4901 {
4902 	struct kvm_run *kvm_run = vcpu->run;
4903 
4904 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4905 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4906 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4907 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4908 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4909 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4910 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4911 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4912 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4913 	save_access_regs(vcpu->run->s.regs.acrs);
4914 	restore_access_regs(vcpu->arch.host_acrs);
4915 	vcpu->arch.acrs_loaded = false;
4916 	kvm_s390_fpu_store(vcpu->run);
4917 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
4918 		store_regs_fmt2(vcpu);
4919 }
4920 
4921 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
4922 {
4923 	struct kvm_run *kvm_run = vcpu->run;
4924 	DECLARE_KERNEL_FPU_ONSTACK32(fpu);
4925 	int rc;
4926 
4927 	/*
4928 	 * Running a VM while dumping always has the potential to
4929 	 * produce inconsistent dump data. But for PV vcpus a SIE
4930 	 * entry while dumping could also lead to a fatal validity
4931 	 * intercept which we absolutely want to avoid.
4932 	 */
4933 	if (vcpu->kvm->arch.pv.dumping)
4934 		return -EINVAL;
4935 
4936 	if (!vcpu->wants_to_run)
4937 		return -EINTR;
4938 
4939 	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4940 	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4941 		return -EINVAL;
4942 
4943 	vcpu_load(vcpu);
4944 
4945 	if (guestdbg_exit_pending(vcpu)) {
4946 		kvm_s390_prepare_debug_exit(vcpu);
4947 		rc = 0;
4948 		goto out;
4949 	}
4950 
4951 	kvm_sigset_activate(vcpu);
4952 
4953 	/*
4954 	 * no need to check the return value of vcpu_start: it can only fail
4955 	 * for protvirt, and protvirt guests always use user cpu state control
4956 	 */
4957 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4958 		kvm_s390_vcpu_start(vcpu);
4959 	} else if (is_vcpu_stopped(vcpu)) {
4960 		pr_err_ratelimited("can't run stopped vcpu %d\n",
4961 				   vcpu->vcpu_id);
4962 		rc = -EINVAL;
4963 		goto out;
4964 	}
4965 
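	/*
	 * Save the host's floating point control and vector registers on the
	 * stack; sync_regs() then loads the guest FP/VX state into the real
	 * registers, and store_regs() writes it back after the run loop.
	 */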
4966 	kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
4967 	sync_regs(vcpu);
4968 	enable_cpu_timer_accounting(vcpu);
4969 
4970 	might_fault();
4971 	rc = __vcpu_run(vcpu);
4972 
4973 	if (signal_pending(current) && !rc) {
4974 		kvm_run->exit_reason = KVM_EXIT_INTR;
4975 		vcpu->stat.signal_exits++;
4976 		rc = -EINTR;
4977 	}
4978 
4979 	if (guestdbg_exit_pending(vcpu) && !rc)  {
4980 		kvm_s390_prepare_debug_exit(vcpu);
4981 		rc = 0;
4982 	}
4983 
4984 	if (rc == -EREMOTE) {
4985 		/* userspace support is needed, kvm_run has been prepared */
4986 		rc = 0;
4987 	}
4988 
4989 	disable_cpu_timer_accounting(vcpu);
4990 	store_regs(vcpu);
4991 	kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
4992 
4993 	kvm_sigset_deactivate(vcpu);
4994 
4995 	vcpu->stat.exit_userspace++;
4996 out:
4997 	vcpu_put(vcpu);
4998 	return rc;
4999 }
5000 
5001 /*
5002  * store status at address
5003  * we have two special cases:
5004  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5005  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5006  */
5007 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
5008 {
5009 	unsigned char archmode = 1;
5010 	freg_t fprs[NUM_FPRS];
5011 	unsigned int px;
5012 	u64 clkcomp, cputm;
5013 	int rc;
5014 
5015 	px = kvm_s390_get_prefix(vcpu);
5016 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
5017 		if (write_guest_abs(vcpu, 163, &archmode, 1))
5018 			return -EFAULT;
5019 		gpa = 0;
5020 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
5021 		if (write_guest_real(vcpu, 163, &archmode, 1))
5022 			return -EFAULT;
5023 		gpa = px;
5024 	} else
5025 		gpa -= __LC_FPREGS_SAVE_AREA;
5026 
5027 	/* manually convert vector registers if necessary */
5028 	if (cpu_has_vx()) {
5029 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
5030 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5031 				     fprs, 128);
5032 	} else {
5033 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5034 				     vcpu->run->s.regs.fprs, 128);
5035 	}
5036 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
5037 			      vcpu->run->s.regs.gprs, 128);
5038 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
5039 			      &vcpu->arch.sie_block->gpsw, 16);
5040 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
5041 			      &px, 4);
5042 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
5043 			      &vcpu->run->s.regs.fpc, 4);
5044 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
5045 			      &vcpu->arch.sie_block->todpr, 4);
5046 	cputm = kvm_s390_get_cpu_timer(vcpu);
5047 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
5048 			      &cputm, 8);
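	/* the save area holds only bits 0-55 of the clock comparator */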
5049 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
5050 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
5051 			      &clkcomp, 8);
5052 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
5053 			      &vcpu->run->s.regs.acrs, 64);
5054 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
5055 			      &vcpu->arch.sie_block->gcr, 128);
5056 	return rc ? -EFAULT : 0;
5057 }
5058 
5059 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5060 {
5061 	/*
5062 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
5063 	 * switch in the run ioctl. Let's update our copies before we save
5064 	 * them into the save area.
5065 	 */
5066 	kvm_s390_fpu_store(vcpu->run);
5067 	save_access_regs(vcpu->run->s.regs.acrs);
5068 
5069 	return kvm_s390_store_status_unloaded(vcpu, addr);
5070 }
5071 
5072 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5073 {
5074 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
5075 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
5076 }
5077 
5078 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
5079 {
5080 	unsigned long i;
5081 	struct kvm_vcpu *vcpu;
5082 
5083 	kvm_for_each_vcpu(i, vcpu, kvm) {
5084 		__disable_ibs_on_vcpu(vcpu);
5085 	}
5086 }
5087 
5088 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5089 {
5090 	if (!sclp.has_ibs)
5091 		return;
5092 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
5093 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
5094 }
5095 
5096 int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
5097 {
5098 	int i, online_vcpus, r = 0, started_vcpus = 0;
5099 
5100 	if (!is_vcpu_stopped(vcpu))
5101 		return 0;
5102 
5103 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
5104 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5105 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5106 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5107 
5108 	/* Let's tell the UV that we want to change into the operating state */
5109 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5110 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
5111 		if (r) {
5112 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5113 			return r;
5114 		}
5115 	}
5116 
5117 	for (i = 0; i < online_vcpus; i++) {
5118 		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
5119 			started_vcpus++;
5120 	}
5121 
5122 	if (started_vcpus == 0) {
5123 		/* we're the only active VCPU -> speed it up */
5124 		__enable_ibs_on_vcpu(vcpu);
5125 	} else if (started_vcpus == 1) {
5126 		/*
5127 		 * As we are starting a second VCPU, we have to disable
5128 		 * the IBS facility on all VCPUs to remove potentially
5129 		 * outstanding ENABLE requests.
5130 		 */
5131 		__disable_ibs_on_all_vcpus(vcpu->kvm);
5132 	}
5133 
5134 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
5135 	/*
5136 	 * The real PSW might have changed due to a RESTART interpreted by the
5137 	 * ultravisor. We block all interrupts and let the next sie exit
5138 	 * refresh our view.
5139 	 */
5140 	if (kvm_s390_pv_cpu_is_protected(vcpu))
5141 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5142 	/*
5143 	 * Another VCPU might have used IBS while we were offline.
5144 	 * Let's play safe and flush the VCPU at startup.
5145 	 */
5146 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5147 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5148 	return 0;
5149 }
5150 
5151 int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
5152 {
5153 	int i, online_vcpus, r = 0, started_vcpus = 0;
5154 	struct kvm_vcpu *started_vcpu = NULL;
5155 
5156 	if (is_vcpu_stopped(vcpu))
5157 		return 0;
5158 
5159 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
5160 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5161 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5162 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5163 
5164 	/* Let's tell the UV that we want to change into the stopped state */
5165 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5166 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5167 		if (r) {
5168 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5169 			return r;
5170 		}
5171 	}
5172 
5173 	/*
5174 	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5175 	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5176 	 * have been fully processed. This will ensure that the VCPU
5177 	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5178 	 */
5179 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
5180 	kvm_s390_clear_stop_irq(vcpu);
5181 
5182 	__disable_ibs_on_vcpu(vcpu);
5183 
5184 	for (i = 0; i < online_vcpus; i++) {
5185 		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5186 
5187 		if (!is_vcpu_stopped(tmp)) {
5188 			started_vcpus++;
5189 			started_vcpu = tmp;
5190 		}
5191 	}
5192 
5193 	if (started_vcpus == 1) {
5194 		/*
5195 		 * As we only have one VCPU left, we want to enable the
5196 		 * IBS facility for that VCPU to speed it up.
5197 		 */
5198 		__enable_ibs_on_vcpu(started_vcpu);
5199 	}
5200 
5201 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5202 	return 0;
5203 }
5204 
5205 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5206 				     struct kvm_enable_cap *cap)
5207 {
5208 	int r;
5209 
5210 	if (cap->flags)
5211 		return -EINVAL;
5212 
5213 	switch (cap->cap) {
5214 	case KVM_CAP_S390_CSS_SUPPORT:
5215 		if (!vcpu->kvm->arch.css_support) {
5216 			vcpu->kvm->arch.css_support = 1;
5217 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5218 			trace_kvm_s390_enable_css(vcpu->kvm);
5219 		}
5220 		r = 0;
5221 		break;
5222 	default:
5223 		r = -EINVAL;
5224 		break;
5225 	}
5226 	return r;
5227 }
5228 
5229 static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
5230 				  struct kvm_s390_mem_op *mop)
5231 {
5232 	void __user *uaddr = (void __user *)mop->buf;
5233 	void *sida_addr;
5234 	int r = 0;
5235 
5236 	if (mop->flags || !mop->size)
5237 		return -EINVAL;
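	/* reject offsets where size + sida_offset would wrap around */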
5238 	if (mop->size + mop->sida_offset < mop->size)
5239 		return -EINVAL;
5240 	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
5241 		return -E2BIG;
5242 	if (!kvm_s390_pv_cpu_is_protected(vcpu))
5243 		return -EINVAL;
5244 
5245 	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5246 
5247 	switch (mop->op) {
5248 	case KVM_S390_MEMOP_SIDA_READ:
5249 		if (copy_to_user(uaddr, sida_addr, mop->size))
5250 			r = -EFAULT;
5251 
5252 		break;
5253 	case KVM_S390_MEMOP_SIDA_WRITE:
5254 		if (copy_from_user(sida_addr, uaddr, mop->size))
5255 			r = -EFAULT;
5256 		break;
5257 	}
5258 	return r;
5259 }
5260 
5261 static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
5262 				 struct kvm_s390_mem_op *mop)
5263 {
5264 	void __user *uaddr = (void __user *)mop->buf;
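	/* tmpbuf is freed automatically on every return path via __free() */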
5265 	void *tmpbuf __free(kvfree) = NULL;
5266 	enum gacc_mode acc_mode;
5267 	int r;
5268 
5269 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
5270 					KVM_S390_MEMOP_F_CHECK_ONLY |
5271 					KVM_S390_MEMOP_F_SKEY_PROTECTION);
5272 	if (r)
5273 		return r;
5274 	if (mop->ar >= NUM_ACRS)
5275 		return -EINVAL;
5276 	if (kvm_s390_pv_cpu_is_protected(vcpu))
5277 		return -EINVAL;
5278 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
5279 		tmpbuf = vmalloc(mop->size);
5280 		if (!tmpbuf)
5281 			return -ENOMEM;
5282 	}
5283 
5284 	acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
5285 	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5286 		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5287 				    acc_mode, mop->key);
5288 	} else if (acc_mode == GACC_FETCH) {
5289 		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5290 					mop->size, mop->key);
5291 		if (!r && copy_to_user(uaddr, tmpbuf, mop->size))
5292 			return -EFAULT;
5293 	} else {
5294 		if (copy_from_user(tmpbuf, uaddr, mop->size))
5295 			return -EFAULT;
5296 		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5297 					 mop->size, mop->key);
5298 	}
5299 
5300 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
5301 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5302 
5303 	return r;
5304 }
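
/*
 * Userspace reaches the logical read/write path above through the
 * KVM_S390_MEM_OP vcpu ioctl. A minimal, illustrative sketch (vcpu_fd,
 * guest_addr, buffer and length are placeholders):
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = guest_addr,
 *		.buf   = (__u64)(unsigned long)buffer,
 *		.size  = length,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.ar    = 0,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop) < 0)
 *		err(1, "KVM_S390_MEM_OP");
 */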
5305 
5306 static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
5307 				     struct kvm_s390_mem_op *mop)
5308 {
5309 	int r, srcu_idx;
5310 
5311 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5312 
5313 	switch (mop->op) {
5314 	case KVM_S390_MEMOP_LOGICAL_READ:
5315 	case KVM_S390_MEMOP_LOGICAL_WRITE:
5316 		r = kvm_s390_vcpu_mem_op(vcpu, mop);
5317 		break;
5318 	case KVM_S390_MEMOP_SIDA_READ:
5319 	case KVM_S390_MEMOP_SIDA_WRITE:
5320 		/* we are locked against sida going away by the vcpu->mutex */
5321 		r = kvm_s390_vcpu_sida_op(vcpu, mop);
5322 		break;
5323 	default:
5324 		r = -EINVAL;
5325 	}
5326 
5327 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
5328 	return r;
5329 }
5330 
5331 long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
5332 				  unsigned long arg)
5333 {
5334 	struct kvm_vcpu *vcpu = filp->private_data;
5335 	void __user *argp = (void __user *)arg;
5336 	int rc;
5337 
5338 	switch (ioctl) {
5339 	case KVM_S390_IRQ: {
5340 		struct kvm_s390_irq s390irq;
5341 
5342 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
5343 			return -EFAULT;
5344 		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5345 		break;
5346 	}
5347 	case KVM_S390_INTERRUPT: {
5348 		struct kvm_s390_interrupt s390int;
5349 		struct kvm_s390_irq s390irq = {};
5350 
5351 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
5352 			return -EFAULT;
5353 		if (s390int_to_s390irq(&s390int, &s390irq))
5354 			return -EINVAL;
5355 		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5356 		break;
5357 	}
5358 	default:
5359 		rc = -ENOIOCTLCMD;
5360 		break;
5361 	}
5362 
5363 	/*
5364 	 * To simplify single stepping of userspace-emulated instructions,
5365 	 * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
5366 	 * should_handle_per_ifetch()). However, if userspace emulation injects
5367 	 * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
5368 	 * after (and not before) the interrupt delivery.
5369 	 */
5370 	if (!rc)
5371 		vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
5372 
5373 	return rc;
5374 }
5375 
5376 static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
5377 					struct kvm_pv_cmd *cmd)
5378 {
5379 	struct kvm_s390_pv_dmp dmp;
5380 	void *data;
5381 	int ret;
5382 
5383 	/* Dump initialization is a prerequisite */
5384 	if (!vcpu->kvm->arch.pv.dumping)
5385 		return -EINVAL;
5386 
5387 	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
5388 		return -EFAULT;
5389 
5390 	/* We only handle this subcmd right now */
5391 	if (dmp.subcmd != KVM_PV_DUMP_CPU)
5392 		return -EINVAL;
5393 
5394 	/* The CPU dump length equals the cpu storage donated at CPU creation. */
5395 	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
5396 		return -EINVAL;
5397 
5398 	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
5399 	if (!data)
5400 		return -ENOMEM;
5401 
5402 	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
5403 
5404 	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
5405 		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
5406 
5407 	if (ret)
5408 		ret = -EINVAL;
5409 
5410 	/* On success copy over the dump data */
5411 	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
5412 		ret = -EFAULT;
5413 
5414 	kvfree(data);
5415 	return ret;
5416 }
5417 
5418 long kvm_arch_vcpu_ioctl(struct file *filp,
5419 			 unsigned int ioctl, unsigned long arg)
5420 {
5421 	struct kvm_vcpu *vcpu = filp->private_data;
5422 	void __user *argp = (void __user *)arg;
5423 	int idx;
5424 	long r;
5425 	u16 rc, rrc;
5426 
5427 	vcpu_load(vcpu);
5428 
5429 	switch (ioctl) {
5430 	case KVM_S390_STORE_STATUS:
5431 		idx = srcu_read_lock(&vcpu->kvm->srcu);
5432 		r = kvm_s390_store_status_unloaded(vcpu, arg);
5433 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5434 		break;
5435 	case KVM_S390_SET_INITIAL_PSW: {
5436 		psw_t psw;
5437 
5438 		r = -EFAULT;
5439 		if (copy_from_user(&psw, argp, sizeof(psw)))
5440 			break;
5441 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
5442 		break;
5443 	}
5444 	case KVM_S390_CLEAR_RESET:
5445 		r = 0;
5446 		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
5447 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5448 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5449 					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
5450 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
5451 				   rc, rrc);
5452 		}
5453 		break;
5454 	case KVM_S390_INITIAL_RESET:
5455 		r = 0;
5456 		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
5457 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5458 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5459 					  UVC_CMD_CPU_RESET_INITIAL,
5460 					  &rc, &rrc);
5461 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
5462 				   rc, rrc);
5463 		}
5464 		break;
5465 	case KVM_S390_NORMAL_RESET:
5466 		r = 0;
5467 		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
5468 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5469 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5470 					  UVC_CMD_CPU_RESET, &rc, &rrc);
5471 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
5472 				   rc, rrc);
5473 		}
5474 		break;
5475 	case KVM_SET_ONE_REG:
5476 	case KVM_GET_ONE_REG: {
5477 		struct kvm_one_reg reg;
5478 		r = -EINVAL;
5479 		if (kvm_s390_pv_cpu_is_protected(vcpu))
5480 			break;
5481 		r = -EFAULT;
5482 		if (copy_from_user(&reg, argp, sizeof(reg)))
5483 			break;
5484 		if (ioctl == KVM_SET_ONE_REG)
5485 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
5486 		else
5487 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
5488 		break;
5489 	}
5490 #ifdef CONFIG_KVM_S390_UCONTROL
5491 	case KVM_S390_UCAS_MAP: {
5492 		struct kvm_s390_ucas_mapping ucas;
5493 
5494 		r = -EFAULT;
5495 		if (copy_from_user(&ucas, argp, sizeof(ucas)))
5496 			break;
5497 
5498 		r = -EINVAL;
5499 		if (!kvm_is_ucontrol(vcpu->kvm))
5500 			break;
5501 		if (!IS_ALIGNED(ucas.user_addr | ucas.vcpu_addr | ucas.length, _SEGMENT_SIZE))
5502 			break;
5503 
5504 		r = gmap_ucas_map(vcpu->arch.gmap, gpa_to_gfn(ucas.user_addr),
5505 				  gpa_to_gfn(ucas.vcpu_addr),
5506 				  ucas.length >> _SEGMENT_SHIFT);
5507 		break;
5508 	}
5509 	case KVM_S390_UCAS_UNMAP: {
5510 		struct kvm_s390_ucas_mapping ucas;
5511 
5512 		r = -EFAULT;
5513 		if (copy_from_user(&ucas, argp, sizeof(ucas)))
5514 			break;
5515 
5516 		r = -EINVAL;
5517 		if (!kvm_is_ucontrol(vcpu->kvm))
5518 			break;
5519 		if (!IS_ALIGNED(ucas.vcpu_addr | ucas.length, _SEGMENT_SIZE))
5520 			break;
5521 
5522 		gmap_ucas_unmap(vcpu->arch.gmap, gpa_to_gfn(ucas.vcpu_addr),
5523 				ucas.length >> _SEGMENT_SHIFT);
5524 		r = 0;
5525 		break;
5526 	}
5527 #endif
5528 	case KVM_S390_VCPU_FAULT: {
5529 		gpa_t gaddr = arg;
5530 
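		/*
		 * Fault in the page backing the given guest address; for
		 * user-controlled VMs the address is translated first.
		 */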
5531 		scoped_guard(srcu, &vcpu->kvm->srcu) {
5532 			r = vcpu_ucontrol_translate(vcpu, &gaddr);
5533 			if (r)
5534 				break;
5535 
5536 			r = kvm_s390_faultin_gfn_simple(vcpu, NULL, gpa_to_gfn(gaddr), false);
5537 			if (r == PGM_ADDRESSING)
5538 				r = -EFAULT;
5539 			if (r <= 0)
5540 				break;
5541 			r = -EIO;
5542 			KVM_BUG_ON(r, vcpu->kvm);
5543 		}
5544 		break;
5545 	}
5546 	case KVM_ENABLE_CAP:
5547 	{
5548 		struct kvm_enable_cap cap;
5549 		r = -EFAULT;
5550 		if (copy_from_user(&cap, argp, sizeof(cap)))
5551 			break;
5552 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5553 		break;
5554 	}
5555 	case KVM_S390_MEM_OP: {
5556 		struct kvm_s390_mem_op mem_op;
5557 
5558 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
5559 			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
5560 		else
5561 			r = -EFAULT;
5562 		break;
5563 	}
5564 	case KVM_S390_SET_IRQ_STATE: {
5565 		struct kvm_s390_irq_state irq_state;
5566 
5567 		r = -EFAULT;
5568 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5569 			break;
5570 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5571 		    irq_state.len == 0 ||
5572 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5573 			r = -EINVAL;
5574 			break;
5575 		}
5576 		/* do not use irq_state.flags, it will break old QEMUs */
5577 		r = kvm_s390_set_irq_state(vcpu,
5578 					   (void __user *) irq_state.buf,
5579 					   irq_state.len);
5580 		break;
5581 	}
5582 	case KVM_S390_GET_IRQ_STATE: {
5583 		struct kvm_s390_irq_state irq_state;
5584 
5585 		r = -EFAULT;
5586 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5587 			break;
5588 		if (irq_state.len == 0) {
5589 			r = -EINVAL;
5590 			break;
5591 		}
5592 		/* do not use irq_state.flags, it will break old QEMUs */
5593 		r = kvm_s390_get_irq_state(vcpu,
5594 					   (__u8 __user *)  irq_state.buf,
5595 					   irq_state.len);
5596 		break;
5597 	}
5598 	case KVM_S390_PV_CPU_COMMAND: {
5599 		struct kvm_pv_cmd cmd;
5600 
5601 		r = -EINVAL;
5602 		if (!is_prot_virt_host())
5603 			break;
5604 
5605 		r = -EFAULT;
5606 		if (copy_from_user(&cmd, argp, sizeof(cmd)))
5607 			break;
5608 
5609 		r = -EINVAL;
5610 		if (cmd.flags)
5611 			break;
5612 
5613 		/* We only handle this cmd right now */
5614 		if (cmd.cmd != KVM_PV_DUMP)
5615 			break;
5616 
5617 		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
5618 
5619 		/* Always copy over UV rc / rrc data */
5620 		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
5621 				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
5622 			r = -EFAULT;
5623 		break;
5624 	}
5625 	default:
5626 		r = -ENOTTY;
5627 	}
5628 
5629 	vcpu_put(vcpu);
5630 	return r;
5631 }
5632 
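/*
 * For user-controlled VMs, back the mmap of the vcpu fd at
 * KVM_S390_SIE_PAGE_OFFSET with the SIE control block so userspace can
 * access it directly; any other fault on the vcpu fd raises SIGBUS.
 */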
5633 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5634 {
5635 #ifdef CONFIG_KVM_S390_UCONTROL
5636 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
5637 		 && (kvm_is_ucontrol(vcpu->kvm))) {
5638 		vmf->page = virt_to_page(vcpu->arch.sie_block);
5639 		get_page(vmf->page);
5640 		return 0;
5641 	}
5642 #endif
5643 	return VM_FAULT_SIGBUS;
5644 }
5645 
5646 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
5647 {
5648 	return true;
5649 }
5650 
5651 /* Section: memory related */
5652 int kvm_arch_prepare_memory_region(struct kvm *kvm,
5653 				   const struct kvm_memory_slot *old,
5654 				   struct kvm_memory_slot *new,
5655 				   enum kvm_mr_change change)
5656 {
5657 	if (kvm_is_ucontrol(kvm) && new && new->id < KVM_USER_MEM_SLOTS)
5658 		return -EINVAL;
5659 
5660 	/* When we are protected, we should not change the memory slots */
5661 	if (kvm_s390_pv_get_handle(kvm))
5662 		return -EINVAL;
5663 
5664 	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
5665 		/*
5666 		 * A few sanity checks. The memory in userland may be
5667 		 * fragmented into various different vmas. It is okay to mmap()
5668 		 * and munmap() memory in this slot after doing this call at
5669 		 * any time.
5670 		 */
5671 		if (new->userspace_addr & ~PAGE_MASK)
5672 			return -EINVAL;
5673 		if ((new->base_gfn + new->npages) * PAGE_SIZE > kvm->arch.mem_limit)
5674 			return -EINVAL;
5675 	}
5676 
5677 	if (!kvm->arch.migration_mode)
5678 		return 0;
5679 
5680 	/*
5681 	 * Turn off migration mode when:
5682 	 * - userspace creates a new memslot with dirty logging off,
5683 	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
5684 	 *   dirty logging is turned off.
5685 	 * Migration mode expects dirty page logging to be enabled to store
5686 	 * its dirty bitmap.
5687 	 */
5688 	if (change != KVM_MR_DELETE &&
5689 	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
5690 		WARN(kvm_s390_vm_stop_migration(kvm),
5691 		     "Failed to stop migration mode");
5692 
5693 	return 0;
5694 }
5695 
5696 void kvm_arch_commit_memory_region(struct kvm *kvm,
5697 				struct kvm_memory_slot *old,
5698 				const struct kvm_memory_slot *new,
5699 				enum kvm_mr_change change)
5700 {
5701 	struct kvm_s390_mmu_cache *mc = NULL;
5702 	int rc = 0;
5703 
5704 	if (change == KVM_MR_FLAGS_ONLY)
5705 		return;
5706 
5707 	mc = kvm_s390_new_mmu_cache();
5708 	if (!mc) {
5709 		rc = -ENOMEM;
5710 		goto out;
5711 	}
5712 
5713 	scoped_guard(write_lock, &kvm->mmu_lock) {
5714 		switch (change) {
5715 		case KVM_MR_DELETE:
5716 			rc = dat_delete_slot(mc, kvm->arch.gmap->asce, old->base_gfn, old->npages);
5717 			break;
5718 		case KVM_MR_MOVE:
5719 			rc = dat_delete_slot(mc, kvm->arch.gmap->asce, old->base_gfn, old->npages);
5720 			if (rc)
5721 				break;
5722 			fallthrough;
5723 		case KVM_MR_CREATE:
5724 			rc = dat_create_slot(mc, kvm->arch.gmap->asce, new->base_gfn, new->npages);
5725 			break;
5726 		case KVM_MR_FLAGS_ONLY:
5727 			break;
5728 		default:
5729 			WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5730 		}
5731 	}
5732 out:
5733 	if (rc)
5734 		pr_warn("failed to commit memory region\n");
5735 	kvm_s390_free_mmu_cache(mc);
5736 	return;
5737 }
5738 
5739 /**
5740  * kvm_test_age_gfn() - test young
5741  * @kvm: the kvm instance
5742  * @range: the range of guest addresses whose young status needs to be tested
5743  *
5744  * Context: called by KVM common code without holding the kvm mmu lock
5745  * Return: true if any page in the given range is young, otherwise false.
5746  */
5747 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
5748 {
5749 	scoped_guard(read_lock, &kvm->mmu_lock)
5750 		return dat_test_age_gfn(kvm->arch.gmap->asce, range->start, range->end);
5751 }
5752 
5753 /**
5754  * kvm_age_gfn() - clear young
5755  * @kvm: the kvm instance
5756  * @range: the range of guest addresses whose young status needs to be cleared
5757  *
5758  * Context: called by KVM common code without holding the kvm mmu lock
5759  * Return: true if any page in the given range was young, otherwise false.
5760  */
5761 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
5762 {
5763 	scoped_guard(read_lock, &kvm->mmu_lock)
5764 		return gmap_age_gfn(kvm->arch.gmap, range->start, range->end);
5765 }
5766 
5767 /**
5768  * kvm_unmap_gfn_range() - Unmap a range of guest addresses
5769  * @kvm: the kvm instance
5770  * @range: the range of guest page frames to invalidate
5771  *
5772  * This function always returns false because every DAT table modification
5773  * has to use the appropriate DAT table manipulation instructions, which will
5774  * keep the TLB coherent, hence no additional TLB flush is ever required.
5775  *
5776  * Context: called by KVM common code with the kvm mmu write lock held
5777  * Return: false
5778  */
5779 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
5780 {
5781 	return gmap_unmap_gfn_range(kvm->arch.gmap, range->slot, range->start, range->end);
5782 }
5783 
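/*
 * Each 2-bit field of sclp.hmfai gives, for facility doubleword i, the
 * number of additional leading 16-bit blocks to mask out (the first block
 * is always cleared). kvm_s390_init() below uses the resulting mask to
 * trim kvm_s390_fac_base to facilities usable by guests.
 */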
5784 static inline unsigned long nonhyp_mask(int i)
5785 {
5786 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
5787 
5788 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
5789 }
5790 
5791 static int __init kvm_s390_init(void)
5792 {
5793 	int i, r;
5794 
5795 	if (!sclp.has_sief2) {
5796 		pr_info("SIE is not available\n");
5797 		return -ENODEV;
5798 	}
5799 
5800 	for (i = 0; i < 16; i++)
5801 		kvm_s390_fac_base[i] |=
5802 			stfle_fac_list[i] & nonhyp_mask(i);
5803 
5804 	r = __kvm_s390_init();
5805 	if (r)
5806 		return r;
5807 
5808 	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5809 	if (r) {
5810 		__kvm_s390_exit();
5811 		return r;
5812 	}
5813 	return 0;
5814 }
5815 
5816 static void __exit kvm_s390_exit(void)
5817 {
5818 	kvm_exit();
5819 
5820 	__kvm_s390_exit();
5821 }
5822 
5823 module_init(kvm_s390_init);
5824 module_exit(kvm_s390_exit);
5825 
5826 /*
5827  * Enable autoloading of the kvm module.
5828  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5829  * since x86 takes a different approach.
5830  */
5831 #include <linux/miscdevice.h>
5832 MODULE_ALIAS_MISCDEV(KVM_MINOR);
5833 MODULE_ALIAS("devname:kvm");
5834