xref: /linux/arch/s390/kvm/kvm-s390.c (revision 235539b48a2357da28f52d66d04bec04f3dcb9dd)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b2d73b2aSMartin Schwidefsky #include <linux/mman.h>
25b0c632dbSHeiko Carstens #include <linux/module.h>
26a374e892STony Krowiak #include <linux/random.h>
27b0c632dbSHeiko Carstens #include <linux/slab.h>
28ba5c1e9bSCarsten Otte #include <linux/timer.h>
2941408c28SThomas Huth #include <linux/vmalloc.h>
3015c9705fSDavid Hildenbrand #include <linux/bitmap.h>
31cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
32b0c632dbSHeiko Carstens #include <asm/lowcore.h>
33fd5ada04SMartin Schwidefsky #include <asm/stp.h>
34b0c632dbSHeiko Carstens #include <asm/pgtable.h>
351e133ab2SMartin Schwidefsky #include <asm/gmap.h>
36f5daba1dSHeiko Carstens #include <asm/nmi.h>
37a0616cdeSDavid Howells #include <asm/switch_to.h>
386d3da241SJens Freimann #include <asm/isc.h>
391526bf9cSChristian Borntraeger #include <asm/sclp.h>
400a763c78SDavid Hildenbrand #include <asm/cpacf.h>
41221bb8a4SLinus Torvalds #include <asm/timex.h>
428f2abe6aSChristian Borntraeger #include "kvm-s390.h"
43b0c632dbSHeiko Carstens #include "gaccess.h"
44b0c632dbSHeiko Carstens 
45ea2cdd27SDavid Hildenbrand #define KMSG_COMPONENT "kvm-s390"
46ea2cdd27SDavid Hildenbrand #undef pr_fmt
47ea2cdd27SDavid Hildenbrand #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
48ea2cdd27SDavid Hildenbrand 
495786fffaSCornelia Huck #define CREATE_TRACE_POINTS
505786fffaSCornelia Huck #include "trace.h"
51ade38c31SCornelia Huck #include "trace-s390.h"
525786fffaSCornelia Huck 
5341408c28SThomas Huth #define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
54816c7667SJens Freimann #define LOCAL_IRQS 32
55816c7667SJens Freimann #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
56816c7667SJens Freimann 			   (KVM_MAX_VCPUS + LOCAL_IRQS))
5741408c28SThomas Huth 
58b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
59b0c632dbSHeiko Carstens 
/*
 * Statistics exported via debugfs (/sys/kernel/debug/kvm).  Each entry maps
 * a file name to a per-VCPU counter inside struct kvm_vcpu.stat (see the
 * VCPU_STAT() wrapper above); the array is terminated by a NULL sentinel.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	/* SIE exit reasons */
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	/* halt polling */
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	/* intercepted/emulated instructions */
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	/* interrupt delivery into the guest */
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	/* SIGP orders, broken down by order code */
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	/* DIAGNOSE hypercalls, by function code */
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }	/* sentinel */
};
128b0c632dbSHeiko Carstens 
/*
 * allow nested virtualization in KVM (if enabled by user space)
 * Read-only module parameter (S_IRUGO): must be set at module load time;
 * kvm_s390_cpu_feat_init() consults it once when deciding whether to
 * advertise the SIEF2 feature.
 */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/*
 * upper facilities limit for kvm
 * Bit mask (FACILITIES_KVM, generated at build time) applied on top of the
 * host facility list to cap what guests may see.
 */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };
136b0c632dbSHeiko Carstens 
/*
 * Return the number of 64-bit words in kvm_s390_fac_list_mask.
 * The BUILD_BUG_ON guarantees at compile time that the mask never grows
 * beyond the architectural facility-mask size (S390_ARCH_FAC_MASK_SIZE_U64).
 */
unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
14278c4b59fSMichael Mueller 
/* available cpu features supported by kvm (filled by kvm_s390_cpu_feat_init) */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

/* pte notifiers: invalidate prefix pages / shadow gmaps on host changes */
static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
/* s390 debug feature handle for the "kvm-trace" debug log */
debug_info_t *kvm_s390_dbf;
152b0c632dbSHeiko Carstens /* Section: not file related */
/*
 * Arch hook called when KVM enables virtualization on a CPU.
 * s390 hardware can always run SIE, so there is nothing to switch on;
 * report success unconditionally.
 */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
158b0c632dbSHeiko Carstens 
159414d3b07SMartin Schwidefsky static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
160414d3b07SMartin Schwidefsky 			      unsigned long end);
1612c70fe44SChristian Borntraeger 
162fdf03650SFan Zhang /*
163fdf03650SFan Zhang  * This callback is executed during stop_machine(). All CPUs are therefore
164fdf03650SFan Zhang  * temporarily stopped. In order not to change guest behavior, we have to
165fdf03650SFan Zhang  * disable preemption whenever we touch the epoch of kvm and the VCPUs,
166fdf03650SFan Zhang  * so a CPU won't be stopped while calculating with the epoch.
167fdf03650SFan Zhang  */
/*
 * Notifier callback invoked with the TOD clock delta (in @v) whenever the
 * host epoch changes (e.g. clock steering).  Compensates every VM and VCPU
 * so guest time appears unchanged:
 *  - shift each VM's and VCPU's epoch by -delta,
 *  - advance a running CPU timer start stamp by +delta,
 *  - shift the epoch of an active vSIE (nested) control block as well.
 * Runs inside stop_machine() (see comment above), so no extra locking is
 * needed while walking vm_list.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			/* keep the accounted CPU time consistent */
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			/* a nested guest currently in vSIE needs fixup too */
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}
188fdf03650SFan Zhang 
/* Hooked into s390_epoch_delta_notifier in kvm_arch_hardware_setup(). */
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
192fdf03650SFan Zhang 
/*
 * One-time arch setup: register the pte notifiers used to keep guest
 * prefix pages and vSIE shadow tables in sync with host page-table
 * changes, and hook into TOD epoch-delta notifications for clock
 * steering (see kvm_clock_sync()).  Always succeeds.
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}
203b0c632dbSHeiko Carstens 
/* Undo kvm_arch_hardware_setup(): unregister all notifiers. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
211b0c632dbSHeiko Carstens 
/*
 * Mark CPU feature @nr as available to guests.  set_bit_inv is used
 * because the feature bitmap uses the s390 (MSB-first) bit numbering.
 */
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}
21622be5a13SDavid Hildenbrand 
/*
 * Probe whether PERFORM LOCKED OPERATION subfunction @nr is available.
 * Bit 0x100 in r0 selects the "test bit" mode of PLO, in which the
 * parameter registers are ignored and the condition code reports
 * availability.  The cc is extracted via ipm/srl; cc == 0 means the
 * subfunction is installed.
 */
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
2320a763c78SDavid Hildenbrand 
/*
 * Probe the host for CPU subfunctions and SIE features that KVM can
 * offer to guests.  Fills kvm_s390_available_subfunc (PLO, PTFF and the
 * CPACF crypto query masks) and kvm_s390_available_cpu_feat (via
 * allow_cpu_feat()).  Called once from kvm_arch_init().
 */
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	/* record every available PLO subfunction in an MSB-first byte mask */
	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	/* query the installed crypto subfunctions per MSA level */
	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	/* everything below is only advertised when nested virt is possible */
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
30822be5a13SDavid Hildenbrand 
309b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
310b0c632dbSHeiko Carstens {
31178f26131SChristian Borntraeger 	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
31278f26131SChristian Borntraeger 	if (!kvm_s390_dbf)
31378f26131SChristian Borntraeger 		return -ENOMEM;
31478f26131SChristian Borntraeger 
31578f26131SChristian Borntraeger 	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
31678f26131SChristian Borntraeger 		debug_unregister(kvm_s390_dbf);
31778f26131SChristian Borntraeger 		return -ENOMEM;
31878f26131SChristian Borntraeger 	}
31978f26131SChristian Borntraeger 
32022be5a13SDavid Hildenbrand 	kvm_s390_cpu_feat_init();
32122be5a13SDavid Hildenbrand 
32284877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
32384877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
324b0c632dbSHeiko Carstens }
325b0c632dbSHeiko Carstens 
/* Arch teardown: release the "kvm-trace" debug feature. */
void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}
33078f26131SChristian Borntraeger 
331b0c632dbSHeiko Carstens /* Section: device related */
332b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
333b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
334b0c632dbSHeiko Carstens {
335b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
336b0c632dbSHeiko Carstens 		return s390_enable_sie();
337b0c632dbSHeiko Carstens 	return -EINVAL;
338b0c632dbSHeiko Carstens }
339b0c632dbSHeiko Carstens 
/*
 * KVM_CHECK_EXTENSION handler: report whether capability @ext is
 * supported.  Returns 1/0 for plain booleans, or a capability-specific
 * value (e.g. a size or a CPU count) where the API defines one.
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	/* capabilities that are always available on s390 */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		/* value is the maximum transfer size for KVM_S390_MEM_OP */
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		/* VCPU limit depends on the SCA variant actually usable */
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		/* copy-on-write only works with ESOP */
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		/* runtime instrumentation == facility 64 */
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}
398b0c632dbSHeiko Carstens 
39915f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
40015f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
40115f36ebdSJason J. Herne {
40215f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
40315f36ebdSJason J. Herne 	unsigned long address;
40415f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
40515f36ebdSJason J. Herne 
40615f36ebdSJason J. Herne 	/* Loop over all guest pages */
40715f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
40815f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
40915f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
41015f36ebdSJason J. Herne 
4111e133ab2SMartin Schwidefsky 		if (test_and_clear_guest_dirty(gmap->mm, address))
41215f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
4131763f8d0SChristian Borntraeger 		if (fatal_signal_pending(current))
4141763f8d0SChristian Borntraeger 			return;
41570c88a00SChristian Borntraeger 		cond_resched();
41615f36ebdSJason J. Herne 	}
41715f36ebdSJason J. Herne }
41815f36ebdSJason J. Herne 
419b0c632dbSHeiko Carstens /* Section: vm related */
420a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu);
421a6e2f683SEugene (jno) Dvurechenski 
422b0c632dbSHeiko Carstens /*
423b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
424b0c632dbSHeiko Carstens  */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 *
 * KVM_GET_DIRTY_LOG handler: sync dirty bits from the gmap into the
 * slot's bitmap, copy the bitmap to user space via kvm_get_dirty_log(),
 * then clear it.  Serialized against memslot changes by slots_lock.
 *
 * Returns 0 on success, -EINVAL for a bad slot number, -ENOENT if dirty
 * logging is not enabled on the slot, or the error from the copy-out.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
461b0c632dbSHeiko Carstens 
4626502a34cSDavid Hildenbrand static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
4636502a34cSDavid Hildenbrand {
4646502a34cSDavid Hildenbrand 	unsigned int i;
4656502a34cSDavid Hildenbrand 	struct kvm_vcpu *vcpu;
4666502a34cSDavid Hildenbrand 
4676502a34cSDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
4686502a34cSDavid Hildenbrand 		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
4696502a34cSDavid Hildenbrand 	}
4706502a34cSDavid Hildenbrand }
4716502a34cSDavid Hildenbrand 
/*
 * KVM_ENABLE_CAP (VM scope) handler.  Flags must be zero.  Capabilities
 * that change the guest CPU model (vector registers, runtime
 * instrumentation) may only be enabled before the first VCPU exists;
 * this is checked under kvm->lock against kvm->created_vcpus.
 *
 * Returns 0 on success, -EBUSY if VCPUs already exist where that
 * matters, -EINVAL for unknown capabilities/flags or missing hardware
 * support.
 */
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		/* changes the CPU model: only allowed before VCPU creation */
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		/* runtime instrumentation (facility 64), same constraints */
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		/* existing VCPUs must start intercepting instruction 0x0000 */
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
535d938dc55SCornelia Huck 
5368c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
5378c0a7ce6SDominik Dingel {
5388c0a7ce6SDominik Dingel 	int ret;
5398c0a7ce6SDominik Dingel 
5408c0a7ce6SDominik Dingel 	switch (attr->attr) {
5418c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
5428c0a7ce6SDominik Dingel 		ret = 0;
543c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
544a3a92c31SDominik Dingel 			 kvm->arch.mem_limit);
545a3a92c31SDominik Dingel 		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
5468c0a7ce6SDominik Dingel 			ret = -EFAULT;
5478c0a7ce6SDominik Dingel 		break;
5488c0a7ce6SDominik Dingel 	default:
5498c0a7ce6SDominik Dingel 		ret = -ENXIO;
5508c0a7ce6SDominik Dingel 		break;
5518c0a7ce6SDominik Dingel 	}
5528c0a7ce6SDominik Dingel 	return ret;
5538c0a7ce6SDominik Dingel }
5548c0a7ce6SDominik Dingel 
/*
 * Set an attribute of the KVM_S390_VM_MEM_CTRL group: enable CMMA, reset
 * all CMMA states, or change the guest memory limit. Enabling CMMA and
 * changing the limit are only allowed before the first vcpu is created
 * (checked under kvm->lock).
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* CMMA requires SCLP support on this machine. */
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		/* use_cmma may only be flipped while no vcpus exist. */
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		/* Resetting states only makes sense with CMMA enabled. */
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		/* srcu read side keeps the memslots stable during the reset. */
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		/* ucontrol guests manage their gmap themselves. */
		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* The limit may only be lowered, never raised. */
		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* Replace the old gmap with the resized one. */
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
6374f718eabSDominik Dingel 
638a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
639a374e892STony Krowiak 
/*
 * Set an attribute of the KVM_S390_VM_CRYPTO group: enable or disable
 * AES/DEA key wrapping. Requires facility 76 (presumably the MSA
 * extension providing the CRYCB wrapping key masks — confirm against
 * kvm_s390_crypto_init). Enabling generates fresh random wrapping key
 * masks; disabling clears them. All vcpus are then forced to refresh
 * their crypto setup.
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	/*
	 * Refresh every vcpu's crypto control block and kick it out of SIE
	 * so the new settings take effect; still under kvm->lock so no vcpu
	 * can be added concurrently.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
688a374e892STony Krowiak 
68972f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
69072f25020SJason J. Herne {
69172f25020SJason J. Herne 	u8 gtod_high;
69272f25020SJason J. Herne 
69372f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
69472f25020SJason J. Herne 					   sizeof(gtod_high)))
69572f25020SJason J. Herne 		return -EFAULT;
69672f25020SJason J. Herne 
69772f25020SJason J. Herne 	if (gtod_high != 0)
69872f25020SJason J. Herne 		return -EINVAL;
69958c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
70072f25020SJason J. Herne 
70172f25020SJason J. Herne 	return 0;
70272f25020SJason J. Herne }
70372f25020SJason J. Herne 
70472f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
70572f25020SJason J. Herne {
7065a3d883aSDavid Hildenbrand 	u64 gtod;
70772f25020SJason J. Herne 
70872f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
70972f25020SJason J. Herne 		return -EFAULT;
71072f25020SJason J. Herne 
71125ed1675SDavid Hildenbrand 	kvm_s390_set_tod_clock(kvm, gtod);
71258c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
71372f25020SJason J. Herne 	return 0;
71472f25020SJason J. Herne }
71572f25020SJason J. Herne 
71672f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
71772f25020SJason J. Herne {
71872f25020SJason J. Herne 	int ret;
71972f25020SJason J. Herne 
72072f25020SJason J. Herne 	if (attr->flags)
72172f25020SJason J. Herne 		return -EINVAL;
72272f25020SJason J. Herne 
72372f25020SJason J. Herne 	switch (attr->attr) {
72472f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
72572f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
72672f25020SJason J. Herne 		break;
72772f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
72872f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
72972f25020SJason J. Herne 		break;
73072f25020SJason J. Herne 	default:
73172f25020SJason J. Herne 		ret = -ENXIO;
73272f25020SJason J. Herne 		break;
73372f25020SJason J. Herne 	}
73472f25020SJason J. Herne 	return ret;
73572f25020SJason J. Herne }
73672f25020SJason J. Herne 
73772f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
73872f25020SJason J. Herne {
73972f25020SJason J. Herne 	u8 gtod_high = 0;
74072f25020SJason J. Herne 
74172f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
74272f25020SJason J. Herne 					 sizeof(gtod_high)))
74372f25020SJason J. Herne 		return -EFAULT;
74458c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
74572f25020SJason J. Herne 
74672f25020SJason J. Herne 	return 0;
74772f25020SJason J. Herne }
74872f25020SJason J. Herne 
74972f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
75072f25020SJason J. Herne {
7515a3d883aSDavid Hildenbrand 	u64 gtod;
75272f25020SJason J. Herne 
75360417fccSDavid Hildenbrand 	gtod = kvm_s390_get_tod_clock_fast(kvm);
75472f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
75572f25020SJason J. Herne 		return -EFAULT;
75658c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
75772f25020SJason J. Herne 
75872f25020SJason J. Herne 	return 0;
75972f25020SJason J. Herne }
76072f25020SJason J. Herne 
76172f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
76272f25020SJason J. Herne {
76372f25020SJason J. Herne 	int ret;
76472f25020SJason J. Herne 
76572f25020SJason J. Herne 	if (attr->flags)
76672f25020SJason J. Herne 		return -EINVAL;
76772f25020SJason J. Herne 
76872f25020SJason J. Herne 	switch (attr->attr) {
76972f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
77072f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
77172f25020SJason J. Herne 		break;
77272f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
77372f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
77472f25020SJason J. Herne 		break;
77572f25020SJason J. Herne 	default:
77672f25020SJason J. Herne 		ret = -ENXIO;
77772f25020SJason J. Herne 		break;
77872f25020SJason J. Herne 	}
77972f25020SJason J. Herne 	return ret;
78072f25020SJason J. Herne }
78172f25020SJason J. Herne 
/*
 * KVM_S390_VM_CPU_PROCESSOR attribute: let user space define the guest
 * CPU model (cpuid, IBC value and facility list). Only permitted while
 * no vcpu has been created (checked under kvm->lock).
 */
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	/* The CPU model may not change once vcpus exist. */
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		/* sclp.ibc: bits 16..27 = lowest, bits 0..11 = unblocked IBC */
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		/* Clamp the requested IBC into the machine's supported range. */
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
820658b6edaSMichael Mueller 
82115c9705fSDavid Hildenbrand static int kvm_s390_set_processor_feat(struct kvm *kvm,
82215c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
82315c9705fSDavid Hildenbrand {
82415c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
82515c9705fSDavid Hildenbrand 	int ret = -EBUSY;
82615c9705fSDavid Hildenbrand 
82715c9705fSDavid Hildenbrand 	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
82815c9705fSDavid Hildenbrand 		return -EFAULT;
82915c9705fSDavid Hildenbrand 	if (!bitmap_subset((unsigned long *) data.feat,
83015c9705fSDavid Hildenbrand 			   kvm_s390_available_cpu_feat,
83115c9705fSDavid Hildenbrand 			   KVM_S390_VM_CPU_FEAT_NR_BITS))
83215c9705fSDavid Hildenbrand 		return -EINVAL;
83315c9705fSDavid Hildenbrand 
83415c9705fSDavid Hildenbrand 	mutex_lock(&kvm->lock);
83515c9705fSDavid Hildenbrand 	if (!atomic_read(&kvm->online_vcpus)) {
83615c9705fSDavid Hildenbrand 		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
83715c9705fSDavid Hildenbrand 			    KVM_S390_VM_CPU_FEAT_NR_BITS);
83815c9705fSDavid Hildenbrand 		ret = 0;
83915c9705fSDavid Hildenbrand 	}
84015c9705fSDavid Hildenbrand 	mutex_unlock(&kvm->lock);
84115c9705fSDavid Hildenbrand 	return ret;
84215c9705fSDavid Hildenbrand }
84315c9705fSDavid Hildenbrand 
8440a763c78SDavid Hildenbrand static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
8450a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
8460a763c78SDavid Hildenbrand {
8470a763c78SDavid Hildenbrand 	/*
8480a763c78SDavid Hildenbrand 	 * Once supported by kernel + hw, we have to store the subfunctions
8490a763c78SDavid Hildenbrand 	 * in kvm->arch and remember that user space configured them.
8500a763c78SDavid Hildenbrand 	 */
8510a763c78SDavid Hildenbrand 	return -ENXIO;
8520a763c78SDavid Hildenbrand }
8530a763c78SDavid Hildenbrand 
854658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
855658b6edaSMichael Mueller {
856658b6edaSMichael Mueller 	int ret = -ENXIO;
857658b6edaSMichael Mueller 
858658b6edaSMichael Mueller 	switch (attr->attr) {
859658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
860658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
861658b6edaSMichael Mueller 		break;
86215c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
86315c9705fSDavid Hildenbrand 		ret = kvm_s390_set_processor_feat(kvm, attr);
86415c9705fSDavid Hildenbrand 		break;
8650a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
8660a763c78SDavid Hildenbrand 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
8670a763c78SDavid Hildenbrand 		break;
868658b6edaSMichael Mueller 	}
869658b6edaSMichael Mueller 	return ret;
870658b6edaSMichael Mueller }
871658b6edaSMichael Mueller 
872658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
873658b6edaSMichael Mueller {
874658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
875658b6edaSMichael Mueller 	int ret = 0;
876658b6edaSMichael Mueller 
877658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
878658b6edaSMichael Mueller 	if (!proc) {
879658b6edaSMichael Mueller 		ret = -ENOMEM;
880658b6edaSMichael Mueller 		goto out;
881658b6edaSMichael Mueller 	}
8829bb0ec09SDavid Hildenbrand 	proc->cpuid = kvm->arch.model.cpuid;
883658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
884c54f0d6aSDavid Hildenbrand 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
885c54f0d6aSDavid Hildenbrand 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
886658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
887658b6edaSMichael Mueller 		ret = -EFAULT;
888658b6edaSMichael Mueller 	kfree(proc);
889658b6edaSMichael Mueller out:
890658b6edaSMichael Mueller 	return ret;
891658b6edaSMichael Mueller }
892658b6edaSMichael Mueller 
893658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
894658b6edaSMichael Mueller {
895658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
896658b6edaSMichael Mueller 	int ret = 0;
897658b6edaSMichael Mueller 
898658b6edaSMichael Mueller 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
899658b6edaSMichael Mueller 	if (!mach) {
900658b6edaSMichael Mueller 		ret = -ENOMEM;
901658b6edaSMichael Mueller 		goto out;
902658b6edaSMichael Mueller 	}
903658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
90437c5f6c8SDavid Hildenbrand 	mach->ibc = sclp.ibc;
905c54f0d6aSDavid Hildenbrand 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
906981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
907658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
90894422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
909658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
910658b6edaSMichael Mueller 		ret = -EFAULT;
911658b6edaSMichael Mueller 	kfree(mach);
912658b6edaSMichael Mueller out:
913658b6edaSMichael Mueller 	return ret;
914658b6edaSMichael Mueller }
915658b6edaSMichael Mueller 
91615c9705fSDavid Hildenbrand static int kvm_s390_get_processor_feat(struct kvm *kvm,
91715c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
91815c9705fSDavid Hildenbrand {
91915c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
92015c9705fSDavid Hildenbrand 
92115c9705fSDavid Hildenbrand 	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
92215c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
92315c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
92415c9705fSDavid Hildenbrand 		return -EFAULT;
92515c9705fSDavid Hildenbrand 	return 0;
92615c9705fSDavid Hildenbrand }
92715c9705fSDavid Hildenbrand 
92815c9705fSDavid Hildenbrand static int kvm_s390_get_machine_feat(struct kvm *kvm,
92915c9705fSDavid Hildenbrand 				     struct kvm_device_attr *attr)
93015c9705fSDavid Hildenbrand {
93115c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
93215c9705fSDavid Hildenbrand 
93315c9705fSDavid Hildenbrand 	bitmap_copy((unsigned long *) data.feat,
93415c9705fSDavid Hildenbrand 		    kvm_s390_available_cpu_feat,
93515c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
93615c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
93715c9705fSDavid Hildenbrand 		return -EFAULT;
93815c9705fSDavid Hildenbrand 	return 0;
93915c9705fSDavid Hildenbrand }
94015c9705fSDavid Hildenbrand 
9410a763c78SDavid Hildenbrand static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
9420a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
9430a763c78SDavid Hildenbrand {
9440a763c78SDavid Hildenbrand 	/*
9450a763c78SDavid Hildenbrand 	 * Once we can actually configure subfunctions (kernel + hw support),
9460a763c78SDavid Hildenbrand 	 * we have to check if they were already set by user space, if so copy
9470a763c78SDavid Hildenbrand 	 * them from kvm->arch.
9480a763c78SDavid Hildenbrand 	 */
9490a763c78SDavid Hildenbrand 	return -ENXIO;
9500a763c78SDavid Hildenbrand }
9510a763c78SDavid Hildenbrand 
9520a763c78SDavid Hildenbrand static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
9530a763c78SDavid Hildenbrand 					struct kvm_device_attr *attr)
9540a763c78SDavid Hildenbrand {
9550a763c78SDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
9560a763c78SDavid Hildenbrand 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
9570a763c78SDavid Hildenbrand 		return -EFAULT;
9580a763c78SDavid Hildenbrand 	return 0;
9590a763c78SDavid Hildenbrand }
960658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
961658b6edaSMichael Mueller {
962658b6edaSMichael Mueller 	int ret = -ENXIO;
963658b6edaSMichael Mueller 
964658b6edaSMichael Mueller 	switch (attr->attr) {
965658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
966658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
967658b6edaSMichael Mueller 		break;
968658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
969658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
970658b6edaSMichael Mueller 		break;
97115c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
97215c9705fSDavid Hildenbrand 		ret = kvm_s390_get_processor_feat(kvm, attr);
97315c9705fSDavid Hildenbrand 		break;
97415c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_FEAT:
97515c9705fSDavid Hildenbrand 		ret = kvm_s390_get_machine_feat(kvm, attr);
97615c9705fSDavid Hildenbrand 		break;
9770a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
9780a763c78SDavid Hildenbrand 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
9790a763c78SDavid Hildenbrand 		break;
9800a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
9810a763c78SDavid Hildenbrand 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
9820a763c78SDavid Hildenbrand 		break;
983658b6edaSMichael Mueller 	}
984658b6edaSMichael Mueller 	return ret;
985658b6edaSMichael Mueller }
986658b6edaSMichael Mueller 
987f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
988f2061656SDominik Dingel {
989f2061656SDominik Dingel 	int ret;
990f2061656SDominik Dingel 
991f2061656SDominik Dingel 	switch (attr->group) {
9924f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
9938c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
9944f718eabSDominik Dingel 		break;
99572f25020SJason J. Herne 	case KVM_S390_VM_TOD:
99672f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
99772f25020SJason J. Herne 		break;
998658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
999658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
1000658b6edaSMichael Mueller 		break;
1001a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1002a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1003a374e892STony Krowiak 		break;
1004f2061656SDominik Dingel 	default:
1005f2061656SDominik Dingel 		ret = -ENXIO;
1006f2061656SDominik Dingel 		break;
1007f2061656SDominik Dingel 	}
1008f2061656SDominik Dingel 
1009f2061656SDominik Dingel 	return ret;
1010f2061656SDominik Dingel }
1011f2061656SDominik Dingel 
1012f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1013f2061656SDominik Dingel {
10148c0a7ce6SDominik Dingel 	int ret;
10158c0a7ce6SDominik Dingel 
10168c0a7ce6SDominik Dingel 	switch (attr->group) {
10178c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
10188c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
10198c0a7ce6SDominik Dingel 		break;
102072f25020SJason J. Herne 	case KVM_S390_VM_TOD:
102172f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
102272f25020SJason J. Herne 		break;
1023658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1024658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
1025658b6edaSMichael Mueller 		break;
10268c0a7ce6SDominik Dingel 	default:
10278c0a7ce6SDominik Dingel 		ret = -ENXIO;
10288c0a7ce6SDominik Dingel 		break;
10298c0a7ce6SDominik Dingel 	}
10308c0a7ce6SDominik Dingel 
10318c0a7ce6SDominik Dingel 	return ret;
1032f2061656SDominik Dingel }
1033f2061656SDominik Dingel 
1034f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1035f2061656SDominik Dingel {
1036f2061656SDominik Dingel 	int ret;
1037f2061656SDominik Dingel 
1038f2061656SDominik Dingel 	switch (attr->group) {
10394f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
10404f718eabSDominik Dingel 		switch (attr->attr) {
10414f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
10424f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
1043f9cbd9b0SDavid Hildenbrand 			ret = sclp.has_cmma ? 0 : -ENXIO;
1044f9cbd9b0SDavid Hildenbrand 			break;
10458c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
10464f718eabSDominik Dingel 			ret = 0;
10474f718eabSDominik Dingel 			break;
10484f718eabSDominik Dingel 		default:
10494f718eabSDominik Dingel 			ret = -ENXIO;
10504f718eabSDominik Dingel 			break;
10514f718eabSDominik Dingel 		}
10524f718eabSDominik Dingel 		break;
105372f25020SJason J. Herne 	case KVM_S390_VM_TOD:
105472f25020SJason J. Herne 		switch (attr->attr) {
105572f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
105672f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
105772f25020SJason J. Herne 			ret = 0;
105872f25020SJason J. Herne 			break;
105972f25020SJason J. Herne 		default:
106072f25020SJason J. Herne 			ret = -ENXIO;
106172f25020SJason J. Herne 			break;
106272f25020SJason J. Herne 		}
106372f25020SJason J. Herne 		break;
1064658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1065658b6edaSMichael Mueller 		switch (attr->attr) {
1066658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
1067658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
106815c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
106915c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_FEAT:
10700a763c78SDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1071658b6edaSMichael Mueller 			ret = 0;
1072658b6edaSMichael Mueller 			break;
10730a763c78SDavid Hildenbrand 		/* configuring subfunctions is not supported yet */
10740a763c78SDavid Hildenbrand 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1075658b6edaSMichael Mueller 		default:
1076658b6edaSMichael Mueller 			ret = -ENXIO;
1077658b6edaSMichael Mueller 			break;
1078658b6edaSMichael Mueller 		}
1079658b6edaSMichael Mueller 		break;
1080a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1081a374e892STony Krowiak 		switch (attr->attr) {
1082a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1083a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1084a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1085a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1086a374e892STony Krowiak 			ret = 0;
1087a374e892STony Krowiak 			break;
1088a374e892STony Krowiak 		default:
1089a374e892STony Krowiak 			ret = -ENXIO;
1090a374e892STony Krowiak 			break;
1091a374e892STony Krowiak 		}
1092a374e892STony Krowiak 		break;
1093f2061656SDominik Dingel 	default:
1094f2061656SDominik Dingel 		ret = -ENXIO;
1095f2061656SDominik Dingel 		break;
1096f2061656SDominik Dingel 	}
1097f2061656SDominik Dingel 
1098f2061656SDominik Dingel 	return ret;
1099f2061656SDominik Dingel }
1100f2061656SDominik Dingel 
/*
 * KVM_S390_GET_SKEYS ioctl: read the storage keys for args->count guest
 * frames starting at args->start_gfn and copy them to user space.
 *
 * Returns KVM_S390_GET_SKEYS_NONE when the guest mm does not use storage
 * keys, 0 on success, negative error code otherwise.
 */
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	/* No flags are defined for this ioctl. */
	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* Try kmalloc first, fall back to vmalloc for large buffers. */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	/* mmap_sem stabilizes the gfn->hva mappings while keys are read. */
	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	/* Only copy the result out when every key was read successfully. */
	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
114930ee2a98SJason J. Herne 
/*
 * KVM_S390_SET_SKEYS ioctl: read args->count storage keys from user
 * space and apply them to the guest frames starting at args->start_gfn.
 * Also enables storage key handling for the guest as a side effect.
 *
 * Returns 0 on success, negative error code otherwise; keys set before
 * a failure remain applied.
 */
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	/* No flags are defined for this ioctl. */
	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* Try kmalloc first, fall back to vmalloc for large buffers. */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	/* mmap_sem stabilizes the gfn->hva mappings while keys are set. */
	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}
120530ee2a98SJason J. Herne 
/*
 * Dispatch VM-scoped ioctls: floating interrupt injection, capability
 * enabling, irqchip creation, VM device attributes and guest storage
 * key transfer.  Returns the handler's result, -EFAULT when the ioctl
 * argument cannot be copied from user space, or -ENOTTY for unknown
 * commands.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		/* Inject a floating (VM-wide) interrupt. */
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		/* Only valid once userspace opted into the irqchip. */
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
1290b0c632dbSHeiko Carstens 
/*
 * Execute PQAP(QCI) to query the AP (crypto adapter) configuration into
 * @config, a 128-byte buffer that is zeroed first.  Returns the
 * instruction's condition code, or the initial 0 with @config left
 * zeroed when the exception table fixup is taken.
 *
 * NOTE(review): the EX_TABLE entry covers label 0 (the ipm), not the
 * PQAP opcode itself — confirm the intended fixup address.
 */
static int kvm_s390_query_ap_config(u8 *config)
{
	/* Function code 0x04 in bits 0-7 of r0 selects QCI. */
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"			/* r0 = function code */
		"lgr 2,%2\n"			/* r2 = config block address */
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"			/* extract condition code */
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
131245c9b47cSTony Krowiak 
131345c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
131445c9b47cSTony Krowiak {
131545c9b47cSTony Krowiak 	u8 config[128];
131645c9b47cSTony Krowiak 	int cc;
131745c9b47cSTony Krowiak 
1318a6aacc3fSHeiko Carstens 	if (test_facility(12)) {
131945c9b47cSTony Krowiak 		cc = kvm_s390_query_ap_config(config);
132045c9b47cSTony Krowiak 
132145c9b47cSTony Krowiak 		if (cc)
132245c9b47cSTony Krowiak 			pr_err("PQAP(QCI) failed with cc=%d", cc);
132345c9b47cSTony Krowiak 		else
132445c9b47cSTony Krowiak 			return config[0] & 0x40;
132545c9b47cSTony Krowiak 	}
132645c9b47cSTony Krowiak 
132745c9b47cSTony Krowiak 	return 0;
132845c9b47cSTony Krowiak }
132945c9b47cSTony Krowiak 
133045c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
133145c9b47cSTony Krowiak {
133245c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
133345c9b47cSTony Krowiak 
133445c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
133545c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
133645c9b47cSTony Krowiak 	else
133745c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
133845c9b47cSTony Krowiak }
133945c9b47cSTony Krowiak 
/*
 * Build the CPUID presented to new guests: the host CPUID with the
 * version field overridden to 0xff.
 *
 * NOTE(review): relies on struct cpuid being exactly 8 bytes so it can
 * be read back as a u64 — confirm against asm/cpu.h.
 */
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	/* presumably 0xff marks the CPU as virtualized — verify */
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}
13489d8d5786SMichael Mueller 
/*
 * Initialize the VM's crypto control block (CRYCB).  Does nothing
 * unless facility 76 (presumably MSAX3 — confirm) is available to the
 * guest.  The crycb lives in the preallocated sie_page2; AES/DEA
 * protected-key functions are enabled and fresh random wrapping key
 * masks are generated.
 */
static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}
13655102ee87STony Krowiak 
13667d43bafcSEugene (jno) Dvurechenski static void sca_dispose(struct kvm *kvm)
13677d43bafcSEugene (jno) Dvurechenski {
13687d43bafcSEugene (jno) Dvurechenski 	if (kvm->arch.use_esca)
13695e044315SEugene (jno) Dvurechenski 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
13707d43bafcSEugene (jno) Dvurechenski 	else
13717d43bafcSEugene (jno) Dvurechenski 		free_page((unsigned long)(kvm->arch.sca));
13727d43bafcSEugene (jno) Dvurechenski 	kvm->arch.sca = NULL;
13737d43bafcSEugene (jno) Dvurechenski }
13747d43bafcSEugene (jno) Dvurechenski 
/*
 * Architecture-specific VM creation.
 *
 * @type selects a normal VM (0) or, with CONFIG_KVM_S390_UCONTROL, a
 * user-controlled VM (requires CAP_SYS_ADMIN).  Sets up the basic SCA,
 * debug feature, sie_page2 (facility lists + crycb), CPU model data,
 * floating interrupt state and — for non-ucontrol VMs — the guest
 * address space (gmap).
 *
 * Returns 0 on success, -EINVAL for a bad @type, -ENOMEM on allocation
 * failure, or the error from s390_enable_sie().  On error, out_err
 * unwinds whatever was set up (the free/unregister calls tolerate
 * NULL/unset state).
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	/* shared across VMs to stagger SCA placement within a page */
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	/* rate-limit STHYI emulation: 500 per 5 seconds */
	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	/* without 64-bit SCA origins the SCA must be below 2 GB */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	/* offset consecutive VMs' SCAs inside the page (16-byte steps) */
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/* sie_page2 holds the guest facility list and the crycb */
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	/* facility 74 (STHYI) is always emulated for the guest */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol VMs manage their address space from user space */
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		/* cap guest memory by the hardware addressing maximum */
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
1491b0c632dbSHeiko Carstens 
/* s390 does not provide per-vcpu debugfs entries. */
bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}
1496*235539b4SLuiz Capitulino 
/* Nothing to create: kvm_arch_has_vcpu_debugfs() reports false. */
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}
1501*235539b4SLuiz Capitulino 
1502d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1503d329c035SChristian Borntraeger {
1504d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1505ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
150667335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
15073c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
1508bc784cceSEugene (jno) Dvurechenski 	if (!kvm_is_ucontrol(vcpu->kvm))
1509a6e2f683SEugene (jno) Dvurechenski 		sca_del_vcpu(vcpu);
151027e0393fSCarsten Otte 
151127e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
15126ea427bbSMartin Schwidefsky 		gmap_remove(vcpu->arch.gmap);
151327e0393fSCarsten Otte 
1514e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
1515b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
1516d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
1517b31288faSKonstantin Weitz 
15186692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
1519b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
1520d329c035SChristian Borntraeger }
1521d329c035SChristian Borntraeger 
1522d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
1523d329c035SChristian Borntraeger {
1524d329c035SChristian Borntraeger 	unsigned int i;
1525988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1526d329c035SChristian Borntraeger 
1527988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1528988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1529988a2caeSGleb Natapov 
1530988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1531988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1532d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1533988a2caeSGleb Natapov 
1534988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1535988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1536d329c035SChristian Borntraeger }
1537d329c035SChristian Borntraeger 
/*
 * Architecture-specific VM teardown: vcpus first (they reference the
 * SCA and gmap), then the SCA, debug feature, sie_page2, address space,
 * adapters, remaining floating interrupts and vsie state.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	/* ucontrol VMs have no kernel-managed gmap (see kvm_arch_init_vm) */
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
1551b0c632dbSHeiko Carstens 
1552b0c632dbSHeiko Carstens /* Section: vcpu related */
1553dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1554b0c632dbSHeiko Carstens {
15556ea427bbSMartin Schwidefsky 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
155627e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
155727e0393fSCarsten Otte 		return -ENOMEM;
15582c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1559dafd032aSDominik Dingel 
156027e0393fSCarsten Otte 	return 0;
156127e0393fSCarsten Otte }
156227e0393fSCarsten Otte 
/*
 * Remove @vcpu from its VM's system control area: clear its bit in the
 * mcn bitmap and zero its SDA entry, for whichever SCA format (basic or
 * extended) is in use.  No-op when SCA entries are not used at all.
 *
 * sca_lock is taken as reader: entry updates may run concurrently with
 * each other but not with the basic->extended switch, which holds the
 * lock as writer.
 */
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
1581a6e2f683SEugene (jno) Dvurechenski 
1582eaa78f34SDavid Hildenbrand static void sca_add_vcpu(struct kvm_vcpu *vcpu)
1583a6e2f683SEugene (jno) Dvurechenski {
1584a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries()) {
1585a6940674SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
1586a6940674SDavid Hildenbrand 
1587a6940674SDavid Hildenbrand 		/* we still need the basic sca for the ipte control */
1588a6940674SDavid Hildenbrand 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1589a6940674SDavid Hildenbrand 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
1590a6940674SDavid Hildenbrand 	}
1591eaa78f34SDavid Hildenbrand 	read_lock(&vcpu->kvm->arch.sca_lock);
1592eaa78f34SDavid Hildenbrand 	if (vcpu->kvm->arch.use_esca) {
1593eaa78f34SDavid Hildenbrand 		struct esca_block *sca = vcpu->kvm->arch.sca;
15947d43bafcSEugene (jno) Dvurechenski 
1595eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
15967d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
15977d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
159825508824SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= 0x04U;
1599eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
16007d43bafcSEugene (jno) Dvurechenski 	} else {
1601eaa78f34SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
1602a6e2f683SEugene (jno) Dvurechenski 
1603eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1604a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1605a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
1606eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1607a6e2f683SEugene (jno) Dvurechenski 	}
1608eaa78f34SDavid Hildenbrand 	read_unlock(&vcpu->kvm->arch.sca_lock);
16095e044315SEugene (jno) Dvurechenski }
16105e044315SEugene (jno) Dvurechenski 
16115e044315SEugene (jno) Dvurechenski /* Basic SCA to Extended SCA data copy routines */
16125e044315SEugene (jno) Dvurechenski static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
16135e044315SEugene (jno) Dvurechenski {
16145e044315SEugene (jno) Dvurechenski 	d->sda = s->sda;
16155e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.c = s->sigp_ctrl.c;
16165e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
16175e044315SEugene (jno) Dvurechenski }
16185e044315SEugene (jno) Dvurechenski 
16195e044315SEugene (jno) Dvurechenski static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
16205e044315SEugene (jno) Dvurechenski {
16215e044315SEugene (jno) Dvurechenski 	int i;
16225e044315SEugene (jno) Dvurechenski 
16235e044315SEugene (jno) Dvurechenski 	d->ipte_control = s->ipte_control;
16245e044315SEugene (jno) Dvurechenski 	d->mcn[0] = s->mcn;
16255e044315SEugene (jno) Dvurechenski 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
16265e044315SEugene (jno) Dvurechenski 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
16275e044315SEugene (jno) Dvurechenski }
16285e044315SEugene (jno) Dvurechenski 
/*
 * Replace the VM's basic SCA with an extended SCA so that more vcpus
 * fit.  All vcpus are blocked (kicked out of SIE) and the sca_lock is
 * held as writer while the contents are copied and every SIE block is
 * repointed at the new origin; ecb2 bit 0x04 enables ESCA mode.
 *
 * Returns 0 on success or -ENOMEM if the extended SCA cannot be
 * allocated (the basic SCA then stays in place).
 */
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	/* split the 64-bit origin into the SIE block's high/low words */
	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}
1666a6e2f683SEugene (jno) Dvurechenski 
1667a6e2f683SEugene (jno) Dvurechenski static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1668a6e2f683SEugene (jno) Dvurechenski {
16695e044315SEugene (jno) Dvurechenski 	int rc;
16705e044315SEugene (jno) Dvurechenski 
1671a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries()) {
1672a6940674SDavid Hildenbrand 		if (id < KVM_MAX_VCPUS)
1673a6940674SDavid Hildenbrand 			return true;
1674a6940674SDavid Hildenbrand 		return false;
1675a6940674SDavid Hildenbrand 	}
16765e044315SEugene (jno) Dvurechenski 	if (id < KVM_S390_BSCA_CPU_SLOTS)
16775e044315SEugene (jno) Dvurechenski 		return true;
167876a6dd72SDavid Hildenbrand 	if (!sclp.has_esca || !sclp.has_64bscao)
16795e044315SEugene (jno) Dvurechenski 		return false;
16805e044315SEugene (jno) Dvurechenski 
16815e044315SEugene (jno) Dvurechenski 	mutex_lock(&kvm->lock);
16825e044315SEugene (jno) Dvurechenski 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
16835e044315SEugene (jno) Dvurechenski 	mutex_unlock(&kvm->lock);
16845e044315SEugene (jno) Dvurechenski 
16855e044315SEugene (jno) Dvurechenski 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
1686a6e2f683SEugene (jno) Dvurechenski }
1687a6e2f683SEugene (jno) Dvurechenski 
/*
 * Early vcpu initialization: declare which register sets are mirrored
 * in kvm_run (prefix, GPRs, ACRs, CRs, arch0, pfault; RICCB with
 * facility 64; VRS or FPRS depending on vector support), reset the
 * prefix and the async page fault queue, and give ucontrol vcpus their
 * private gmap.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
1714b0c632dbSHeiko Carstens 
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	/* record the TOD at which host-side cpu timer accounting begins;
	 * readers retry via cputm_seqcount */
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
1723db0758b2SDavid Hildenbrand 
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	/* charge the TOD delta since __start_cpu_timer_accounting()
	 * against the guest cpu timer, then mark accounting stopped */
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
1733db0758b2SDavid Hildenbrand 
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	/* must not already be enabled; starts a fresh accounting window */
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}
1741db0758b2SDavid Hildenbrand 
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	/* close the current accounting window before disabling */
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}
1749db0758b2SDavid Hildenbrand 
/* Preemption-safe wrapper around __enable_cpu_timer_accounting(). */
1750db0758b2SDavid Hildenbrand static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1751db0758b2SDavid Hildenbrand {
1752db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1753db0758b2SDavid Hildenbrand 	__enable_cpu_timer_accounting(vcpu);
1754db0758b2SDavid Hildenbrand 	preempt_enable();
1755db0758b2SDavid Hildenbrand }
1756db0758b2SDavid Hildenbrand 
/* Preemption-safe wrapper around __disable_cpu_timer_accounting(). */
1757db0758b2SDavid Hildenbrand static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1758db0758b2SDavid Hildenbrand {
1759db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1760db0758b2SDavid Hildenbrand 	__disable_cpu_timer_accounting(vcpu);
1761db0758b2SDavid Hildenbrand 	preempt_enable();
1762db0758b2SDavid Hildenbrand }
1763db0758b2SDavid Hildenbrand 
17644287f247SDavid Hildenbrand /* set the cpu timer - may only be called from the VCPU thread itself */
/*
 * Install a new guest cpu-timer value. If accounting is currently enabled,
 * the accounting base (cputm_start) is re-latched to "now" so that time
 * already elapsed before this call does not get charged against the new
 * value. Writer side of cputm_seqcount; readers retry.
 */
17654287f247SDavid Hildenbrand void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
17664287f247SDavid Hildenbrand {
1767db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
17689c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1769db0758b2SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled)
1770db0758b2SDavid Hildenbrand 		vcpu->arch.cputm_start = get_tod_clock_fast();
17714287f247SDavid Hildenbrand 	vcpu->arch.sie_block->cputm = cputm;
17729c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1773db0758b2SDavid Hildenbrand 	preempt_enable();
17744287f247SDavid Hildenbrand }
17754287f247SDavid Hildenbrand 
1776db0758b2SDavid Hildenbrand /* update and get the cpu timer - can also be called from other VCPU threads */
/*
 * Lockless reader: returns the guest cpu-timer value, adjusted by the TOD
 * time elapsed since accounting started (if it is running). Uses
 * raw_read_seqcount(), which returns the raw (possibly odd) sequence
 * instead of spinning, so a reader never blocks on a writer.
 */
17774287f247SDavid Hildenbrand __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
17784287f247SDavid Hildenbrand {
17799c23a131SDavid Hildenbrand 	unsigned int seq;
1780db0758b2SDavid Hildenbrand 	__u64 value;
1781db0758b2SDavid Hildenbrand 
	/* fast path: no accounting in progress, the SIE block value is exact */
1782db0758b2SDavid Hildenbrand 	if (unlikely(!vcpu->arch.cputm_enabled))
17834287f247SDavid Hildenbrand 		return vcpu->arch.sie_block->cputm;
1784db0758b2SDavid Hildenbrand 
17859c23a131SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
17869c23a131SDavid Hildenbrand 	do {
17879c23a131SDavid Hildenbrand 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
17889c23a131SDavid Hildenbrand 		/*
17899c23a131SDavid Hildenbrand 		 * If the writer would ever execute a read in the critical
17909c23a131SDavid Hildenbrand 		 * section, e.g. in irq context, we have a deadlock.
17919c23a131SDavid Hildenbrand 		 */
17929c23a131SDavid Hildenbrand 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
1793db0758b2SDavid Hildenbrand 		value = vcpu->arch.sie_block->cputm;
17949c23a131SDavid Hildenbrand 		/* if cputm_start is 0, accounting is being started/stopped */
17959c23a131SDavid Hildenbrand 		if (likely(vcpu->arch.cputm_start))
1796db0758b2SDavid Hildenbrand 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
		/*
		 * seq & ~1: if a write was in progress (odd seq), the masked
		 * value can never match the live sequence, forcing a retry.
		 */
17979c23a131SDavid Hildenbrand 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
17989c23a131SDavid Hildenbrand 	preempt_enable();
1799db0758b2SDavid Hildenbrand 	return value;
18004287f247SDavid Hildenbrand }
18014287f247SDavid Hildenbrand 
/*
 * Called when this VCPU is scheduled in on a physical cpu: save the host
 * FPU/access-register state, point the lazy FPU machinery at the guest
 * register save area, enable the guest address space (gmap), flag the
 * VCPU as running and resume cpu-timer accounting. vcpu->cpu is set last.
 */
1802b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1803b0c632dbSHeiko Carstens {
18049977e886SHendrik Brueckner 	/* Save host register state */
1805d0164ee2SHendrik Brueckner 	save_fpu_regs();
18069abc2a08SDavid Hildenbrand 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
18079abc2a08SDavid Hildenbrand 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
180896b2d7a8SHendrik Brueckner 
	/* Depending on MACHINE_HAS_VX we use the vector or the FP save area */
18096fd8e67dSDavid Hildenbrand 	if (MACHINE_HAS_VX)
18109abc2a08SDavid Hildenbrand 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
18116fd8e67dSDavid Hildenbrand 	else
18126fd8e67dSDavid Hildenbrand 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
18139abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
18149977e886SHendrik Brueckner 	if (test_fp_ctl(current->thread.fpu.fpc))
181596b2d7a8SHendrik Brueckner 		/* User space provided an invalid FPC, let's clear it */
18169977e886SHendrik Brueckner 		current->thread.fpu.fpc = 0;
18179977e886SHendrik Brueckner 
18189977e886SHendrik Brueckner 	save_access_regs(vcpu->arch.host_acrs);
181959674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
182037d9df98SDavid Hildenbrand 	gmap_enable(vcpu->arch.enabled_gmap);
1821805de8f4SPeter Zijlstra 	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	/* resume cpu-timer accounting that vcpu_put stopped (unless idle) */
18225ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
1823db0758b2SDavid Hildenbrand 		__start_cpu_timer_accounting(vcpu);
182401a745acSDavid Hildenbrand 	vcpu->cpu = cpu;
1825b0c632dbSHeiko Carstens }
1826b0c632dbSHeiko Carstens 
/*
 * Called when this VCPU is scheduled out: mirror of kvm_arch_vcpu_load().
 * Stops cpu-timer accounting, clears the running flag, remembers and
 * disables the currently enabled gmap, then swaps guest FPU/access-register
 * state back out for the host's. vcpu->cpu is invalidated first.
 */
1827b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1828b0c632dbSHeiko Carstens {
182901a745acSDavid Hildenbrand 	vcpu->cpu = -1;
18305ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
1831db0758b2SDavid Hildenbrand 		__stop_cpu_timer_accounting(vcpu);
1832805de8f4SPeter Zijlstra 	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	/* remember which gmap was active so vcpu_load can re-enable it */
183337d9df98SDavid Hildenbrand 	vcpu->arch.enabled_gmap = gmap_get_enabled();
183437d9df98SDavid Hildenbrand 	gmap_disable(vcpu->arch.enabled_gmap);
18359977e886SHendrik Brueckner 
18369abc2a08SDavid Hildenbrand 	/* Save guest register state */
1837d0164ee2SHendrik Brueckner 	save_fpu_regs();
18389977e886SHendrik Brueckner 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
18399abc2a08SDavid Hildenbrand 
18409abc2a08SDavid Hildenbrand 	/* Restore host register state */
18419abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
18429abc2a08SDavid Hildenbrand 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
18439977e886SHendrik Brueckner 
18449977e886SHendrik Brueckner 	save_access_regs(vcpu->run->s.regs.acrs);
1845b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1846b0c632dbSHeiko Carstens }
1847b0c632dbSHeiko Carstens 
/*
 * Reset the VCPU to its architected initial state: zero PSW, prefix,
 * cpu timer, clock comparator and control registers (except the two
 * architected non-zero defaults below), clear pending async page faults
 * and local interrupts. Also stops the VCPU unless userspace manages
 * the cpu state itself.
 */
1848b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1849b0c632dbSHeiko Carstens {
1850b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
1851b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1852b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
18538d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
18544287f247SDavid Hildenbrand 	kvm_s390_set_cpu_timer(vcpu, 0);
1855b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1856b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1857b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for control registers 0 and 14 */
1858b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1859b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
18609abc2a08SDavid Hildenbrand 	/* make sure the new fpc will be lazily loaded */
18619abc2a08SDavid Hildenbrand 	save_fpu_regs();
18629abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = 0;
1863b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1864672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
18653c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
18663c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
	/* only stop the cpu if userspace does not control the cpu state */
18676352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
18686852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
18692ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1870b0c632dbSHeiko Carstens }
1871b0c632dbSHeiko Carstens 
/*
 * Late per-VCPU initialization after the generic vcpu has been created:
 * copy the VM-wide TOD epoch, hook the VCPU into the VM's gmap and SCA
 * (not for ucontrol VMs), and enable the operation-exception intercept
 * when needed. Runs before the VCPU executes for the first time.
 */
187231928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
187342897d86SMarcelo Tosatti {
	/* epoch is read under kvm->lock with preemption off (TOD sync) */
187472f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
1875fdf03650SFan Zhang 	preempt_disable();
187672f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
1877fdf03650SFan Zhang 	preempt_enable();
187872f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
187925508824SDavid Hildenbrand 	if (!kvm_is_ucontrol(vcpu->kvm)) {
1880dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
1881eaa78f34SDavid Hildenbrand 		sca_add_vcpu(vcpu);
188225508824SDavid Hildenbrand 	}
18836502a34cSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
18846502a34cSDavid Hildenbrand 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
188537d9df98SDavid Hildenbrand 	/* make vcpu_load load the right gmap on the first trigger */
188637d9df98SDavid Hildenbrand 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
188742897d86SMarcelo Tosatti }
188842897d86SMarcelo Tosatti 
/*
 * Program the SIE block's crypto controls from the VM-wide crypto
 * configuration. A no-op unless facility 76 (crypto support) is
 * available to the guest.
 */
18895102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
18905102ee87STony Krowiak {
18919d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
18925102ee87STony Krowiak 		return;
18935102ee87STony Krowiak 
	/* clear both wrapping-key bits, then set what the VM enabled */
1894a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1895a374e892STony Krowiak 
1896a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1897a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1898a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1899a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1900a374e892STony Krowiak 
	/* point the SIE block at the VM's crypto control block */
19015102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
19025102ee87STony Krowiak }
19035102ee87STony Krowiak 
/* Free the CBRL (collaborative-memory) origin page set up for CMMA. */
1904b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1905b31605c1SDominik Dingel {
1906b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1907b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1908b31605c1SDominik Dingel }
1909b31605c1SDominik Dingel 
/*
 * Allocate the CBRL origin page and flip the ECB2 bits that enable CMMA
 * for this VCPU. Returns 0 on success or -ENOMEM if the page allocation
 * fails.
 */
1910b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1911b31605c1SDominik Dingel {
1912b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1913b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1914b31605c1SDominik Dingel 		return -ENOMEM;
1915b31605c1SDominik Dingel 
1916b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1917b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1918b31605c1SDominik Dingel 	return 0;
1919b31605c1SDominik Dingel }
1920b31605c1SDominik Dingel 
/*
 * Copy the VM-wide CPU model (IBC value and, if facility 7 is available,
 * the facility list pointer) into this VCPU's SIE block.
 */
192191520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
192291520f1aSMichael Mueller {
192391520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
192491520f1aSMichael Mueller 
192591520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
192680bc79dcSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 7))
1927c54f0d6aSDavid Hildenbrand 		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
192891520f1aSMichael Mueller }
192991520f1aSMichael Mueller 
/*
 * Configure the freshly created VCPU's SIE control block: initial cpuflags,
 * CPU model, the execution-control (ecb/ecb2/eca/ecd) bits gated on host
 * SCLP capabilities and guest facilities, mandatory intercepts, optional
 * CMMA state, the wakeup timer and crypto controls.
 * Returns 0 or a negative error from CMMA setup.
 */
1930b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1931b0c632dbSHeiko Carstens {
1932b31605c1SDominik Dingel 	int rc = 0;
1933b31288faSKonstantin Weitz 
	/* start out z/Arch, state modified, and in the stopped state */
19349e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
19359e6dabefSCornelia Huck 						    CPUSTAT_SM |
1936a4a4f191SGuenther Hutzl 						    CPUSTAT_STOPPED);
1937a4a4f191SGuenther Hutzl 
193853df84f8SGuenther Hutzl 	if (test_kvm_facility(vcpu->kvm, 78))
1939805de8f4SPeter Zijlstra 		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
194053df84f8SGuenther Hutzl 	else if (test_kvm_facility(vcpu->kvm, 8))
1941805de8f4SPeter Zijlstra 		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1942a4a4f191SGuenther Hutzl 
194391520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
194491520f1aSMichael Mueller 
1945bdab09f3SDavid Hildenbrand 	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
1946bdab09f3SDavid Hildenbrand 	if (MACHINE_HAS_ESOP)
1947bdab09f3SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= 0x02;
1948bd50e8ecSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 9))
1949bd50e8ecSDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= 0x04;
1950f597d24eSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 73))
19517feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
19527feb6bb8SMichael Mueller 
1953873b425eSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
1954d6af0b49SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= 0x08;
	/* eca bits below are enabled per available SCLP/host capability */
195548ee7d3aSDavid Hildenbrand 	vcpu->arch.sie_block->eca = 0x1002000U;
195648ee7d3aSDavid Hildenbrand 	if (sclp.has_cei)
195748ee7d3aSDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x80000000U;
195811ad65b7SDavid Hildenbrand 	if (sclp.has_ib)
195911ad65b7SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x40000000U;
196037c5f6c8SDavid Hildenbrand 	if (sclp.has_siif)
1961217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
196237c5f6c8SDavid Hildenbrand 	if (sclp.has_sigpif)
1963ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
	/* facility 129: vector support */
196418280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
196513211ea7SEric Farman 		vcpu->arch.sie_block->eca |= 0x00020000;
196613211ea7SEric Farman 		vcpu->arch.sie_block->ecd |= 0x20000000;
196713211ea7SEric Farman 	}
1968c6e5f166SFan Zhang 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	/* always intercept the storage-key instructions */
1969492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
19705a5e6536SMatthew Rosato 
1971e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma) {
1972b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1973b31605c1SDominik Dingel 		if (rc)
1974b31605c1SDominik Dingel 			return rc;
1975b31288faSKonstantin Weitz 	}
	/* hrtimer used to wake the VCPU when its clock comparator fires */
19760ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1977ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
19789d8d5786SMichael Mueller 
19795102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
19805102ee87STony Krowiak 
1981b31605c1SDominik Dingel 	return rc;
1982b0c632dbSHeiko Carstens }
1983b0c632dbSHeiko Carstens 
/*
 * Allocate and initialize a new VCPU with the given id: the vcpu structure
 * itself, its SIE page (control block + intercept TDB), the local interrupt
 * state and the cpu-timer seqcount. Uses goto-based cleanup so each
 * allocation is released on a later failure. Returns the vcpu or ERR_PTR.
 */
1984b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1985b0c632dbSHeiko Carstens 				      unsigned int id)
1986b0c632dbSHeiko Carstens {
19874d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
19887feb6bb8SMichael Mueller 	struct sie_page *sie_page;
19894d47555aSCarsten Otte 	int rc = -EINVAL;
1990b0c632dbSHeiko Carstens 
	/* the id must fit into the system control area (non-ucontrol VMs) */
19914215825eSDavid Hildenbrand 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
19924d47555aSCarsten Otte 		goto out;
19934d47555aSCarsten Otte 
19944d47555aSCarsten Otte 	rc = -ENOMEM;
19954d47555aSCarsten Otte 
1996b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1997b0c632dbSHeiko Carstens 	if (!vcpu)
19984d47555aSCarsten Otte 		goto out;
1999b0c632dbSHeiko Carstens 
20007feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
20017feb6bb8SMichael Mueller 	if (!sie_page)
2002b0c632dbSHeiko Carstens 		goto out_free_cpu;
2003b0c632dbSHeiko Carstens 
20047feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
20057feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
20067feb6bb8SMichael Mueller 
2007efed1104SDavid Hildenbrand 	/* the real guest size will always be smaller than msl */
2008efed1104SDavid Hildenbrand 	vcpu->arch.sie_block->mso = 0;
2009efed1104SDavid Hildenbrand 	vcpu->arch.sie_block->msl = sclp.hamax;
2010efed1104SDavid Hildenbrand 
2011b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
2012ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
2013ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
2014d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
20155288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
20169c23a131SDavid Hildenbrand 	seqcount_init(&vcpu->arch.cputm_seqcount);
2017ba5c1e9bSCarsten Otte 
2018b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
2019b0c632dbSHeiko Carstens 	if (rc)
20209abc2a08SDavid Hildenbrand 		goto out_free_sie_block;
20218335713aSChristian Borntraeger 	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
2022b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
2023ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
2024b0c632dbSHeiko Carstens 
2025b0c632dbSHeiko Carstens 	return vcpu;
20267b06bf2fSWei Yongjun out_free_sie_block:
20277b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
2028b0c632dbSHeiko Carstens out_free_cpu:
2029b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
20304d47555aSCarsten Otte out:
2031b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
2032b0c632dbSHeiko Carstens }
2033b0c632dbSHeiko Carstens 
/* A VCPU is runnable iff it has a deliverable interrupt pending. */
2034b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2035b0c632dbSHeiko Carstens {
20369a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
2037b0c632dbSHeiko Carstens }
2038b0c632dbSHeiko Carstens 
/* Prevent the VCPU from (re)entering SIE and kick it out if it is inside. */
203927406cd5SChristian Borntraeger void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
204049b99e1eSChristian Borntraeger {
2041805de8f4SPeter Zijlstra 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
204261a6df54SDavid Hildenbrand 	exit_sie(vcpu);
204349b99e1eSChristian Borntraeger }
204449b99e1eSChristian Borntraeger 
/* Allow the VCPU to enter SIE again (counterpart of kvm_s390_vcpu_block). */
204527406cd5SChristian Borntraeger void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
204649b99e1eSChristian Borntraeger {
2047805de8f4SPeter Zijlstra 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
204849b99e1eSChristian Borntraeger }
204949b99e1eSChristian Borntraeger 
/* Mark a request pending in the SIE block and kick the VCPU out of SIE. */
20508e236546SChristian Borntraeger static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
20518e236546SChristian Borntraeger {
2052805de8f4SPeter Zijlstra 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
205361a6df54SDavid Hildenbrand 	exit_sie(vcpu);
20548e236546SChristian Borntraeger }
20558e236546SChristian Borntraeger 
/* Clear the pending-request marker once the request has been processed. */
20558e236546SChristian Borntraeger static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
20568e236546SChristian Borntraeger {
20579bf9fde2SJason J. Herne 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
20588e236546SChristian Borntraeger }
20608e236546SChristian Borntraeger 
206149b99e1eSChristian Borntraeger /*
206249b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
206349b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
206449b99e1eSChristian Borntraeger  * return immediately. */
206549b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
206649b99e1eSChristian Borntraeger {
	/* the stop-interrupt flag makes the SIE instruction exit */
2067805de8f4SPeter Zijlstra 	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the hardware clears the "in SIE" indicator */
206849b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
206949b99e1eSChristian Borntraeger 		cpu_relax();
207049b99e1eSChristian Borntraeger }
207149b99e1eSChristian Borntraeger 
20728e236546SChristian Borntraeger /* Kick a guest cpu out of SIE to process a request synchronously */
20738e236546SChristian Borntraeger void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
207449b99e1eSChristian Borntraeger {
	/* queue the generic KVM request, then force a SIE exit to act on it */
20758e236546SChristian Borntraeger 	kvm_make_request(req, vcpu);
20768e236546SChristian Borntraeger 	kvm_s390_vcpu_request(vcpu);
207749b99e1eSChristian Borntraeger }
207849b99e1eSChristian Borntraeger 
/*
 * gmap invalidation callback: when a guest mapping in [start, end] is
 * invalidated, find any VCPU whose 2-page prefix area intersects the range
 * and request an MMU reload so the prefix gets remapped before re-entry.
 * Shadow gmaps are ignored here.
 */
2079414d3b07SMartin Schwidefsky static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2080414d3b07SMartin Schwidefsky 			      unsigned long end)
20812c70fe44SChristian Borntraeger {
20822c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
20832c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
2084414d3b07SMartin Schwidefsky 	unsigned long prefix;
2085414d3b07SMartin Schwidefsky 	int i;
20862c70fe44SChristian Borntraeger 
208765d0b0d4SDavid Hildenbrand 	if (gmap_is_shadow(gmap))
208865d0b0d4SDavid Hildenbrand 		return;
	/* prefix addresses are < 2 GB, so higher ranges can never match */
2089414d3b07SMartin Schwidefsky 	if (start >= 1UL << 31)
2090414d3b07SMartin Schwidefsky 		/* We are only interested in prefix pages */
2091414d3b07SMartin Schwidefsky 		return;
20922c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
20932c70fe44SChristian Borntraeger 		/* match against both prefix pages */
2094414d3b07SMartin Schwidefsky 		prefix = kvm_s390_get_prefix(vcpu);
2095414d3b07SMartin Schwidefsky 		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2096414d3b07SMartin Schwidefsky 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2097414d3b07SMartin Schwidefsky 				   start, end);
20988e236546SChristian Borntraeger 			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
20992c70fe44SChristian Borntraeger 		}
21002c70fe44SChristian Borntraeger 	}
21012c70fe44SChristian Borntraeger }
21022c70fe44SChristian Borntraeger 
/* s390 uses its own kick mechanism (exit_sie); this must never be called. */
2103b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2104b6d33834SChristoffer Dall {
2105b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
2106b6d33834SChristoffer Dall 	BUG();
2107b6d33834SChristoffer Dall 	return 0;
2108b6d33834SChristoffer Dall }
2109b6d33834SChristoffer Dall 
/*
 * KVM_GET_ONE_REG backend: copy the requested s390-specific register out
 * to the user buffer at reg->addr. Returns 0, -EFAULT from put_user, or
 * -EINVAL for an unknown register id.
 */
211014eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
211114eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
211214eebd91SCarsten Otte {
211314eebd91SCarsten Otte 	int r = -EINVAL;
211414eebd91SCarsten Otte 
211514eebd91SCarsten Otte 	switch (reg->id) {
211629b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
211729b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
211829b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
211929b7c71bSCarsten Otte 		break;
212029b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
212129b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
212229b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
212329b7c71bSCarsten Otte 		break;
212446a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
		/* accessor accounts for time elapsed since the last SIE exit */
21254287f247SDavid Hildenbrand 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
212646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
212746a6dd1cSJason J. herne 		break;
212846a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
212946a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
213046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
213146a6dd1cSJason J. herne 		break;
2132536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
2133536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
2134536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
2135536336c2SDominik Dingel 		break;
2136536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
2137536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
2138536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
2139536336c2SDominik Dingel 		break;
2140536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
2141536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
2142536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
2143536336c2SDominik Dingel 		break;
2144672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
2145672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
2146672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
2147672550fbSChristian Borntraeger 		break;
2148afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
2149afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
2150afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
2151afa45ff5SChristian Borntraeger 		break;
215214eebd91SCarsten Otte 	default:
215314eebd91SCarsten Otte 		break;
215414eebd91SCarsten Otte 	}
215514eebd91SCarsten Otte 
215614eebd91SCarsten Otte 	return r;
215714eebd91SCarsten Otte }
215814eebd91SCarsten Otte 
/*
 * KVM_SET_ONE_REG backend: read the new value from user space at reg->addr
 * and install it in the matching s390-specific register. Returns 0,
 * -EFAULT from get_user, or -EINVAL for an unknown register id.
 */
215814eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
215914eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
216014eebd91SCarsten Otte {
216114eebd91SCarsten Otte 	int r = -EINVAL;
21624287f247SDavid Hildenbrand 	__u64 val;
216314eebd91SCarsten Otte 
216414eebd91SCarsten Otte 	switch (reg->id) {
216529b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
216629b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
216729b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
216829b7c71bSCarsten Otte 		break;
216929b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
217029b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
217129b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
217229b7c71bSCarsten Otte 		break;
217346a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
		/* go through the accessor so the accounting base is re-latched */
21744287f247SDavid Hildenbrand 		r = get_user(val, (u64 __user *)reg->addr);
21754287f247SDavid Hildenbrand 		if (!r)
21764287f247SDavid Hildenbrand 			kvm_s390_set_cpu_timer(vcpu, val);
217746a6dd1cSJason J. herne 		break;
217846a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
217946a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
218046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
218146a6dd1cSJason J. herne 		break;
2182536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
2183536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
2184536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
		/* an invalid token disables async page faults: flush the queue */
21859fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
21869fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
2187536336c2SDominik Dingel 		break;
2188536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
2189536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
2190536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
2191536336c2SDominik Dingel 		break;
2192536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
2193536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
2194536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
2195536336c2SDominik Dingel 		break;
2196672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
2197672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
2198672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
2199672550fbSChristian Borntraeger 		break;
2200afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
2201afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
2202afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
2203afa45ff5SChristian Borntraeger 		break;
220414eebd91SCarsten Otte 	default:
220514eebd91SCarsten Otte 		break;
220614eebd91SCarsten Otte 	}
220714eebd91SCarsten Otte 
220814eebd91SCarsten Otte 	return r;
220914eebd91SCarsten Otte }
2211b6d33834SChristoffer Dall 
/* ioctl wrapper around the initial CPU reset; always succeeds. */
2212b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2213b0c632dbSHeiko Carstens {
2214b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
2215b0c632dbSHeiko Carstens 	return 0;
2216b0c632dbSHeiko Carstens }
2217b0c632dbSHeiko Carstens 
/* KVM_SET_REGS: copy all 16 general purpose registers from userspace. */
2218b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2219b0c632dbSHeiko Carstens {
22205a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
2221b0c632dbSHeiko Carstens 	return 0;
2222b0c632dbSHeiko Carstens }
2223b0c632dbSHeiko Carstens 
/* KVM_GET_REGS: copy all 16 general purpose registers to userspace. */
2224b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2225b0c632dbSHeiko Carstens {
22265a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
2227b0c632dbSHeiko Carstens 	return 0;
2228b0c632dbSHeiko Carstens }
2229b0c632dbSHeiko Carstens 
/*
 * KVM_SET_SREGS: install access and control registers from userspace.
 * Access registers are reloaded immediately on this CPU so the guest sees
 * them on the next SIE entry.
 */
2230b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2231b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
2232b0c632dbSHeiko Carstens {
223359674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
2234b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
223559674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
2236b0c632dbSHeiko Carstens 	return 0;
2237b0c632dbSHeiko Carstens }
2238b0c632dbSHeiko Carstens 
/* KVM_GET_SREGS: copy access and control registers to userspace. */
2239b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2240b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
2241b0c632dbSHeiko Carstens {
224259674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
2243b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
2244b0c632dbSHeiko Carstens 	return 0;
2245b0c632dbSHeiko Carstens }
2246b0c632dbSHeiko Carstens 
/*
 * KVM_SET_FPU: install the guest floating point state. The FPC is
 * validated first; on vector machines the FP registers are converted
 * into the vector register layout. Returns -EINVAL for a bad FPC.
 */
2247b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2248b0c632dbSHeiko Carstens {
22499abc2a08SDavid Hildenbrand 	/* make sure the new values will be lazily loaded */
22509abc2a08SDavid Hildenbrand 	save_fpu_regs();
22514725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
22524725c860SMartin Schwidefsky 		return -EINVAL;
22539abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = fpu->fpc;
22549abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
22559abc2a08SDavid Hildenbrand 		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
22569abc2a08SDavid Hildenbrand 	else
22579abc2a08SDavid Hildenbrand 		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
2258b0c632dbSHeiko Carstens 	return 0;
2259b0c632dbSHeiko Carstens }
2260b0c632dbSHeiko Carstens 
/*
 * KVM_GET_FPU: copy the guest floating point state to userspace, after
 * flushing any lazily held register contents. On vector machines the
 * FP values are extracted from the vector registers.
 */
2261b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2262b0c632dbSHeiko Carstens {
22639abc2a08SDavid Hildenbrand 	/* make sure we have the latest values */
22649abc2a08SDavid Hildenbrand 	save_fpu_regs();
22659abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
22669abc2a08SDavid Hildenbrand 		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
22679abc2a08SDavid Hildenbrand 	else
22689abc2a08SDavid Hildenbrand 		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
22699abc2a08SDavid Hildenbrand 	fpu->fpc = current->thread.fpu.fpc;
2270b0c632dbSHeiko Carstens 	return 0;
2271b0c632dbSHeiko Carstens }
2272b0c632dbSHeiko Carstens 
/*
 * Set the initial PSW (mask and address) for the VCPU. Only permitted
 * while the VCPU is stopped; returns -EBUSY otherwise.
 */
2273b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2274b0c632dbSHeiko Carstens {
2275b0c632dbSHeiko Carstens 	int rc = 0;
2276b0c632dbSHeiko Carstens 
22777a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
2278b0c632dbSHeiko Carstens 		rc = -EBUSY;
2279d7b0b5ebSCarsten Otte 	else {
2280d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
2281d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
2282d7b0b5ebSCarsten Otte 	}
2283b0c632dbSHeiko Carstens 	return rc;
2284b0c632dbSHeiko Carstens }
2285b0c632dbSHeiko Carstens 
/* KVM_TRANSLATE is not supported on s390. */
2286b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2287b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
2288b0c632dbSHeiko Carstens {
2289b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
2290b0c632dbSHeiko Carstens }
2291b0c632dbSHeiko Carstens 
/* debug control flags userspace may pass via KVM_SET_GUEST_DEBUG */
229227291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
229327291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
229427291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
229527291e21SDavid Hildenbrand 
/*
 * KVM_SET_GUEST_DEBUG backend: enable or disable guest debugging.
 * Rejects unknown flags and hosts without guest-PER support (sclp
 * has_gpere). Enabling sets CPUSTAT_P to enforce guest PER and imports
 * hardware breakpoints when requested; disabling (or any failure) tears
 * everything back down to a clean, debug-off state.
 */
2296d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2297d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
2298b0c632dbSHeiko Carstens {
229927291e21SDavid Hildenbrand 	int rc = 0;
230027291e21SDavid Hildenbrand 
	/* always start from a clean slate before applying new settings */
230127291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
230227291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
230327291e21SDavid Hildenbrand 
23042de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
230527291e21SDavid Hildenbrand 		return -EINVAL;
230689b5b4deSDavid Hildenbrand 	if (!sclp.has_gpere)
230789b5b4deSDavid Hildenbrand 		return -EINVAL;
230827291e21SDavid Hildenbrand 
230927291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
231027291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
231127291e21SDavid Hildenbrand 		/* enforce guest PER */
2312805de8f4SPeter Zijlstra 		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
231327291e21SDavid Hildenbrand 
231427291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
231527291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
231627291e21SDavid Hildenbrand 	} else {
2317805de8f4SPeter Zijlstra 		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
231827291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
231927291e21SDavid Hildenbrand 	}
232027291e21SDavid Hildenbrand 
	/* importing breakpoints failed: revert to the debug-off state */
232127291e21SDavid Hildenbrand 	if (rc) {
232227291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
232327291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
2324805de8f4SPeter Zijlstra 		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
232527291e21SDavid Hildenbrand 	}
232627291e21SDavid Hildenbrand 
232727291e21SDavid Hildenbrand 	return rc;
2328b0c632dbSHeiko Carstens }
2329b0c632dbSHeiko Carstens 
/*
 * KVM_GET_MP_STATE ioctl: report STOPPED or OPERATING.
 * Note: the state is returned directly instead of via mp_state.
 */
233062d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
233162d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
233262d9f0dbSMarcelo Tosatti {
23336352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
23346352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
23356352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
233662d9f0dbSMarcelo Tosatti }
233762d9f0dbSMarcelo Tosatti 
/*
 * KVM_SET_MP_STATE ioctl: stop or start the vcpu.
 * Using this interface hands full cpu-state control to userspace
 * (user_cpu_state_ctrl), so the kernel stops auto-starting vcpus.
 * Returns -ENXIO for states not supported on s390.
 */
233862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
233962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
234062d9f0dbSMarcelo Tosatti {
23416352e4d2SDavid Hildenbrand 	int rc = 0;
23426352e4d2SDavid Hildenbrand 
23436352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
23446352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
23456352e4d2SDavid Hildenbrand 
23466352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
23476352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
23486352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
23496352e4d2SDavid Hildenbrand 		break;
23506352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
23516352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
23526352e4d2SDavid Hildenbrand 		break;
23536352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
23546352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
23556352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
23566352e4d2SDavid Hildenbrand 	default:
23576352e4d2SDavid Hildenbrand 		rc = -ENXIO;
23586352e4d2SDavid Hildenbrand 	}
23596352e4d2SDavid Hildenbrand 
23606352e4d2SDavid Hildenbrand 	return rc;
236162d9f0dbSMarcelo Tosatti }
236262d9f0dbSMarcelo Tosatti 
/* Is interruption-blocking-state (IBS) currently set for this vcpu? */
23638ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
23648ad35755SDavid Hildenbrand {
23658ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
23668ad35755SDavid Hildenbrand }
23678ad35755SDavid Hildenbrand 
/*
 * Process all pending vcpu requests before (re)entering SIE.
 * Each handled request jumps back to "retry" so that requests raised
 * concurrently (or re-raised by a handler) are picked up again before
 * returning. Returns 0 when no request is left, or a negative error
 * (currently only from re-arming the prefix-page ipte notifier).
 */
23682c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
23692c70fe44SChristian Borntraeger {
23708ad35755SDavid Hildenbrand retry:
23718e236546SChristian Borntraeger 	kvm_s390_vcpu_request_handled(vcpu);
2372586b7ccdSChristian Borntraeger 	if (!vcpu->requests)
2373586b7ccdSChristian Borntraeger 		return 0;
23742c70fe44SChristian Borntraeger 	/*
23752c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2376b2d73b2aSMartin Schwidefsky 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
23772c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
23782c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
23792c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Lets just retry the request loop.
23802c70fe44SChristian Borntraeger 	 */
23818ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
23822c70fe44SChristian Borntraeger 		int rc;
2383b2d73b2aSMartin Schwidefsky 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
2384fda902cbSMichael Mueller 					  kvm_s390_get_prefix(vcpu),
2385b2d73b2aSMartin Schwidefsky 					  PAGE_SIZE * 2, PROT_WRITE);
2386aca411a4SJulius Niedworok 		if (rc) {
			/* re-raise so a later attempt retries the re-arm */
2387aca411a4SJulius Niedworok 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
23882c70fe44SChristian Borntraeger 			return rc;
2389aca411a4SJulius Niedworok 		}
23908ad35755SDavid Hildenbrand 		goto retry;
23912c70fe44SChristian Borntraeger 	}
23928ad35755SDavid Hildenbrand 
	/* invalidating ihcpu forces a guest TLB flush on next SIE entry */
2393d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2394d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
2395d3d692c8SDavid Hildenbrand 		goto retry;
2396d3d692c8SDavid Hildenbrand 	}
2397d3d692c8SDavid Hildenbrand 
23988ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
23998ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
24008ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
2401805de8f4SPeter Zijlstra 			atomic_or(CPUSTAT_IBS,
24028ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
24038ad35755SDavid Hildenbrand 		}
24048ad35755SDavid Hildenbrand 		goto retry;
24058ad35755SDavid Hildenbrand 	}
24068ad35755SDavid Hildenbrand 
24078ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
24088ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
24098ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2410805de8f4SPeter Zijlstra 			atomic_andnot(CPUSTAT_IBS,
24118ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
24128ad35755SDavid Hildenbrand 		}
24138ad35755SDavid Hildenbrand 		goto retry;
24148ad35755SDavid Hildenbrand 	}
24158ad35755SDavid Hildenbrand 
	/* intercept operation exceptions (e.g. for instruction emulation) */
24166502a34cSDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
24176502a34cSDavid Hildenbrand 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
24186502a34cSDavid Hildenbrand 		goto retry;
24196502a34cSDavid Hildenbrand 	}
24206502a34cSDavid Hildenbrand 
24210759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
24220759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
24230759d068SDavid Hildenbrand 
24242c70fe44SChristian Borntraeger 	return 0;
24252c70fe44SChristian Borntraeger }
24262c70fe44SChristian Borntraeger 
/*
 * Set the guest TOD clock for the whole VM by adjusting the epoch delta
 * (guest TOD = host TOD + epoch). All vcpus are blocked while the new
 * epoch is propagated to every SIE control block so the guests see a
 * consistent jump; preemption is disabled so the host TOD read and the
 * epoch update stay on one reference point.
 */
242725ed1675SDavid Hildenbrand void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
242825ed1675SDavid Hildenbrand {
242925ed1675SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
243025ed1675SDavid Hildenbrand 	int i;
243125ed1675SDavid Hildenbrand 
243225ed1675SDavid Hildenbrand 	mutex_lock(&kvm->lock);
243325ed1675SDavid Hildenbrand 	preempt_disable();
243425ed1675SDavid Hildenbrand 	kvm->arch.epoch = tod - get_tod_clock();
243525ed1675SDavid Hildenbrand 	kvm_s390_vcpu_block_all(kvm);
243625ed1675SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm)
243725ed1675SDavid Hildenbrand 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
243825ed1675SDavid Hildenbrand 	kvm_s390_vcpu_unblock_all(kvm);
243925ed1675SDavid Hildenbrand 	preempt_enable();
244025ed1675SDavid Hildenbrand 	mutex_unlock(&kvm->lock);
244125ed1675SDavid Hildenbrand }
244225ed1675SDavid Hildenbrand 
2443fa576c58SThomas Huth /**
2444fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
2445fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
2446fa576c58SThomas Huth  * @gpa: Guest physical address
2447fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
2448fa576c58SThomas Huth  *
2449fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
2450fa576c58SThomas Huth  *
2451fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
2452fa576c58SThomas Huth  */
2453fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
245424eb3a82SDominik Dingel {
	/* delegate to the gmap layer; FAULT_FLAG_WRITE requests a writable mapping */
2455527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
2456527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
245724eb3a82SDominik Dingel }
245824eb3a82SDominik Dingel 
/*
 * Inject a pfault token interrupt into the guest.
 * start_token selects PFAULT_INIT (delivered to this vcpu) versus
 * PFAULT_DONE (delivered as a floating VM interrupt). Injection failures
 * are unexpected here and only warned about.
 */
24593c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
24603c038e6bSDominik Dingel 				      unsigned long token)
24613c038e6bSDominik Dingel {
24623c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
2463383d0b05SJens Freimann 	struct kvm_s390_irq irq;
24643c038e6bSDominik Dingel 
24653c038e6bSDominik Dingel 	if (start_token) {
2466383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
2467383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
2468383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
24693c038e6bSDominik Dingel 	} else {
24703c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
2471383d0b05SJens Freimann 		inti.parm64 = token;
24723c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
24733c038e6bSDominik Dingel 	}
24743c038e6bSDominik Dingel }
24753c038e6bSDominik Dingel 
/* Async-pf hook: page not yet present - inject a PFAULT_INIT token. */
24763c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
24773c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
24783c038e6bSDominik Dingel {
24793c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
24803c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
24813c038e6bSDominik Dingel }
24823c038e6bSDominik Dingel 
/* Async-pf hook: page became present - inject a PFAULT_DONE token. */
24833c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
24843c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
24853c038e6bSDominik Dingel {
24863c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
24873c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
24883c038e6bSDominik Dingel }
24893c038e6bSDominik Dingel 
/* Async-pf hook: nothing to do on s390, the page is injected directly. */
24903c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
24913c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
24923c038e6bSDominik Dingel {
24933c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
24943c038e6bSDominik Dingel }
24953c038e6bSDominik Dingel 
/* Always report "can inject" so generic async-pf cleanup still runs. */
24963c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
24973c038e6bSDominik Dingel {
24983c038e6bSDominik Dingel 	/*
24993c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
25003c038e6bSDominik Dingel 	 * but we still want check_async_completion to cleanup
25013c038e6bSDominik Dingel 	 */
25023c038e6bSDominik Dingel 	return true;
25033c038e6bSDominik Dingel }
25043c038e6bSDominik Dingel 
/*
 * Try to arm an async page fault for the faulting guest address.
 * A long series of guards checks that the guest enabled and can
 * currently accept pfault notifications (valid token, PSW matches the
 * pfault mask/compare, external interrupts enabled, no pending irq,
 * cr0 pfault-subclass bit set, pfault enabled on the gmap). Returns 0
 * when async pf is not used (caller falls back to synchronous
 * fault-in), otherwise the result of kvm_setup_async_pf().
 */
25053c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
25063c038e6bSDominik Dingel {
25073c038e6bSDominik Dingel 	hva_t hva;
25083c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
25093c038e6bSDominik Dingel 	int rc;
25103c038e6bSDominik Dingel 
25113c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
25123c038e6bSDominik Dingel 		return 0;
25133c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
25143c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
25153c038e6bSDominik Dingel 		return 0;
25163c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
25173c038e6bSDominik Dingel 		return 0;
25189a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
25193c038e6bSDominik Dingel 		return 0;
25203c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
25213c038e6bSDominik Dingel 		return 0;
25223c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
25233c038e6bSDominik Dingel 		return 0;
25243c038e6bSDominik Dingel 
	/* host virtual address of the faulting guest page */
252581480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
252681480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
252781480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
25283c038e6bSDominik Dingel 		return 0;
25293c038e6bSDominik Dingel 
25303c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
25313c038e6bSDominik Dingel 	return rc;
25323c038e6bSDominik Dingel }
25333c038e6bSDominik Dingel 
/*
 * Prepare for the next SIE entry: finish async-pf housekeeping, stash
 * gprs 14/15 into the SIE block, give the scheduler/machine-check
 * handler a chance to run, deliver pending interrupts (non-ucontrol
 * guests only), process vcpu requests, and patch guest PER state when
 * debugging is active. Non-zero return aborts the run loop.
 */
25343fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
2535b0c632dbSHeiko Carstens {
25363fb4c40fSThomas Huth 	int rc, cpuflags;
2537e168bf8dSCarsten Otte 
25383c038e6bSDominik Dingel 	/*
25393c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
25403c038e6bSDominik Dingel 	 * to the guest but the house keeping for completed pfaults is
25413c038e6bSDominik Dingel 	 * handled outside the worker.
25423c038e6bSDominik Dingel 	 */
25433c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
25443c038e6bSDominik Dingel 
25457ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
25467ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
2547b0c632dbSHeiko Carstens 
2548b0c632dbSHeiko Carstens 	if (need_resched())
2549b0c632dbSHeiko Carstens 		schedule();
2550b0c632dbSHeiko Carstens 
2551d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
255271cde587SChristian Borntraeger 		s390_handle_mcck();
255371cde587SChristian Borntraeger 
255479395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
255579395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
255679395031SJens Freimann 		if (rc)
255779395031SJens Freimann 			return rc;
255879395031SJens Freimann 	}
25590ff31867SCarsten Otte 
25602c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
25612c70fe44SChristian Borntraeger 	if (rc)
25622c70fe44SChristian Borntraeger 		return rc;
25632c70fe44SChristian Borntraeger 
256427291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
256527291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
256627291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
256727291e21SDavid Hildenbrand 	}
256827291e21SDavid Hildenbrand 
2569b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
25703fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
25713fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
25723fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
25732b29a9fdSDominik Dingel 
25743fb4c40fSThomas Huth 	return 0;
25753fb4c40fSThomas Huth }
25763fb4c40fSThomas Huth 
/*
 * Handle a fault that happened while executing the SIE instruction by
 * injecting an addressing exception into the guest. See the in-body
 * comment for why the instruction length must be looked up first.
 */
2577492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2578492d8642SThomas Huth {
257956317920SDavid Hildenbrand 	struct kvm_s390_pgm_info pgm_info = {
258056317920SDavid Hildenbrand 		.code = PGM_ADDRESSING,
258156317920SDavid Hildenbrand 	};
258256317920SDavid Hildenbrand 	u8 opcode, ilen;
2583492d8642SThomas Huth 	int rc;
2584492d8642SThomas Huth 
2585492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2586492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
2587492d8642SThomas Huth 
2588492d8642SThomas Huth 	/*
2589492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
2590492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
2591492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
2592492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
2593492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
2594492d8642SThomas Huth 	 * to be able to forward the PSW.
2595492d8642SThomas Huth 	 */
259665977322SDavid Hildenbrand 	rc = read_guest_instr(vcpu, &opcode, 1);
259756317920SDavid Hildenbrand 	ilen = insn_length(opcode);
25989b0d721aSDavid Hildenbrand 	if (rc < 0) {
25999b0d721aSDavid Hildenbrand 		return rc;
26009b0d721aSDavid Hildenbrand 	} else if (rc) {
26019b0d721aSDavid Hildenbrand 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
26029b0d721aSDavid Hildenbrand 		 * Forward by arbitrary ilc, injection will take care of
26039b0d721aSDavid Hildenbrand 		 * nullification if necessary.
26049b0d721aSDavid Hildenbrand 		 */
26059b0d721aSDavid Hildenbrand 		pgm_info = vcpu->arch.pgm;
26069b0d721aSDavid Hildenbrand 		ilen = 4;
26079b0d721aSDavid Hildenbrand 	}
260856317920SDavid Hildenbrand 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
260956317920SDavid Hildenbrand 	kvm_s390_forward_psw(vcpu, ilen);
261056317920SDavid Hildenbrand 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
2611492d8642SThomas Huth }
2612492d8642SThomas Huth 
/*
 * Post-process one SIE exit.
 * Restores gprs 14/15 from the SIE block, then dispatches on why we
 * left the guest: an intercept (handled in kernel, or forwarded to
 * userspace as -EREMOTE with kvm_run prepared), a non-fault exit, a
 * ucontrol translation exception, a gmap pfault (async pf or
 * synchronous fault-in), or a fault on the SIE instruction itself.
 * Returns 0 to continue the run loop, -EREMOTE for userspace exits,
 * or a negative error.
 */
26133fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
26143fb4c40fSThomas Huth {
26152b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
26162b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
26172b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
26182b29a9fdSDominik Dingel 
261927291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
262027291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
262127291e21SDavid Hildenbrand 
26227ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
26237ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
262471f116bfSDavid Hildenbrand 
262571f116bfSDavid Hildenbrand 	if (vcpu->arch.sie_block->icptcode > 0) {
262671f116bfSDavid Hildenbrand 		int rc = kvm_handle_sie_intercept(vcpu);
262771f116bfSDavid Hildenbrand 
262871f116bfSDavid Hildenbrand 		if (rc != -EOPNOTSUPP)
262971f116bfSDavid Hildenbrand 			return rc;
		/* unhandled intercept - hand the raw SIEIC data to userspace */
263071f116bfSDavid Hildenbrand 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
263171f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
263271f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
263371f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
263471f116bfSDavid Hildenbrand 		return -EREMOTE;
263571f116bfSDavid Hildenbrand 	} else if (exit_reason != -EFAULT) {
263671f116bfSDavid Hildenbrand 		vcpu->stat.exit_null++;
263771f116bfSDavid Hildenbrand 		return 0;
2638210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
2639210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2640210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
2641210b1607SThomas Huth 						current->thread.gmap_addr;
2642210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
264371f116bfSDavid Hildenbrand 		return -EREMOTE;
264424eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
26453c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
264624eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
264771f116bfSDavid Hildenbrand 		if (kvm_arch_setup_async_pf(vcpu))
264871f116bfSDavid Hildenbrand 			return 0;
264971f116bfSDavid Hildenbrand 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
2650fa576c58SThomas Huth 	}
265171f116bfSDavid Hildenbrand 	return vcpu_post_run_fault_in_sie(vcpu);
26523fb4c40fSThomas Huth }
26533fb4c40fSThomas Huth 
/*
 * The inner vcpu run loop: pre-run, enter SIE, post-run, repeat until
 * a signal, a pending guestdbg exit, or a non-zero rc stops it.
 * The kvm->srcu read lock is dropped only around the actual sie64a()
 * call; interrupts are disabled across the guest_enter/exit context
 * switches and cpu-timer accounting is flipped to guest mode while in
 * SIE.
 */
26543fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
26553fb4c40fSThomas Huth {
26563fb4c40fSThomas Huth 	int rc, exit_reason;
26573fb4c40fSThomas Huth 
2658800c1065SThomas Huth 	/*
2659800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2660800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
2661800c1065SThomas Huth 	 */
2662800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2663800c1065SThomas Huth 
2664a76ccff6SThomas Huth 	do {
26653fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
26663fb4c40fSThomas Huth 		if (rc)
2667a76ccff6SThomas Huth 			break;
26683fb4c40fSThomas Huth 
2669800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
26703fb4c40fSThomas Huth 		/*
2671a76ccff6SThomas Huth 		 * As PF_VCPU will be used in fault handler, between
2672a76ccff6SThomas Huth 		 * guest_enter and guest_exit should be no uaccess.
26733fb4c40fSThomas Huth 		 */
26740097d12eSChristian Borntraeger 		local_irq_disable();
26756edaa530SPaolo Bonzini 		guest_enter_irqoff();
2676db0758b2SDavid Hildenbrand 		__disable_cpu_timer_accounting(vcpu);
26770097d12eSChristian Borntraeger 		local_irq_enable();
2678a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
2679a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
26800097d12eSChristian Borntraeger 		local_irq_disable();
2681db0758b2SDavid Hildenbrand 		__enable_cpu_timer_accounting(vcpu);
26826edaa530SPaolo Bonzini 		guest_exit_irqoff();
26830097d12eSChristian Borntraeger 		local_irq_enable();
2684800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
26853fb4c40fSThomas Huth 
26863fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
268727291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
26883fb4c40fSThomas Huth 
2689800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2690e168bf8dSCarsten Otte 	return rc;
2691b0c632dbSHeiko Carstens }
2692b0c632dbSHeiko Carstens 
/*
 * Copy the register state userspace dirtied in kvm_run into the vcpu /
 * SIE control block before entering the guest. Only sections flagged
 * in kvm_dirty_regs are synced; the flags are cleared afterwards.
 * Control register changes trigger a TLB flush request, and clearing
 * the pfault token cancels any queued async page faults.
 */
2693b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2694b028ee3eSDavid Hildenbrand {
2695b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2696b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2697b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2698b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2699b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2700b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2701d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
2702d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2703b028ee3eSDavid Hildenbrand 	}
2704b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
27054287f247SDavid Hildenbrand 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
2706b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2707b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2708b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2709b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2710b028ee3eSDavid Hildenbrand 	}
2711b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2712b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2713b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2714b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
27159fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
27169fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
2717b028ee3eSDavid Hildenbrand 	}
271880cd8763SFan Zhang 	/*
271980cd8763SFan Zhang 	 * If userspace sets the riccb (e.g. after migration) to a valid state,
272080cd8763SFan Zhang 	 * we should enable RI here instead of doing the lazy enablement.
272180cd8763SFan Zhang 	 */
272280cd8763SFan Zhang 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
272380cd8763SFan Zhang 	    test_kvm_facility(vcpu->kvm, 64)) {
272480cd8763SFan Zhang 		struct runtime_instr_cb *riccb =
272580cd8763SFan Zhang 			(struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
272680cd8763SFan Zhang 
272780cd8763SFan Zhang 		if (riccb->valid)
272880cd8763SFan Zhang 			vcpu->arch.sie_block->ecb3 |= 0x01;
272980cd8763SFan Zhang 	}
273080cd8763SFan Zhang 
2731b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
2732b028ee3eSDavid Hildenbrand }
2733b028ee3eSDavid Hildenbrand 
/*
 * Mirror of sync_regs(): copy the current vcpu / SIE block register
 * state back into kvm_run so userspace sees it after the run returns.
 */
2734b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2735b028ee3eSDavid Hildenbrand {
2736b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2737b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2738b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2739b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
27404287f247SDavid Hildenbrand 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
2741b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2742b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2743b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2744b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2745b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2746b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2747b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2748b028ee3eSDavid Hildenbrand }
2749b028ee3eSDavid Hildenbrand 
/*
 * KVM_RUN ioctl entry point.
 * Handles pending guest-debug exits up front, swaps in the vcpu's
 * signal mask, auto-starts the vcpu unless userspace controls the cpu
 * state (a stopped vcpu is then an error), syncs registers in, runs the
 * inner loop, and translates loop results: pending signals become
 * KVM_EXIT_INTR / -EINTR, -EREMOTE means kvm_run was already prepared
 * for userspace and is reported as success.
 */
2750b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2751b0c632dbSHeiko Carstens {
27528f2abe6aSChristian Borntraeger 	int rc;
2753b0c632dbSHeiko Carstens 	sigset_t sigsaved;
2754b0c632dbSHeiko Carstens 
275527291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
275627291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
275727291e21SDavid Hildenbrand 		return 0;
275827291e21SDavid Hildenbrand 	}
275927291e21SDavid Hildenbrand 
2760b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2761b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2762b0c632dbSHeiko Carstens 
27636352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
27646852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
27656352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
2766ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
27676352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
27686352e4d2SDavid Hildenbrand 		return -EINVAL;
27696352e4d2SDavid Hildenbrand 	}
2770b0c632dbSHeiko Carstens 
2771b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
2772db0758b2SDavid Hildenbrand 	enable_cpu_timer_accounting(vcpu);
2773d7b0b5ebSCarsten Otte 
2774dab4079dSHeiko Carstens 	might_fault();
2775e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
27769ace903dSChristian Ehrhardt 
2777b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
2778b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
27798f2abe6aSChristian Borntraeger 		rc = -EINTR;
2780b1d16c49SChristian Ehrhardt 	}
27818f2abe6aSChristian Borntraeger 
278227291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
278327291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
278427291e21SDavid Hildenbrand 		rc = 0;
278527291e21SDavid Hildenbrand 	}
278627291e21SDavid Hildenbrand 
27878f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
278871f116bfSDavid Hildenbrand 		/* userspace support is needed, kvm_run has been prepared */
27898f2abe6aSChristian Borntraeger 		rc = 0;
27908f2abe6aSChristian Borntraeger 	}
27918f2abe6aSChristian Borntraeger 
2792db0758b2SDavid Hildenbrand 	disable_cpu_timer_accounting(vcpu);
2793b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
2794d7b0b5ebSCarsten Otte 
2795b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2796b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2797b0c632dbSHeiko Carstens 
2798b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
27997e8e6ab4SHeiko Carstens 	return rc;
2800b0c632dbSHeiko Carstens }
2801b0c632dbSHeiko Carstens 
2802b0c632dbSHeiko Carstens /*
2803b0c632dbSHeiko Carstens  * store status at address
2804b0c632dbSHeiko Carstens  * we have two special cases:
2805b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2806b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2807b0c632dbSHeiko Carstens  *
 * Writes the architected store-status save area (FP/vector regs, gprs,
 * PSW, prefix, fpc, TOD programmable reg, cpu timer, clock comparator,
 * access and control regs) to guest absolute memory at gpa. Returns 0
 * on success or -EFAULT if any guest write fails.
2808b0c632dbSHeiko Carstens  */
2808d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2809b0c632dbSHeiko Carstens {
2810092670cdSCarsten Otte 	unsigned char archmode = 1;
28119abc2a08SDavid Hildenbrand 	freg_t fprs[NUM_FPRS];
2812fda902cbSMichael Mueller 	unsigned int px;
28134287f247SDavid Hildenbrand 	u64 clkcomp, cputm;
2814d0bce605SHeiko Carstens 	int rc;
2815b0c632dbSHeiko Carstens 
2816d9a3a09aSMartin Schwidefsky 	px = kvm_s390_get_prefix(vcpu);
2817d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2818d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
2819b0c632dbSHeiko Carstens 			return -EFAULT;
2820d9a3a09aSMartin Schwidefsky 		gpa = 0;
2821d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2822d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
2823b0c632dbSHeiko Carstens 			return -EFAULT;
2824d9a3a09aSMartin Schwidefsky 		gpa = px;
2825d9a3a09aSMartin Schwidefsky 	} else
2826d9a3a09aSMartin Schwidefsky 		gpa -= __LC_FPREGS_SAVE_AREA;
28279abc2a08SDavid Hildenbrand 
28289abc2a08SDavid Hildenbrand 	/* manually convert vector registers if necessary */
28299abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX) {
28309522b37fSDavid Hildenbrand 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
2831d9a3a09aSMartin Schwidefsky 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
28329abc2a08SDavid Hildenbrand 				     fprs, 128);
28339abc2a08SDavid Hildenbrand 	} else {
28349abc2a08SDavid Hildenbrand 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
28356fd8e67dSDavid Hildenbrand 				     vcpu->run->s.regs.fprs, 128);
28369abc2a08SDavid Hildenbrand 	}
	/* rc accumulates failures; any -EFAULT below makes the whole call fail */
2837d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
2838d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
2839d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
2840d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2841d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
2842fda902cbSMichael Mueller 			      &px, 4);
2843d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
28449abc2a08SDavid Hildenbrand 			      &vcpu->run->s.regs.fpc, 4);
2845d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
2846d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
28474287f247SDavid Hildenbrand 	cputm = kvm_s390_get_cpu_timer(vcpu);
2848d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
28494287f247SDavid Hildenbrand 			      &cputm, 8);
2850178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2851d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
2852d0bce605SHeiko Carstens 			      &clkcomp, 8);
2853d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
2854d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2855d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
2856d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2857d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2858b0c632dbSHeiko Carstens }
2859b0c632dbSHeiko Carstens 
/*
 * Store the status of a (possibly running) vcpu at guest absolute
 * address @addr, after first syncing the lazily-switched register
 * state back into the vcpu's run structure.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	/* save_fpu_regs() must run before reading fpc/acrs below */
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
2873e879892cSThomas Huth 
/* Ask one vcpu to leave the IBS state, revoking any pending ENABLE first. */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	/* consume a not-yet-processed ENABLE request so it cannot race us */
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}
28798ad35755SDavid Hildenbrand 
28808ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
28818ad35755SDavid Hildenbrand {
28828ad35755SDavid Hildenbrand 	unsigned int i;
28838ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
28848ad35755SDavid Hildenbrand 
28858ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
28868ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
28878ad35755SDavid Hildenbrand 	}
28888ad35755SDavid Hildenbrand }
28898ad35755SDavid Hildenbrand 
28908ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
28918ad35755SDavid Hildenbrand {
289209a400e7SDavid Hildenbrand 	if (!sclp.has_ibs)
289309a400e7SDavid Hildenbrand 		return;
28948ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
28958e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
28968ad35755SDavid Hildenbrand }
28978ad35755SDavid Hildenbrand 
/*
 * Move @vcpu out of the STOPPED state, managing the IBS facility
 * depending on how many vcpus of the VM are already running.
 */
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	/* already running -> nothing to do */
	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* count the vcpus that are not stopped (excluding ourselves) */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	/* clear the STOPPED flag only after the IBS handling above */
	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
29366852d7b6SDavid Hildenbrand 
/*
 * Move @vcpu into the STOPPED state; if exactly one other vcpu remains
 * running afterwards, enable the IBS facility for that vcpu.
 */
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	/* already stopped -> nothing to do */
	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	/* find the vcpus that are still running; remember the last one */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
29746852d7b6SDavid Hildenbrand 
2975d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2976d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2977d6712df9SCornelia Huck {
2978d6712df9SCornelia Huck 	int r;
2979d6712df9SCornelia Huck 
2980d6712df9SCornelia Huck 	if (cap->flags)
2981d6712df9SCornelia Huck 		return -EINVAL;
2982d6712df9SCornelia Huck 
2983d6712df9SCornelia Huck 	switch (cap->cap) {
2984fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2985fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2986fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2987c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2988fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2989fa6b7fe9SCornelia Huck 		}
2990fa6b7fe9SCornelia Huck 		r = 0;
2991fa6b7fe9SCornelia Huck 		break;
2992d6712df9SCornelia Huck 	default:
2993d6712df9SCornelia Huck 		r = -EINVAL;
2994d6712df9SCornelia Huck 		break;
2995d6712df9SCornelia Huck 	}
2996d6712df9SCornelia Huck 	return r;
2997d6712df9SCornelia Huck }
2998d6712df9SCornelia Huck 
/*
 * Handle the KVM_S390_MEM_OP vcpu ioctl: read or write guest logical
 * memory via a kernel bounce buffer, or merely check accessibility when
 * KVM_S390_MEMOP_F_CHECK_ONLY is set.
 *
 * Returns 0 on success, < 0 on error, and > 0 when the guest access
 * raised a program interruption (which is optionally injected below).
 */
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	/* a bounce buffer is only needed when data is actually copied */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	/* guest memory access requires the memslot srcu to be held */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	/* r > 0 denotes a program interruption from the guest access */
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
305941408c28SThomas Huth 
/*
 * Dispatcher for the s390 arch-specific vcpu ioctls. Each case copies
 * its argument structure from user space (failing with -EFAULT) and
 * forwards to the matching handler; unknown ioctls yield -ENOTTY.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		/* legacy interface: translate to a kvm_s390_irq first */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* storing status accesses guest memory -> hold srcu */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for user controlled virtual machines */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for user controlled virtual machines */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		/* the buffer must hold a whole number of irqs, at least one */
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *)  irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
3214b0c632dbSHeiko Carstens 
32155b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
32165b1c1493SCarsten Otte {
32175b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
32185b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
32195b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
32205b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
32215b1c1493SCarsten Otte 		get_page(vmf->page);
32225b1c1493SCarsten Otte 		return 0;
32235b1c1493SCarsten Otte 	}
32245b1c1493SCarsten Otte #endif
32255b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
32265b1c1493SCarsten Otte }
32275b1c1493SCarsten Otte 
/*
 * No arch-specific per-memslot state is needed on s390; the gmap is
 * updated in kvm_arch_commit_memory_region() instead.
 */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
3233db3fe4ebSTakuya Yoshikawa 
3234b0c632dbSHeiko Carstens /* Section: memory related */
3235f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
3236f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
323709170a49SPaolo Bonzini 				   const struct kvm_userspace_memory_region *mem,
32387b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
3239b0c632dbSHeiko Carstens {
3240dd2887e7SNick Wang 	/* A few sanity checks. We can have memory slots which have to be
3241dd2887e7SNick Wang 	   located/ended at a segment boundary (1MB). The memory in userland is
3242dd2887e7SNick Wang 	   ok to be fragmented into various different vmas. It is okay to mmap()
3243dd2887e7SNick Wang 	   and munmap() stuff in this slot after doing this call at any time */
3244b0c632dbSHeiko Carstens 
3245598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
3246b0c632dbSHeiko Carstens 		return -EINVAL;
3247b0c632dbSHeiko Carstens 
3248598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
3249b0c632dbSHeiko Carstens 		return -EINVAL;
3250b0c632dbSHeiko Carstens 
3251a3a92c31SDominik Dingel 	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3252a3a92c31SDominik Dingel 		return -EINVAL;
3253a3a92c31SDominik Dingel 
3254f7784b8eSMarcelo Tosatti 	return 0;
3255f7784b8eSMarcelo Tosatti }
3256f7784b8eSMarcelo Tosatti 
3257f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
325809170a49SPaolo Bonzini 				const struct kvm_userspace_memory_region *mem,
32598482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
3260f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
32618482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
3262f7784b8eSMarcelo Tosatti {
3263f7850c92SCarsten Otte 	int rc;
3264f7784b8eSMarcelo Tosatti 
32652cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
32662cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
32672cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
32682cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
32692cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
32702cef4debSChristian Borntraeger 	 */
32712cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
32722cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
32732cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
32742cef4debSChristian Borntraeger 		return;
3275598841caSCarsten Otte 
3276598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3277598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
3278598841caSCarsten Otte 	if (rc)
3279ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
3280598841caSCarsten Otte 	return;
3281b0c632dbSHeiko Carstens }
3282b0c632dbSHeiko Carstens 
328360a37709SAlexander Yarygin static inline unsigned long nonhyp_mask(int i)
328460a37709SAlexander Yarygin {
328560a37709SAlexander Yarygin 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
328660a37709SAlexander Yarygin 
328760a37709SAlexander Yarygin 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
328860a37709SAlexander Yarygin }
328960a37709SAlexander Yarygin 
/* Reset the wakeup validity flag once the vcpu leaves the blocked state. */
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}
32943491caf2SChristian Borntraeger 
/*
 * Module initialization: bail out if the hardware cannot run guests
 * (no SIEF2), merge the host facility bits into the facility list
 * mask, and register with the generic kvm core.
 */
static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	/* fold in host facilities, filtered per quarter via nonhyp_mask() */
	for (i = 0; i < 16; i++)
		kvm_s390_fac_list_mask[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
3310b0c632dbSHeiko Carstens 
/* Module teardown: unregister from the generic kvm core. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
3315b0c632dbSHeiko Carstens 
/* register module entry/exit points */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
3327