xref: /linux/arch/s390/kvm/kvm-s390.c (revision a011eeb2a3d6cd778eb63bea0bf149ebbe658ab5)
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

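/*
 * Wire up the gmap notifier callback and register for epoch-delta
 * (TOD clock change) notifications so kvm_clock_sync() above runs
 * whenever the host clock is adjusted.
 */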
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

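/*
 * Allocate the "kvm-trace" s390 debug feature and register the floating
 * interrupt controller (FLIC) device ops with the KVM core.
 */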
int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

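/*
 * Report which KVM capabilities are supported for s390 guests; extensions
 * that carry a value (e.g. KVM_S390_MEM_OP, NR_VCPUS) report their limit
 * instead of 1.
 */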
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
				  : KVM_S390_BSCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

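/*
 * Transfer dirty bits from the host/gmap page tables into the memslot's
 * dirty bitmap, rescheduling periodically and bailing out early when a
 * fatal signal is pending.
 */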
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

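/*
 * Handle KVM_ENABLE_CAP on the VM fd. Capabilities that change the CPU
 * model (vector registers, runtime instrumentation) can only be enabled
 * while no VCPUs have been created yet.
 */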
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

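/*
 * Set memory-control attributes: enable CMMA, reset the CMMA state, or
 * replace the gmap to change the guest memory limit (only while no VCPU
 * has been created).
 */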
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

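/*
 * Set crypto attributes: toggle AES/DEA key wrapping and generate fresh
 * wrapping key masks, then refresh every VCPU's crypto setup and kick it
 * out of SIE so the new settings take effect.
 */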
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

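/*
 * Guest TOD clock attributes (group KVM_S390_VM_TOD): the epoch is split
 * into a high part (extension, currently required to be 0) and a low part
 * (base), each set/queried through KVM_SET/GET_DEVICE_ATTR on the VM fd.
 *
 * Illustrative userspace sketch (not part of this file; it only uses the
 * generic struct kvm_device_attr layout):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)&gtod,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */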
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

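/*
 * Set the guest CPU model (cpuid, IBC, facility list) from userspace.
 * The requested IBC value is clamped to the range reported by the SCLP,
 * and changes are only allowed before any VCPU has been created.
 */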
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

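/*
 * KVM_S390_GET_SKEYS: read the guest storage keys for a range of guest
 * frames into a userspace buffer; returns KVM_S390_GET_SKEYS_NONE if the
 * guest is not using storage keys.
 */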
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

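/*
 * KVM_S390_SET_SKEYS: copy storage keys from userspace and apply them to
 * the guest, enabling storage-key handling for the mm first.
 */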
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

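/*
 * Query the AP configuration via the PQAP(QCI) instruction; the result is
 * used below to detect APXA and to pick the CRYCB format.
 */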
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

1104c54f0d6aSDavid Hildenbrand static void kvm_s390_crypto_init(struct kvm *kvm)
11055102ee87STony Krowiak {
11069d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
1107c54f0d6aSDavid Hildenbrand 		return;
11085102ee87STony Krowiak 
1109c54f0d6aSDavid Hildenbrand 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
111045c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
11115102ee87STony Krowiak 
1112ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
1113ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
1114ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
1115ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1116ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1117ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1118ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
11195102ee87STony Krowiak }
11205102ee87STony Krowiak 
11217d43bafcSEugene (jno) Dvurechenski static void sca_dispose(struct kvm *kvm)
11227d43bafcSEugene (jno) Dvurechenski {
11237d43bafcSEugene (jno) Dvurechenski 	if (kvm->arch.use_esca)
11245e044315SEugene (jno) Dvurechenski 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
11257d43bafcSEugene (jno) Dvurechenski 	else
11267d43bafcSEugene (jno) Dvurechenski 		free_page((unsigned long)(kvm->arch.sca));
11277d43bafcSEugene (jno) Dvurechenski 	kvm->arch.sca = NULL;
11287d43bafcSEugene (jno) Dvurechenski }
11297d43bafcSEugene (jno) Dvurechenski 
1130e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1131b0c632dbSHeiko Carstens {
11329d8d5786SMichael Mueller 	int i, rc;
1133b0c632dbSHeiko Carstens 	char debug_name[16];
1134f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
1135b0c632dbSHeiko Carstens 
1136e08b9637SCarsten Otte 	rc = -EINVAL;
1137e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1138e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
1139e08b9637SCarsten Otte 		goto out_err;
1140e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1141e08b9637SCarsten Otte 		goto out_err;
1142e08b9637SCarsten Otte #else
1143e08b9637SCarsten Otte 	if (type)
1144e08b9637SCarsten Otte 		goto out_err;
1145e08b9637SCarsten Otte #endif
1146e08b9637SCarsten Otte 
1147b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
1148b0c632dbSHeiko Carstens 	if (rc)
1149d89f5effSJan Kiszka 		goto out_err;
1150b0c632dbSHeiko Carstens 
1151b290411aSCarsten Otte 	rc = -ENOMEM;
1152b290411aSCarsten Otte 
11537d43bafcSEugene (jno) Dvurechenski 	kvm->arch.use_esca = 0; /* start with basic SCA */
11545e044315SEugene (jno) Dvurechenski 	rwlock_init(&kvm->arch.sca_lock);
1155bc784cceSEugene (jno) Dvurechenski 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
1156b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
1157d89f5effSJan Kiszka 		goto out_err;
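	/*
	 * Stagger the basic SCA within its page: each new VM places its SCA
	 * 16 bytes further into the page (wrapping before it would overflow),
	 * presumably so that the SCAs of different VMs do not all share the
	 * same cache lines.
	 */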
1158f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
1159c5c2c393SDavid Hildenbrand 	sca_offset += 16;
1160bc784cceSEugene (jno) Dvurechenski 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
1161c5c2c393SDavid Hildenbrand 		sca_offset = 0;
1162bc784cceSEugene (jno) Dvurechenski 	kvm->arch.sca = (struct bsca_block *)
1163bc784cceSEugene (jno) Dvurechenski 			((char *) kvm->arch.sca + sca_offset);
1164f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
1165b0c632dbSHeiko Carstens 
1166b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
1167b0c632dbSHeiko Carstens 
11681cb9cf72SChristian Borntraeger 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1169b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
117040f5b735SDominik Dingel 		goto out_err;
1171b0c632dbSHeiko Carstens 
1172c54f0d6aSDavid Hildenbrand 	kvm->arch.sie_page2 =
1173c54f0d6aSDavid Hildenbrand 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1174c54f0d6aSDavid Hildenbrand 	if (!kvm->arch.sie_page2)
117540f5b735SDominik Dingel 		goto out_err;
11769d8d5786SMichael Mueller 
1177fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
1178c54f0d6aSDavid Hildenbrand 	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
117994422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
11809d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
11819d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
1182c54f0d6aSDavid Hildenbrand 			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
11839d8d5786SMichael Mueller 		else
1184c54f0d6aSDavid Hildenbrand 			kvm->arch.model.fac_mask[i] = 0UL;
11859d8d5786SMichael Mueller 	}
11869d8d5786SMichael Mueller 
1187981467c9SMichael Mueller 	/* Populate the facility list initially. */
1188c54f0d6aSDavid Hildenbrand 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
1189c54f0d6aSDavid Hildenbrand 	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
1190981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
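	/*
	 * fac_mask limits what can ever be made visible to guests of this VM
	 * (host facilities intersected with the KVM-supported mask), while
	 * fac_list holds the facilities currently offered to the guest and
	 * may later be reduced via the CPU model attributes.
	 */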
1191981467c9SMichael Mueller 
11929bb0ec09SDavid Hildenbrand 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
119337c5f6c8SDavid Hildenbrand 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
11949d8d5786SMichael Mueller 
1195c54f0d6aSDavid Hildenbrand 	kvm_s390_crypto_init(kvm);
11965102ee87STony Krowiak 
1197ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
11986d3da241SJens Freimann 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
11996d3da241SJens Freimann 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
12008a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
1201a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
1202ba5c1e9bSCarsten Otte 
1203b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
120478f26131SChristian Borntraeger 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
1205b0c632dbSHeiko Carstens 
1206e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
1207e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
1208a3a92c31SDominik Dingel 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
1209e08b9637SCarsten Otte 	} else {
121032e6b236SGuenther Hutzl 		if (sclp.hamax == U64_MAX)
1211a3a92c31SDominik Dingel 			kvm->arch.mem_limit = TASK_MAX_SIZE;
121232e6b236SGuenther Hutzl 		else
121332e6b236SGuenther Hutzl 			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
121432e6b236SGuenther Hutzl 						    sclp.hamax + 1);
1215a3a92c31SDominik Dingel 		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
1216598841caSCarsten Otte 		if (!kvm->arch.gmap)
121740f5b735SDominik Dingel 			goto out_err;
12182c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
121924eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
1220e08b9637SCarsten Otte 	}
1221fa6b7fe9SCornelia Huck 
1222fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
122384223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
122472f25020SJason J. Herne 	kvm->arch.epoch = 0;
1225fa6b7fe9SCornelia Huck 
12268ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
12278335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
12288ad35755SDavid Hildenbrand 
1229d89f5effSJan Kiszka 	return 0;
1230d89f5effSJan Kiszka out_err:
1231c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
123240f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
12337d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
123478f26131SChristian Borntraeger 	KVM_EVENT(3, "creation of vm failed: %d", rc);
1235d89f5effSJan Kiszka 	return rc;
1236b0c632dbSHeiko Carstens }
1237b0c632dbSHeiko Carstens 
1238d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1239d329c035SChristian Borntraeger {
1240d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1241ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
124267335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
12433c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
1244bc784cceSEugene (jno) Dvurechenski 	if (!kvm_is_ucontrol(vcpu->kvm))
1245a6e2f683SEugene (jno) Dvurechenski 		sca_del_vcpu(vcpu);
124627e0393fSCarsten Otte 
124727e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
124827e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
124927e0393fSCarsten Otte 
1250e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
1251b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
1252d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
1253b31288faSKonstantin Weitz 
12546692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
1255b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
1256d329c035SChristian Borntraeger }
1257d329c035SChristian Borntraeger 
1258d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
1259d329c035SChristian Borntraeger {
1260d329c035SChristian Borntraeger 	unsigned int i;
1261988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1262d329c035SChristian Borntraeger 
1263988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1264988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1265988a2caeSGleb Natapov 
1266988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1267988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1268d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1269988a2caeSGleb Natapov 
1270988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1271988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1272d329c035SChristian Borntraeger }
1273d329c035SChristian Borntraeger 
1274b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1275b0c632dbSHeiko Carstens {
1276d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
12777d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
1278d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
1279c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
128027e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
1281598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
1282841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
128367335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
12848335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
1285b0c632dbSHeiko Carstens }
1286b0c632dbSHeiko Carstens 
1287b0c632dbSHeiko Carstens /* Section: vcpu related */
1288dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1289b0c632dbSHeiko Carstens {
1290c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
129127e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
129227e0393fSCarsten Otte 		return -ENOMEM;
12932c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1294dafd032aSDominik Dingel 
129527e0393fSCarsten Otte 	return 0;
129627e0393fSCarsten Otte }
129727e0393fSCarsten Otte 
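/*
 * The SCA helpers below must cope with either layout: the basic SCA (a
 * single page) or the extended SCA, selected by kvm->arch.use_esca and
 * protected against a concurrent format switch by the sca_lock.
 */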
1298a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1299a6e2f683SEugene (jno) Dvurechenski {
13005e044315SEugene (jno) Dvurechenski 	read_lock(&vcpu->kvm->arch.sca_lock);
13017d43bafcSEugene (jno) Dvurechenski 	if (vcpu->kvm->arch.use_esca) {
13027d43bafcSEugene (jno) Dvurechenski 		struct esca_block *sca = vcpu->kvm->arch.sca;
13037d43bafcSEugene (jno) Dvurechenski 
13047d43bafcSEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
13057d43bafcSEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
13067d43bafcSEugene (jno) Dvurechenski 	} else {
1307bc784cceSEugene (jno) Dvurechenski 		struct bsca_block *sca = vcpu->kvm->arch.sca;
1308a6e2f683SEugene (jno) Dvurechenski 
1309a6e2f683SEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1310a6e2f683SEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
1311a6e2f683SEugene (jno) Dvurechenski 	}
13125e044315SEugene (jno) Dvurechenski 	read_unlock(&vcpu->kvm->arch.sca_lock);
13137d43bafcSEugene (jno) Dvurechenski }
1314a6e2f683SEugene (jno) Dvurechenski 
1315eaa78f34SDavid Hildenbrand static void sca_add_vcpu(struct kvm_vcpu *vcpu)
1316a6e2f683SEugene (jno) Dvurechenski {
1317eaa78f34SDavid Hildenbrand 	read_lock(&vcpu->kvm->arch.sca_lock);
1318eaa78f34SDavid Hildenbrand 	if (vcpu->kvm->arch.use_esca) {
1319eaa78f34SDavid Hildenbrand 		struct esca_block *sca = vcpu->kvm->arch.sca;
13207d43bafcSEugene (jno) Dvurechenski 
1321eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
13227d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
13237d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
132425508824SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= 0x04U;
1325eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
13267d43bafcSEugene (jno) Dvurechenski 	} else {
1327eaa78f34SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
1328a6e2f683SEugene (jno) Dvurechenski 
1329eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1330a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1331a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
1332eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1333a6e2f683SEugene (jno) Dvurechenski 	}
1334eaa78f34SDavid Hildenbrand 	read_unlock(&vcpu->kvm->arch.sca_lock);
13355e044315SEugene (jno) Dvurechenski }
13365e044315SEugene (jno) Dvurechenski 
13375e044315SEugene (jno) Dvurechenski /* Basic SCA to Extended SCA data copy routines */
13385e044315SEugene (jno) Dvurechenski static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
13395e044315SEugene (jno) Dvurechenski {
13405e044315SEugene (jno) Dvurechenski 	d->sda = s->sda;
13415e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.c = s->sigp_ctrl.c;
13425e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
13435e044315SEugene (jno) Dvurechenski }
13445e044315SEugene (jno) Dvurechenski 
13455e044315SEugene (jno) Dvurechenski static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
13465e044315SEugene (jno) Dvurechenski {
13475e044315SEugene (jno) Dvurechenski 	int i;
13485e044315SEugene (jno) Dvurechenski 
13495e044315SEugene (jno) Dvurechenski 	d->ipte_control = s->ipte_control;
13505e044315SEugene (jno) Dvurechenski 	d->mcn[0] = s->mcn;
13515e044315SEugene (jno) Dvurechenski 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
13525e044315SEugene (jno) Dvurechenski 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
13535e044315SEugene (jno) Dvurechenski }
13545e044315SEugene (jno) Dvurechenski 
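/*
 * Replace the basic SCA with an extended SCA: all vcpus are blocked and the
 * sca_lock is held for writing while the SCA origin in every SIE block is
 * rewritten, so no vcpu can enter SIE with a stale SCA designation.
 */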
13555e044315SEugene (jno) Dvurechenski static int sca_switch_to_extended(struct kvm *kvm)
13565e044315SEugene (jno) Dvurechenski {
13575e044315SEugene (jno) Dvurechenski 	struct bsca_block *old_sca = kvm->arch.sca;
13585e044315SEugene (jno) Dvurechenski 	struct esca_block *new_sca;
13595e044315SEugene (jno) Dvurechenski 	struct kvm_vcpu *vcpu;
13605e044315SEugene (jno) Dvurechenski 	unsigned int vcpu_idx;
13615e044315SEugene (jno) Dvurechenski 	u32 scaol, scaoh;
13625e044315SEugene (jno) Dvurechenski 
13635e044315SEugene (jno) Dvurechenski 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
13645e044315SEugene (jno) Dvurechenski 	if (!new_sca)
13655e044315SEugene (jno) Dvurechenski 		return -ENOMEM;
13665e044315SEugene (jno) Dvurechenski 
13675e044315SEugene (jno) Dvurechenski 	scaoh = (u32)((u64)(new_sca) >> 32);
13685e044315SEugene (jno) Dvurechenski 	scaol = (u32)(u64)(new_sca) & ~0x3fU;
13695e044315SEugene (jno) Dvurechenski 
13705e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_block_all(kvm);
13715e044315SEugene (jno) Dvurechenski 	write_lock(&kvm->arch.sca_lock);
13725e044315SEugene (jno) Dvurechenski 
13735e044315SEugene (jno) Dvurechenski 	sca_copy_b_to_e(new_sca, old_sca);
13745e044315SEugene (jno) Dvurechenski 
13755e044315SEugene (jno) Dvurechenski 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
13765e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = scaoh;
13775e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = scaol;
13785e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->ecb2 |= 0x04U;
13795e044315SEugene (jno) Dvurechenski 	}
13805e044315SEugene (jno) Dvurechenski 	kvm->arch.sca = new_sca;
13815e044315SEugene (jno) Dvurechenski 	kvm->arch.use_esca = 1;
13825e044315SEugene (jno) Dvurechenski 
13835e044315SEugene (jno) Dvurechenski 	write_unlock(&kvm->arch.sca_lock);
13845e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_unblock_all(kvm);
13855e044315SEugene (jno) Dvurechenski 
13865e044315SEugene (jno) Dvurechenski 	free_page((unsigned long)old_sca);
13875e044315SEugene (jno) Dvurechenski 
13888335713aSChristian Borntraeger 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
13898335713aSChristian Borntraeger 		 old_sca, kvm->arch.sca);
13905e044315SEugene (jno) Dvurechenski 	return 0;
13917d43bafcSEugene (jno) Dvurechenski }
1392a6e2f683SEugene (jno) Dvurechenski 
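/*
 * vcpu ids beyond the basic SCA's slots are only accepted if the machine
 * supports the extended SCA; in that case the VM is converted on demand
 * before the new vcpu is created.
 */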
1393a6e2f683SEugene (jno) Dvurechenski static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1394a6e2f683SEugene (jno) Dvurechenski {
13955e044315SEugene (jno) Dvurechenski 	int rc;
13965e044315SEugene (jno) Dvurechenski 
13975e044315SEugene (jno) Dvurechenski 	if (id < KVM_S390_BSCA_CPU_SLOTS)
13985e044315SEugene (jno) Dvurechenski 		return true;
13995e044315SEugene (jno) Dvurechenski 	if (!sclp.has_esca)
14005e044315SEugene (jno) Dvurechenski 		return false;
14015e044315SEugene (jno) Dvurechenski 
14025e044315SEugene (jno) Dvurechenski 	mutex_lock(&kvm->lock);
14035e044315SEugene (jno) Dvurechenski 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
14045e044315SEugene (jno) Dvurechenski 	mutex_unlock(&kvm->lock);
14055e044315SEugene (jno) Dvurechenski 
14065e044315SEugene (jno) Dvurechenski 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
1407a6e2f683SEugene (jno) Dvurechenski }
1408a6e2f683SEugene (jno) Dvurechenski 
1409dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1410dafd032aSDominik Dingel {
1411dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1412dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
141359674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
141459674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
14159eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1416b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1417b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1418b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
1419c6e5f166SFan Zhang 	if (test_kvm_facility(vcpu->kvm, 64))
1420c6e5f166SFan Zhang 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
1421f6aa6dc4SDavid Hildenbrand 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
1422f6aa6dc4SDavid Hildenbrand 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1423f6aa6dc4SDavid Hildenbrand 	 */
1424f6aa6dc4SDavid Hildenbrand 	if (MACHINE_HAS_VX)
142568c55750SEric Farman 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
14266fd8e67dSDavid Hildenbrand 	else
14276fd8e67dSDavid Hildenbrand 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
1428dafd032aSDominik Dingel 
1429dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1430dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1431dafd032aSDominik Dingel 
1432b0c632dbSHeiko Carstens 	return 0;
1433b0c632dbSHeiko Carstens }
1434b0c632dbSHeiko Carstens 
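/*
 * Guest CPU timer handling: while accounting is running, the time elapsed
 * since cputm_start still has to be subtracted from the value in the SIE
 * block.  The vcpu thread brackets its updates with cputm_seqcount so that
 * kvm_s390_get_cpu_timer() can compute a consistent value locklessly from
 * other threads.
 */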
1435db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1436db0758b2SDavid Hildenbrand static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1437db0758b2SDavid Hildenbrand {
1438db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
14399c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1440db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = get_tod_clock_fast();
14419c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1442db0758b2SDavid Hildenbrand }
1443db0758b2SDavid Hildenbrand 
1444db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1445db0758b2SDavid Hildenbrand static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1446db0758b2SDavid Hildenbrand {
1447db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
14489c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1449db0758b2SDavid Hildenbrand 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1450db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = 0;
14519c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1452db0758b2SDavid Hildenbrand }
1453db0758b2SDavid Hildenbrand 
1454db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1455db0758b2SDavid Hildenbrand static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1456db0758b2SDavid Hildenbrand {
1457db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
1458db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = true;
1459db0758b2SDavid Hildenbrand 	__start_cpu_timer_accounting(vcpu);
1460db0758b2SDavid Hildenbrand }
1461db0758b2SDavid Hildenbrand 
1462db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1463db0758b2SDavid Hildenbrand static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1464db0758b2SDavid Hildenbrand {
1465db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
1466db0758b2SDavid Hildenbrand 	__stop_cpu_timer_accounting(vcpu);
1467db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = false;
1468db0758b2SDavid Hildenbrand }
1469db0758b2SDavid Hildenbrand 
1470db0758b2SDavid Hildenbrand static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1471db0758b2SDavid Hildenbrand {
1472db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1473db0758b2SDavid Hildenbrand 	__enable_cpu_timer_accounting(vcpu);
1474db0758b2SDavid Hildenbrand 	preempt_enable();
1475db0758b2SDavid Hildenbrand }
1476db0758b2SDavid Hildenbrand 
1477db0758b2SDavid Hildenbrand static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1478db0758b2SDavid Hildenbrand {
1479db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1480db0758b2SDavid Hildenbrand 	__disable_cpu_timer_accounting(vcpu);
1481db0758b2SDavid Hildenbrand 	preempt_enable();
1482db0758b2SDavid Hildenbrand }
1483db0758b2SDavid Hildenbrand 
14844287f247SDavid Hildenbrand /* set the cpu timer - may only be called from the VCPU thread itself */
14854287f247SDavid Hildenbrand void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
14864287f247SDavid Hildenbrand {
1487db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
14889c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1489db0758b2SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled)
1490db0758b2SDavid Hildenbrand 		vcpu->arch.cputm_start = get_tod_clock_fast();
14914287f247SDavid Hildenbrand 	vcpu->arch.sie_block->cputm = cputm;
14929c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1493db0758b2SDavid Hildenbrand 	preempt_enable();
14944287f247SDavid Hildenbrand }
14954287f247SDavid Hildenbrand 
1496db0758b2SDavid Hildenbrand /* update and get the cpu timer - can also be called from other VCPU threads */
14974287f247SDavid Hildenbrand __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
14984287f247SDavid Hildenbrand {
14999c23a131SDavid Hildenbrand 	unsigned int seq;
1500db0758b2SDavid Hildenbrand 	__u64 value;
1501db0758b2SDavid Hildenbrand 
1502db0758b2SDavid Hildenbrand 	if (unlikely(!vcpu->arch.cputm_enabled))
15034287f247SDavid Hildenbrand 		return vcpu->arch.sie_block->cputm;
1504db0758b2SDavid Hildenbrand 
15059c23a131SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
15069c23a131SDavid Hildenbrand 	do {
15079c23a131SDavid Hildenbrand 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
15089c23a131SDavid Hildenbrand 		/*
15099c23a131SDavid Hildenbrand 		 * If the writer would ever execute a read in the critical
15109c23a131SDavid Hildenbrand 		 * section, e.g. in irq context, we have a deadlock.
15119c23a131SDavid Hildenbrand 		 */
15129c23a131SDavid Hildenbrand 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
1513db0758b2SDavid Hildenbrand 		value = vcpu->arch.sie_block->cputm;
15149c23a131SDavid Hildenbrand 		/* if cputm_start is 0, accounting is being started/stopped */
15159c23a131SDavid Hildenbrand 		if (likely(vcpu->arch.cputm_start))
1516db0758b2SDavid Hildenbrand 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
15179c23a131SDavid Hildenbrand 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
15189c23a131SDavid Hildenbrand 	preempt_enable();
1519db0758b2SDavid Hildenbrand 	return value;
15204287f247SDavid Hildenbrand }
15214287f247SDavid Hildenbrand 
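/*
 * On load the host floating point/vector state is saved and
 * current->thread.fpu is redirected to the guest register area in the run
 * struct, so the normal lazy fpu handling operates on the guest registers
 * while the vcpu runs; kvm_arch_vcpu_put() reverses this.
 */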
1522b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1523b0c632dbSHeiko Carstens {
15249977e886SHendrik Brueckner 	/* Save host register state */
1525d0164ee2SHendrik Brueckner 	save_fpu_regs();
15269abc2a08SDavid Hildenbrand 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
15279abc2a08SDavid Hildenbrand 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
152896b2d7a8SHendrik Brueckner 
15296fd8e67dSDavid Hildenbrand 	if (MACHINE_HAS_VX)
15309abc2a08SDavid Hildenbrand 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
15316fd8e67dSDavid Hildenbrand 	else
15326fd8e67dSDavid Hildenbrand 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
15339abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
15349977e886SHendrik Brueckner 	if (test_fp_ctl(current->thread.fpu.fpc))
153596b2d7a8SHendrik Brueckner 		/* User space provided an invalid FPC, let's clear it */
15369977e886SHendrik Brueckner 		current->thread.fpu.fpc = 0;
15379977e886SHendrik Brueckner 
15389977e886SHendrik Brueckner 	save_access_regs(vcpu->arch.host_acrs);
153959674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1540480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
1541805de8f4SPeter Zijlstra 	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
15425ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
1543db0758b2SDavid Hildenbrand 		__start_cpu_timer_accounting(vcpu);
154401a745acSDavid Hildenbrand 	vcpu->cpu = cpu;
1545b0c632dbSHeiko Carstens }
1546b0c632dbSHeiko Carstens 
1547b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1548b0c632dbSHeiko Carstens {
154901a745acSDavid Hildenbrand 	vcpu->cpu = -1;
15505ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
1551db0758b2SDavid Hildenbrand 		__stop_cpu_timer_accounting(vcpu);
1552805de8f4SPeter Zijlstra 	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1553480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
15549977e886SHendrik Brueckner 
15559abc2a08SDavid Hildenbrand 	/* Save guest register state */
1556d0164ee2SHendrik Brueckner 	save_fpu_regs();
15579977e886SHendrik Brueckner 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
15589abc2a08SDavid Hildenbrand 
15599abc2a08SDavid Hildenbrand 	/* Restore host register state */
15609abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
15619abc2a08SDavid Hildenbrand 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
15629977e886SHendrik Brueckner 
15639977e886SHendrik Brueckner 	save_access_regs(vcpu->run->s.regs.acrs);
1564b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1565b0c632dbSHeiko Carstens }
1566b0c632dbSHeiko Carstens 
1567b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1568b0c632dbSHeiko Carstens {
1569b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in the POP, but we don't switch to ESA */
1570b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1571b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
15728d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
15734287f247SDavid Hildenbrand 	kvm_s390_set_cpu_timer(vcpu, 0);
1574b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1575b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1576b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1577b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1578b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
15799abc2a08SDavid Hildenbrand 	/* make sure the new fpc will be lazily loaded */
15809abc2a08SDavid Hildenbrand 	save_fpu_regs();
15819abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = 0;
1582b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1583672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
15843c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
15853c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
15866352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
15876852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
15882ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1589b0c632dbSHeiko Carstens }
1590b0c632dbSHeiko Carstens 
159131928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
159242897d86SMarcelo Tosatti {
159372f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
1594fdf03650SFan Zhang 	preempt_disable();
159572f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
1596fdf03650SFan Zhang 	preempt_enable();
159772f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
159825508824SDavid Hildenbrand 	if (!kvm_is_ucontrol(vcpu->kvm)) {
1599dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
1600eaa78f34SDavid Hildenbrand 		sca_add_vcpu(vcpu);
160125508824SDavid Hildenbrand 	}
160225508824SDavid Hildenbrand 
160342897d86SMarcelo Tosatti }
160442897d86SMarcelo Tosatti 
16055102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
16065102ee87STony Krowiak {
16079d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
16085102ee87STony Krowiak 		return;
16095102ee87STony Krowiak 
1610a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1611a374e892STony Krowiak 
1612a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1613a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1614a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1615a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1616a374e892STony Krowiak 
16175102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
16185102ee87STony Krowiak }
16195102ee87STony Krowiak 
1620b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1621b31605c1SDominik Dingel {
1622b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1623b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1624b31605c1SDominik Dingel }
1625b31605c1SDominik Dingel 
1626b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1627b31605c1SDominik Dingel {
1628b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1629b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1630b31605c1SDominik Dingel 		return -ENOMEM;
1631b31605c1SDominik Dingel 
1632b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1633b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1634b31605c1SDominik Dingel 	return 0;
1635b31605c1SDominik Dingel }
1636b31605c1SDominik Dingel 
163791520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
163891520f1aSMichael Mueller {
163991520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
164091520f1aSMichael Mueller 
164191520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
164280bc79dcSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 7))
1643c54f0d6aSDavid Hildenbrand 		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
164491520f1aSMichael Mueller }
164591520f1aSMichael Mueller 
1646b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1647b0c632dbSHeiko Carstens {
1648b31605c1SDominik Dingel 	int rc = 0;
1649b31288faSKonstantin Weitz 
16509e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
16519e6dabefSCornelia Huck 						    CPUSTAT_SM |
1652a4a4f191SGuenther Hutzl 						    CPUSTAT_STOPPED);
1653a4a4f191SGuenther Hutzl 
165453df84f8SGuenther Hutzl 	if (test_kvm_facility(vcpu->kvm, 78))
1655805de8f4SPeter Zijlstra 		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
165653df84f8SGuenther Hutzl 	else if (test_kvm_facility(vcpu->kvm, 8))
1657805de8f4SPeter Zijlstra 		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1658a4a4f191SGuenther Hutzl 
165991520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
166091520f1aSMichael Mueller 
1661bd50e8ecSDavid Hildenbrand 	vcpu->arch.sie_block->ecb = 0x02;
1662bd50e8ecSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 9))
1663bd50e8ecSDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= 0x04;
16649d8d5786SMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
16657feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
16667feb6bb8SMichael Mueller 
1667d6af0b49SDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 8))
1668d6af0b49SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= 0x08;
1669ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
167037c5f6c8SDavid Hildenbrand 	if (sclp.has_siif)
1671217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
167237c5f6c8SDavid Hildenbrand 	if (sclp.has_sigpif)
1673ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
1674c6e5f166SFan Zhang 	if (test_kvm_facility(vcpu->kvm, 64))
1675c6e5f166SFan Zhang 		vcpu->arch.sie_block->ecb3 |= 0x01;
167618280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
167713211ea7SEric Farman 		vcpu->arch.sie_block->eca |= 0x00020000;
167813211ea7SEric Farman 		vcpu->arch.sie_block->ecd |= 0x20000000;
167913211ea7SEric Farman 	}
1680c6e5f166SFan Zhang 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
1681492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
16825a5e6536SMatthew Rosato 
1683e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma) {
1684b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1685b31605c1SDominik Dingel 		if (rc)
1686b31605c1SDominik Dingel 			return rc;
1687b31288faSKonstantin Weitz 	}
16880ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1689ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
16909d8d5786SMichael Mueller 
16915102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
16925102ee87STony Krowiak 
1693b31605c1SDominik Dingel 	return rc;
1694b0c632dbSHeiko Carstens }
1695b0c632dbSHeiko Carstens 
1696b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1697b0c632dbSHeiko Carstens 				      unsigned int id)
1698b0c632dbSHeiko Carstens {
16994d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
17007feb6bb8SMichael Mueller 	struct sie_page *sie_page;
17014d47555aSCarsten Otte 	int rc = -EINVAL;
1702b0c632dbSHeiko Carstens 
17034215825eSDavid Hildenbrand 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
17044d47555aSCarsten Otte 		goto out;
17054d47555aSCarsten Otte 
17064d47555aSCarsten Otte 	rc = -ENOMEM;
17074d47555aSCarsten Otte 
1708b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1709b0c632dbSHeiko Carstens 	if (!vcpu)
17104d47555aSCarsten Otte 		goto out;
1711b0c632dbSHeiko Carstens 
17127feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
17137feb6bb8SMichael Mueller 	if (!sie_page)
1714b0c632dbSHeiko Carstens 		goto out_free_cpu;
1715b0c632dbSHeiko Carstens 
17167feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
17177feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
17187feb6bb8SMichael Mueller 
1719b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
1720ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
1721ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1722d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
17235288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
17249c23a131SDavid Hildenbrand 	seqcount_init(&vcpu->arch.cputm_seqcount);
1725ba5c1e9bSCarsten Otte 
1726b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
1727b0c632dbSHeiko Carstens 	if (rc)
17289abc2a08SDavid Hildenbrand 		goto out_free_sie_block;
17298335713aSChristian Borntraeger 	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
1730b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
1731ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1732b0c632dbSHeiko Carstens 
1733b0c632dbSHeiko Carstens 	return vcpu;
17347b06bf2fSWei Yongjun out_free_sie_block:
17357b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
1736b0c632dbSHeiko Carstens out_free_cpu:
1737b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
17384d47555aSCarsten Otte out:
1739b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
1740b0c632dbSHeiko Carstens }
1741b0c632dbSHeiko Carstens 
1742b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1743b0c632dbSHeiko Carstens {
17449a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
1745b0c632dbSHeiko Carstens }
1746b0c632dbSHeiko Carstens 
174727406cd5SChristian Borntraeger void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
174849b99e1eSChristian Borntraeger {
1749805de8f4SPeter Zijlstra 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
175061a6df54SDavid Hildenbrand 	exit_sie(vcpu);
175149b99e1eSChristian Borntraeger }
175249b99e1eSChristian Borntraeger 
175327406cd5SChristian Borntraeger void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
175449b99e1eSChristian Borntraeger {
1755805de8f4SPeter Zijlstra 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
175649b99e1eSChristian Borntraeger }
175749b99e1eSChristian Borntraeger 
17588e236546SChristian Borntraeger static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
17598e236546SChristian Borntraeger {
1760805de8f4SPeter Zijlstra 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
176161a6df54SDavid Hildenbrand 	exit_sie(vcpu);
17628e236546SChristian Borntraeger }
17638e236546SChristian Borntraeger 
17648e236546SChristian Borntraeger static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
17658e236546SChristian Borntraeger {
17669bf9fde2SJason J. Herne 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
17678e236546SChristian Borntraeger }
17688e236546SChristian Borntraeger 
176949b99e1eSChristian Borntraeger /*
177049b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
177149b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
177249b99e1eSChristian Borntraeger  * return immediately. */
177349b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
177449b99e1eSChristian Borntraeger {
1775805de8f4SPeter Zijlstra 	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
177649b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
177749b99e1eSChristian Borntraeger 		cpu_relax();
177849b99e1eSChristian Borntraeger }
177949b99e1eSChristian Borntraeger 
17808e236546SChristian Borntraeger /* Kick a guest cpu out of SIE to process a request synchronously */
17818e236546SChristian Borntraeger void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
178249b99e1eSChristian Borntraeger {
17838e236546SChristian Borntraeger 	kvm_make_request(req, vcpu);
17848e236546SChristian Borntraeger 	kvm_s390_vcpu_request(vcpu);
178549b99e1eSChristian Borntraeger }
178649b99e1eSChristian Borntraeger 
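/*
 * gmap notifier: if the invalidated address hits either page of a vcpu's
 * prefix area, that vcpu is kicked out of SIE and asked to re-arm the ipte
 * notifier via KVM_REQ_MMU_RELOAD.
 */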
17872c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
17882c70fe44SChristian Borntraeger {
17892c70fe44SChristian Borntraeger 	int i;
17902c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
17912c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
17922c70fe44SChristian Borntraeger 
17932c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
17942c70fe44SChristian Borntraeger 		/* match against both prefix pages */
1795fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
17962c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
17978e236546SChristian Borntraeger 			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
17982c70fe44SChristian Borntraeger 		}
17992c70fe44SChristian Borntraeger 	}
18002c70fe44SChristian Borntraeger }
18012c70fe44SChristian Borntraeger 
1802b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1803b6d33834SChristoffer Dall {
1804b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
1805b6d33834SChristoffer Dall 	BUG();
1806b6d33834SChristoffer Dall 	return 0;
1807b6d33834SChristoffer Dall }
1808b6d33834SChristoffer Dall 
180914eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
181014eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
181114eebd91SCarsten Otte {
181214eebd91SCarsten Otte 	int r = -EINVAL;
181314eebd91SCarsten Otte 
181414eebd91SCarsten Otte 	switch (reg->id) {
181529b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
181629b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
181729b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
181829b7c71bSCarsten Otte 		break;
181929b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
182029b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
182129b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
182229b7c71bSCarsten Otte 		break;
182346a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
18244287f247SDavid Hildenbrand 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
182546a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
182646a6dd1cSJason J. herne 		break;
182746a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
182846a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
182946a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
183046a6dd1cSJason J. herne 		break;
1831536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1832536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
1833536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1834536336c2SDominik Dingel 		break;
1835536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1836536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
1837536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1838536336c2SDominik Dingel 		break;
1839536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1840536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
1841536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1842536336c2SDominik Dingel 		break;
1843672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1844672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
1845672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1846672550fbSChristian Borntraeger 		break;
1847afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1848afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
1849afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1850afa45ff5SChristian Borntraeger 		break;
185114eebd91SCarsten Otte 	default:
185214eebd91SCarsten Otte 		break;
185314eebd91SCarsten Otte 	}
185414eebd91SCarsten Otte 
185514eebd91SCarsten Otte 	return r;
185614eebd91SCarsten Otte }
185714eebd91SCarsten Otte 
185814eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
185914eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
186014eebd91SCarsten Otte {
186114eebd91SCarsten Otte 	int r = -EINVAL;
18624287f247SDavid Hildenbrand 	__u64 val;
186314eebd91SCarsten Otte 
186414eebd91SCarsten Otte 	switch (reg->id) {
186529b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
186629b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
186729b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
186829b7c71bSCarsten Otte 		break;
186929b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
187029b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
187129b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
187229b7c71bSCarsten Otte 		break;
187346a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
18744287f247SDavid Hildenbrand 		r = get_user(val, (u64 __user *)reg->addr);
18754287f247SDavid Hildenbrand 		if (!r)
18764287f247SDavid Hildenbrand 			kvm_s390_set_cpu_timer(vcpu, val);
187746a6dd1cSJason J. herne 		break;
187846a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
187946a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
188046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
188146a6dd1cSJason J. herne 		break;
1882536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1883536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
1884536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
18859fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
18869fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1887536336c2SDominik Dingel 		break;
1888536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1889536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
1890536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1891536336c2SDominik Dingel 		break;
1892536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1893536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
1894536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1895536336c2SDominik Dingel 		break;
1896672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1897672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
1898672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1899672550fbSChristian Borntraeger 		break;
1900afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1901afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
1902afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1903afa45ff5SChristian Borntraeger 		break;
190414eebd91SCarsten Otte 	default:
190514eebd91SCarsten Otte 		break;
190614eebd91SCarsten Otte 	}
190714eebd91SCarsten Otte 
190814eebd91SCarsten Otte 	return r;
190914eebd91SCarsten Otte }
1910b6d33834SChristoffer Dall 
1911b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1912b0c632dbSHeiko Carstens {
1913b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
1914b0c632dbSHeiko Carstens 	return 0;
1915b0c632dbSHeiko Carstens }
1916b0c632dbSHeiko Carstens 
1917b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1918b0c632dbSHeiko Carstens {
19195a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1920b0c632dbSHeiko Carstens 	return 0;
1921b0c632dbSHeiko Carstens }
1922b0c632dbSHeiko Carstens 
1923b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1924b0c632dbSHeiko Carstens {
19255a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1926b0c632dbSHeiko Carstens 	return 0;
1927b0c632dbSHeiko Carstens }
1928b0c632dbSHeiko Carstens 
1929b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1930b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1931b0c632dbSHeiko Carstens {
193259674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1933b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
193459674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1935b0c632dbSHeiko Carstens 	return 0;
1936b0c632dbSHeiko Carstens }
1937b0c632dbSHeiko Carstens 
1938b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1939b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1940b0c632dbSHeiko Carstens {
194159674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1942b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1943b0c632dbSHeiko Carstens 	return 0;
1944b0c632dbSHeiko Carstens }
1945b0c632dbSHeiko Carstens 
1946b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1947b0c632dbSHeiko Carstens {
19489abc2a08SDavid Hildenbrand 	/* make sure the new values will be lazily loaded */
19499abc2a08SDavid Hildenbrand 	save_fpu_regs();
19504725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
19514725c860SMartin Schwidefsky 		return -EINVAL;
19529abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = fpu->fpc;
19539abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
19549abc2a08SDavid Hildenbrand 		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
19559abc2a08SDavid Hildenbrand 	else
19569abc2a08SDavid Hildenbrand 		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
1957b0c632dbSHeiko Carstens 	return 0;
1958b0c632dbSHeiko Carstens }
1959b0c632dbSHeiko Carstens 
1960b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1961b0c632dbSHeiko Carstens {
19629abc2a08SDavid Hildenbrand 	/* make sure we have the latest values */
19639abc2a08SDavid Hildenbrand 	save_fpu_regs();
19649abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
19659abc2a08SDavid Hildenbrand 		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
19669abc2a08SDavid Hildenbrand 	else
19679abc2a08SDavid Hildenbrand 		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
19689abc2a08SDavid Hildenbrand 	fpu->fpc = current->thread.fpu.fpc;
1969b0c632dbSHeiko Carstens 	return 0;
1970b0c632dbSHeiko Carstens }
1971b0c632dbSHeiko Carstens 
1972b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1973b0c632dbSHeiko Carstens {
1974b0c632dbSHeiko Carstens 	int rc = 0;
1975b0c632dbSHeiko Carstens 
19767a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1977b0c632dbSHeiko Carstens 		rc = -EBUSY;
1978d7b0b5ebSCarsten Otte 	else {
1979d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1980d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1981d7b0b5ebSCarsten Otte 	}
1982b0c632dbSHeiko Carstens 	return rc;
1983b0c632dbSHeiko Carstens }
1984b0c632dbSHeiko Carstens 
1985b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1986b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1987b0c632dbSHeiko Carstens {
1988b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1989b0c632dbSHeiko Carstens }
1990b0c632dbSHeiko Carstens 
199127291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
199227291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
199327291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
199427291e21SDavid Hildenbrand 
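/*
 * Host-requested debugging is mapped onto guest PER: when enabled, the
 * CPUSTAT_P flag enforces PER interception and any hardware breakpoints
 * supplied by userspace are imported into the vcpu's guestdbg state.
 */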
1995d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1996d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1997b0c632dbSHeiko Carstens {
199827291e21SDavid Hildenbrand 	int rc = 0;
199927291e21SDavid Hildenbrand 
200027291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
200127291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
200227291e21SDavid Hildenbrand 
20032de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
200427291e21SDavid Hildenbrand 		return -EINVAL;
200527291e21SDavid Hildenbrand 
200627291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
200727291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
200827291e21SDavid Hildenbrand 		/* enforce guest PER */
2009805de8f4SPeter Zijlstra 		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
201027291e21SDavid Hildenbrand 
201127291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
201227291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
201327291e21SDavid Hildenbrand 	} else {
2014805de8f4SPeter Zijlstra 		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
201527291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
201627291e21SDavid Hildenbrand 	}
201727291e21SDavid Hildenbrand 
201827291e21SDavid Hildenbrand 	if (rc) {
201927291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
202027291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
2021805de8f4SPeter Zijlstra 		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
202227291e21SDavid Hildenbrand 	}
202327291e21SDavid Hildenbrand 
202427291e21SDavid Hildenbrand 	return rc;
2025b0c632dbSHeiko Carstens }
2026b0c632dbSHeiko Carstens 
202762d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
202862d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
202962d9f0dbSMarcelo Tosatti {
20306352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
20316352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
20326352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
203362d9f0dbSMarcelo Tosatti }
203462d9f0dbSMarcelo Tosatti 
203562d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
203662d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
203762d9f0dbSMarcelo Tosatti {
20386352e4d2SDavid Hildenbrand 	int rc = 0;
20396352e4d2SDavid Hildenbrand 
20406352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
20416352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
20426352e4d2SDavid Hildenbrand 
20436352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
20446352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
20456352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
20466352e4d2SDavid Hildenbrand 		break;
20476352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
20486352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
20496352e4d2SDavid Hildenbrand 		break;
20506352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
20516352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
20526352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
20536352e4d2SDavid Hildenbrand 	default:
20546352e4d2SDavid Hildenbrand 		rc = -ENXIO;
20556352e4d2SDavid Hildenbrand 	}
20566352e4d2SDavid Hildenbrand 
20576352e4d2SDavid Hildenbrand 	return rc;
205862d9f0dbSMarcelo Tosatti }
205962d9f0dbSMarcelo Tosatti 
20608ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
20618ad35755SDavid Hildenbrand {
20628ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
20638ad35755SDavid Hildenbrand }
20648ad35755SDavid Hildenbrand 
20652c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
20662c70fe44SChristian Borntraeger {
20678ad35755SDavid Hildenbrand retry:
20688e236546SChristian Borntraeger 	kvm_s390_vcpu_request_handled(vcpu);
2069586b7ccdSChristian Borntraeger 	if (!vcpu->requests)
2070586b7ccdSChristian Borntraeger 		return 0;
20712c70fe44SChristian Borntraeger 	/*
20722c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
20732c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
20742c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
20752c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
20762c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Lets just retry the request loop.
20772c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
20788ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
20792c70fe44SChristian Borntraeger 		int rc;
20802c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
2081fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
20822c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
20832c70fe44SChristian Borntraeger 		if (rc)
20842c70fe44SChristian Borntraeger 			return rc;
20858ad35755SDavid Hildenbrand 		goto retry;
20862c70fe44SChristian Borntraeger 	}
20878ad35755SDavid Hildenbrand 
2088d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2089d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
2090d3d692c8SDavid Hildenbrand 		goto retry;
2091d3d692c8SDavid Hildenbrand 	}
2092d3d692c8SDavid Hildenbrand 
20938ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
20948ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
20958ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
2096805de8f4SPeter Zijlstra 			atomic_or(CPUSTAT_IBS,
20978ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
20988ad35755SDavid Hildenbrand 		}
20998ad35755SDavid Hildenbrand 		goto retry;
21008ad35755SDavid Hildenbrand 	}
21018ad35755SDavid Hildenbrand 
21028ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
21038ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
21048ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2105805de8f4SPeter Zijlstra 			atomic_andnot(CPUSTAT_IBS,
21068ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
21078ad35755SDavid Hildenbrand 		}
21088ad35755SDavid Hildenbrand 		goto retry;
21098ad35755SDavid Hildenbrand 	}
21108ad35755SDavid Hildenbrand 
21110759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
21120759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
21130759d068SDavid Hildenbrand 
21142c70fe44SChristian Borntraeger 	return 0;
21152c70fe44SChristian Borntraeger }
21162c70fe44SChristian Borntraeger 
211725ed1675SDavid Hildenbrand void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
211825ed1675SDavid Hildenbrand {
211925ed1675SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
212025ed1675SDavid Hildenbrand 	int i;
212125ed1675SDavid Hildenbrand 
212225ed1675SDavid Hildenbrand 	mutex_lock(&kvm->lock);
212325ed1675SDavid Hildenbrand 	preempt_disable();
212425ed1675SDavid Hildenbrand 	kvm->arch.epoch = tod - get_tod_clock();
212525ed1675SDavid Hildenbrand 	kvm_s390_vcpu_block_all(kvm);
212625ed1675SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm)
212725ed1675SDavid Hildenbrand 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
212825ed1675SDavid Hildenbrand 	kvm_s390_vcpu_unblock_all(kvm);
212925ed1675SDavid Hildenbrand 	preempt_enable();
213025ed1675SDavid Hildenbrand 	mutex_unlock(&kvm->lock);
213125ed1675SDavid Hildenbrand }
213225ed1675SDavid Hildenbrand 
2133fa576c58SThomas Huth /**
2134fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
2135fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
2136fa576c58SThomas Huth  * @gpa: Guest physical address
2137fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
2138fa576c58SThomas Huth  *
2139fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
2140fa576c58SThomas Huth  *
2141fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
2142fa576c58SThomas Huth  */
2143fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
214424eb3a82SDominik Dingel {
2145527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
2146527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
214724eb3a82SDominik Dingel }
214824eb3a82SDominik Dingel 
21493c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
21503c038e6bSDominik Dingel 				      unsigned long token)
21513c038e6bSDominik Dingel {
21523c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
2153383d0b05SJens Freimann 	struct kvm_s390_irq irq;
21543c038e6bSDominik Dingel 
21553c038e6bSDominik Dingel 	if (start_token) {
2156383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
2157383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
2158383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
21593c038e6bSDominik Dingel 	} else {
21603c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
2161383d0b05SJens Freimann 		inti.parm64 = token;
21623c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
21633c038e6bSDominik Dingel 	}
21643c038e6bSDominik Dingel }
21653c038e6bSDominik Dingel 
21663c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
21673c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
21683c038e6bSDominik Dingel {
21693c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
21703c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
21713c038e6bSDominik Dingel }
21723c038e6bSDominik Dingel 
21733c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
21743c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
21753c038e6bSDominik Dingel {
21763c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
21773c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
21783c038e6bSDominik Dingel }
21793c038e6bSDominik Dingel 
21803c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
21813c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
21823c038e6bSDominik Dingel {
21833c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
21843c038e6bSDominik Dingel }
21853c038e6bSDominik Dingel 
21863c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
21873c038e6bSDominik Dingel {
21883c038e6bSDominik Dingel 	/*
21893c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
21903c038e6bSDominik Dingel 	 * but we still want kvm_check_async_pf_completion() to clean up
21913c038e6bSDominik Dingel 	 */
21923c038e6bSDominik Dingel 	return true;
21933c038e6bSDominik Dingel }
21943c038e6bSDominik Dingel 
21953c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
21963c038e6bSDominik Dingel {
21973c038e6bSDominik Dingel 	hva_t hva;
21983c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
21993c038e6bSDominik Dingel 	int rc;
22003c038e6bSDominik Dingel 
22013c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
22023c038e6bSDominik Dingel 		return 0;
22033c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
22043c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
22053c038e6bSDominik Dingel 		return 0;
22063c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
22073c038e6bSDominik Dingel 		return 0;
22089a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
22093c038e6bSDominik Dingel 		return 0;
22103c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
22113c038e6bSDominik Dingel 		return 0;
22123c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
22133c038e6bSDominik Dingel 		return 0;
22143c038e6bSDominik Dingel 
221581480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
221681480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
221781480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
22183c038e6bSDominik Dingel 		return 0;
22193c038e6bSDominik Dingel 
22203c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
22213c038e6bSDominik Dingel 	return rc;
22223c038e6bSDominik Dingel }
22233c038e6bSDominik Dingel 
22243fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
2225b0c632dbSHeiko Carstens {
22263fb4c40fSThomas Huth 	int rc, cpuflags;
2227e168bf8dSCarsten Otte 
22283c038e6bSDominik Dingel 	/*
22293c038e6bSDominik Dingel 	 * On s390, notifications for arriving pages will be delivered directly
22303c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
22313c038e6bSDominik Dingel 	 * handled outside the worker.
22323c038e6bSDominik Dingel 	 */
22333c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
22343c038e6bSDominik Dingel 
22357ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
22367ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
2237b0c632dbSHeiko Carstens 
2238b0c632dbSHeiko Carstens 	if (need_resched())
2239b0c632dbSHeiko Carstens 		schedule();
2240b0c632dbSHeiko Carstens 
2241d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
224271cde587SChristian Borntraeger 		s390_handle_mcck();
224371cde587SChristian Borntraeger 
224479395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
224579395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
224679395031SJens Freimann 		if (rc)
224779395031SJens Freimann 			return rc;
224879395031SJens Freimann 	}
22490ff31867SCarsten Otte 
22502c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
22512c70fe44SChristian Borntraeger 	if (rc)
22522c70fe44SChristian Borntraeger 		return rc;
22532c70fe44SChristian Borntraeger 
225427291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
225527291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
225627291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
225727291e21SDavid Hildenbrand 	}
225827291e21SDavid Hildenbrand 
2259b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
22603fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
22613fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
22623fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
22632b29a9fdSDominik Dingel 
22643fb4c40fSThomas Huth 	return 0;
22653fb4c40fSThomas Huth }
22663fb4c40fSThomas Huth 
2267492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2268492d8642SThomas Huth {
226956317920SDavid Hildenbrand 	struct kvm_s390_pgm_info pgm_info = {
227056317920SDavid Hildenbrand 		.code = PGM_ADDRESSING,
227156317920SDavid Hildenbrand 	};
227256317920SDavid Hildenbrand 	u8 opcode, ilen;
2273492d8642SThomas Huth 	int rc;
2274492d8642SThomas Huth 
2275492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2276492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
2277492d8642SThomas Huth 
2278492d8642SThomas Huth 	/*
2279492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
2280492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
2281492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
2282492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
2283492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
2284492d8642SThomas Huth 	 * to be able to forward the PSW.
2285492d8642SThomas Huth 	 */
228665977322SDavid Hildenbrand 	rc = read_guest_instr(vcpu, &opcode, 1);
228756317920SDavid Hildenbrand 	ilen = insn_length(opcode);
22889b0d721aSDavid Hildenbrand 	if (rc < 0) {
22899b0d721aSDavid Hildenbrand 		return rc;
22909b0d721aSDavid Hildenbrand 	} else if (rc) {
22919b0d721aSDavid Hildenbrand 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
22929b0d721aSDavid Hildenbrand 		 * Forward by arbitrary ilc, injection will take care of
22939b0d721aSDavid Hildenbrand 		 * nullification if necessary.
22949b0d721aSDavid Hildenbrand 		 */
22959b0d721aSDavid Hildenbrand 		pgm_info = vcpu->arch.pgm;
22969b0d721aSDavid Hildenbrand 		ilen = 4;
22979b0d721aSDavid Hildenbrand 	}
229856317920SDavid Hildenbrand 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
229956317920SDavid Hildenbrand 	kvm_s390_forward_psw(vcpu, ilen);
230056317920SDavid Hildenbrand 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
2301492d8642SThomas Huth }
2302492d8642SThomas Huth 
23033fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
23043fb4c40fSThomas Huth {
23052b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
23062b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
23072b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
23082b29a9fdSDominik Dingel 
230927291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
231027291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
231127291e21SDavid Hildenbrand 
23127ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
23137ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
231471f116bfSDavid Hildenbrand 
231571f116bfSDavid Hildenbrand 	if (vcpu->arch.sie_block->icptcode > 0) {
231671f116bfSDavid Hildenbrand 		int rc = kvm_handle_sie_intercept(vcpu);
231771f116bfSDavid Hildenbrand 
231871f116bfSDavid Hildenbrand 		if (rc != -EOPNOTSUPP)
231971f116bfSDavid Hildenbrand 			return rc;
232071f116bfSDavid Hildenbrand 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
232171f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
232271f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
232371f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
232471f116bfSDavid Hildenbrand 		return -EREMOTE;
232571f116bfSDavid Hildenbrand 	} else if (exit_reason != -EFAULT) {
232671f116bfSDavid Hildenbrand 		vcpu->stat.exit_null++;
232771f116bfSDavid Hildenbrand 		return 0;
2328210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
2329210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2330210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
2331210b1607SThomas Huth 						current->thread.gmap_addr;
2332210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
233371f116bfSDavid Hildenbrand 		return -EREMOTE;
233424eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
23353c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
233624eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
233771f116bfSDavid Hildenbrand 		if (kvm_arch_setup_async_pf(vcpu))
233871f116bfSDavid Hildenbrand 			return 0;
233971f116bfSDavid Hildenbrand 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
2340fa576c58SThomas Huth 	}
234171f116bfSDavid Hildenbrand 	return vcpu_post_run_fault_in_sie(vcpu);
23423fb4c40fSThomas Huth }
23433fb4c40fSThomas Huth 
23443fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
23453fb4c40fSThomas Huth {
23463fb4c40fSThomas Huth 	int rc, exit_reason;
23473fb4c40fSThomas Huth 
2348800c1065SThomas Huth 	/*
2349800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2350800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
2351800c1065SThomas Huth 	 */
2352800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2353800c1065SThomas Huth 
2354a76ccff6SThomas Huth 	do {
23553fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
23563fb4c40fSThomas Huth 		if (rc)
2357a76ccff6SThomas Huth 			break;
23583fb4c40fSThomas Huth 
2359800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
23603fb4c40fSThomas Huth 		/*
2361a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
2362a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
23633fb4c40fSThomas Huth 		 */
23640097d12eSChristian Borntraeger 		local_irq_disable();
23650097d12eSChristian Borntraeger 		__kvm_guest_enter();
2366db0758b2SDavid Hildenbrand 		__disable_cpu_timer_accounting(vcpu);
23670097d12eSChristian Borntraeger 		local_irq_enable();
2368a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
2369a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
23700097d12eSChristian Borntraeger 		local_irq_disable();
2371db0758b2SDavid Hildenbrand 		__enable_cpu_timer_accounting(vcpu);
23720097d12eSChristian Borntraeger 		__kvm_guest_exit();
23730097d12eSChristian Borntraeger 		local_irq_enable();
2374800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
23753fb4c40fSThomas Huth 
23763fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
237727291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
23783fb4c40fSThomas Huth 
2379800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2380e168bf8dSCarsten Otte 	return rc;
2381b0c632dbSHeiko Carstens }
2382b0c632dbSHeiko Carstens 
2383b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2384b028ee3eSDavid Hildenbrand {
2385b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2386b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2387b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2388b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2389b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2390b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2391d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
2392d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2393b028ee3eSDavid Hildenbrand 	}
2394b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
23954287f247SDavid Hildenbrand 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
2396b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2397b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2398b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2399b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2400b028ee3eSDavid Hildenbrand 	}
2401b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2402b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2403b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2404b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
24059fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
24069fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
2407b028ee3eSDavid Hildenbrand 	}
2408b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
2409b028ee3eSDavid Hildenbrand }
2410b028ee3eSDavid Hildenbrand 
2411b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2412b028ee3eSDavid Hildenbrand {
2413b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2414b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2415b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2416b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
24174287f247SDavid Hildenbrand 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
2418b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2419b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2420b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2421b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2422b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2423b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2424b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2425b028ee3eSDavid Hildenbrand }
2426b028ee3eSDavid Hildenbrand 
2427b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2428b0c632dbSHeiko Carstens {
24298f2abe6aSChristian Borntraeger 	int rc;
2430b0c632dbSHeiko Carstens 	sigset_t sigsaved;
2431b0c632dbSHeiko Carstens 
243227291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
243327291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
243427291e21SDavid Hildenbrand 		return 0;
243527291e21SDavid Hildenbrand 	}
243627291e21SDavid Hildenbrand 
2437b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2438b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2439b0c632dbSHeiko Carstens 
24406352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
24416852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
24426352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
2443ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
24446352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
24456352e4d2SDavid Hildenbrand 		return -EINVAL;
24466352e4d2SDavid Hildenbrand 	}
2447b0c632dbSHeiko Carstens 
2448b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
2449db0758b2SDavid Hildenbrand 	enable_cpu_timer_accounting(vcpu);
2450d7b0b5ebSCarsten Otte 
2451dab4079dSHeiko Carstens 	might_fault();
2452e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
24539ace903dSChristian Ehrhardt 
2454b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
2455b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
24568f2abe6aSChristian Borntraeger 		rc = -EINTR;
2457b1d16c49SChristian Ehrhardt 	}
24588f2abe6aSChristian Borntraeger 
245927291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
246027291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
246127291e21SDavid Hildenbrand 		rc = 0;
246227291e21SDavid Hildenbrand 	}
246327291e21SDavid Hildenbrand 
24648f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
246571f116bfSDavid Hildenbrand 		/* userspace support is needed, kvm_run has been prepared */
24668f2abe6aSChristian Borntraeger 		rc = 0;
24678f2abe6aSChristian Borntraeger 	}
24688f2abe6aSChristian Borntraeger 
2469db0758b2SDavid Hildenbrand 	disable_cpu_timer_accounting(vcpu);
2470b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
2471d7b0b5ebSCarsten Otte 
2472b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2473b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2474b0c632dbSHeiko Carstens 
2475b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
24767e8e6ab4SHeiko Carstens 	return rc;
2477b0c632dbSHeiko Carstens }
2478b0c632dbSHeiko Carstens 
2479b0c632dbSHeiko Carstens /*
2480b0c632dbSHeiko Carstens  * store status at address
2481b0c632dbSHeiko Carstens  * we have two special cases:
2482b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2483b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2484b0c632dbSHeiko Carstens  */
2485d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2486b0c632dbSHeiko Carstens {
2487092670cdSCarsten Otte 	unsigned char archmode = 1;
24889abc2a08SDavid Hildenbrand 	freg_t fprs[NUM_FPRS];
2489fda902cbSMichael Mueller 	unsigned int px;
24904287f247SDavid Hildenbrand 	u64 clkcomp, cputm;
2491d0bce605SHeiko Carstens 	int rc;
2492b0c632dbSHeiko Carstens 
2493d9a3a09aSMartin Schwidefsky 	px = kvm_s390_get_prefix(vcpu);
2494d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2495d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
2496b0c632dbSHeiko Carstens 			return -EFAULT;
2497d9a3a09aSMartin Schwidefsky 		gpa = 0;
2498d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2499d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
2500b0c632dbSHeiko Carstens 			return -EFAULT;
2501d9a3a09aSMartin Schwidefsky 		gpa = px;
2502d9a3a09aSMartin Schwidefsky 	} else
2503d9a3a09aSMartin Schwidefsky 		gpa -= __LC_FPREGS_SAVE_AREA;
25049abc2a08SDavid Hildenbrand 
25059abc2a08SDavid Hildenbrand 	/* manually convert vector registers if necessary */
25069abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX) {
25079522b37fSDavid Hildenbrand 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
2508d9a3a09aSMartin Schwidefsky 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
25099abc2a08SDavid Hildenbrand 				     fprs, 128);
25109abc2a08SDavid Hildenbrand 	} else {
25119abc2a08SDavid Hildenbrand 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
25126fd8e67dSDavid Hildenbrand 				     vcpu->run->s.regs.fprs, 128);
25139abc2a08SDavid Hildenbrand 	}
2514d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
2515d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
2516d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
2517d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2518d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
2519fda902cbSMichael Mueller 			      &px, 4);
2520d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
25219abc2a08SDavid Hildenbrand 			      &vcpu->run->s.regs.fpc, 4);
2522d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
2523d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
25244287f247SDavid Hildenbrand 	cputm = kvm_s390_get_cpu_timer(vcpu);
2525d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
25264287f247SDavid Hildenbrand 			      &cputm, 8);
2527178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2528d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
2529d0bce605SHeiko Carstens 			      &clkcomp, 8);
2530d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
2531d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2532d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
2533d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2534d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2535b0c632dbSHeiko Carstens }
2536b0c632dbSHeiko Carstens 
2537e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2538e879892cSThomas Huth {
2539e879892cSThomas Huth 	/*
2540e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2541e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
2542e879892cSThomas Huth 	 * them into the save area.
2543e879892cSThomas Huth 	 */
2544d0164ee2SHendrik Brueckner 	save_fpu_regs();
25459abc2a08SDavid Hildenbrand 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
2546e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2547e879892cSThomas Huth 
2548e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2549e879892cSThomas Huth }
2550e879892cSThomas Huth 
2551bc17de7cSEric Farman /*
2552bc17de7cSEric Farman  * store additional status at address
2553bc17de7cSEric Farman  */
2554bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2555bc17de7cSEric Farman 					unsigned long gpa)
2556bc17de7cSEric Farman {
2557bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2558bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2559bc17de7cSEric Farman 		return 0;
2560bc17de7cSEric Farman 
2561bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2562bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2563bc17de7cSEric Farman }
2564bc17de7cSEric Farman 
2565bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2566bc17de7cSEric Farman {
2567bc17de7cSEric Farman 	if (!test_kvm_facility(vcpu->kvm, 129))
2568bc17de7cSEric Farman 		return 0;
2569bc17de7cSEric Farman 
2570bc17de7cSEric Farman 	/*
2571bc17de7cSEric Farman 	 * The guest VXRS are in the host VXRS due to the lazy
25729977e886SHendrik Brueckner 	 * copying in vcpu load/put. We can simply call save_fpu_regs()
25739977e886SHendrik Brueckner 	 * to save the current register state because we are in the
25749977e886SHendrik Brueckner 	 * middle of a load/put cycle.
25759977e886SHendrik Brueckner 	 *
25769977e886SHendrik Brueckner 	 * Let's update our copies before we save them into the save area.
2577bc17de7cSEric Farman 	 */
2578d0164ee2SHendrik Brueckner 	save_fpu_regs();
2579bc17de7cSEric Farman 
2580bc17de7cSEric Farman 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2581bc17de7cSEric Farman }
2582bc17de7cSEric Farman 
25838ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
25848ad35755SDavid Hildenbrand {
25858ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
25868e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
25878ad35755SDavid Hildenbrand }
25888ad35755SDavid Hildenbrand 
25898ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
25908ad35755SDavid Hildenbrand {
25918ad35755SDavid Hildenbrand 	unsigned int i;
25928ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
25938ad35755SDavid Hildenbrand 
25948ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
25958ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
25968ad35755SDavid Hildenbrand 	}
25978ad35755SDavid Hildenbrand }
25988ad35755SDavid Hildenbrand 
25998ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
26008ad35755SDavid Hildenbrand {
26018ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
26028e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
26038ad35755SDavid Hildenbrand }
26048ad35755SDavid Hildenbrand 
26056852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
26066852d7b6SDavid Hildenbrand {
26078ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
26088ad35755SDavid Hildenbrand 
26098ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
26108ad35755SDavid Hildenbrand 		return;
26118ad35755SDavid Hildenbrand 
26126852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
26138ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2614433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
26158ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
26168ad35755SDavid Hildenbrand 
26178ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
26188ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
26198ad35755SDavid Hildenbrand 			started_vcpus++;
26208ad35755SDavid Hildenbrand 	}
26218ad35755SDavid Hildenbrand 
26228ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
26238ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
26248ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
26258ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
26268ad35755SDavid Hildenbrand 		/*
26278ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
26288ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
26298ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
26308ad35755SDavid Hildenbrand 		 */
26318ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
26328ad35755SDavid Hildenbrand 	}
26338ad35755SDavid Hildenbrand 
2634805de8f4SPeter Zijlstra 	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
26358ad35755SDavid Hildenbrand 	/*
26368ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
26378ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
26388ad35755SDavid Hildenbrand 	 */
2639d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2640433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
26418ad35755SDavid Hildenbrand 	return;
26426852d7b6SDavid Hildenbrand }
26436852d7b6SDavid Hildenbrand 
26446852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
26456852d7b6SDavid Hildenbrand {
26468ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
26478ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
26488ad35755SDavid Hildenbrand 
26498ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
26508ad35755SDavid Hildenbrand 		return;
26518ad35755SDavid Hildenbrand 
26526852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
26538ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2654433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
26558ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
26568ad35755SDavid Hildenbrand 
265732f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
26586cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
265932f5ff63SDavid Hildenbrand 
2660805de8f4SPeter Zijlstra 	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
26618ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
26628ad35755SDavid Hildenbrand 
26638ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
26648ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
26658ad35755SDavid Hildenbrand 			started_vcpus++;
26668ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
26678ad35755SDavid Hildenbrand 		}
26688ad35755SDavid Hildenbrand 	}
26698ad35755SDavid Hildenbrand 
26708ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
26718ad35755SDavid Hildenbrand 		/*
26728ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
26738ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
26748ad35755SDavid Hildenbrand 		 */
26758ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
26768ad35755SDavid Hildenbrand 	}
26778ad35755SDavid Hildenbrand 
2678433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
26798ad35755SDavid Hildenbrand 	return;
26806852d7b6SDavid Hildenbrand }
26816852d7b6SDavid Hildenbrand 
2682d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2683d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2684d6712df9SCornelia Huck {
2685d6712df9SCornelia Huck 	int r;
2686d6712df9SCornelia Huck 
2687d6712df9SCornelia Huck 	if (cap->flags)
2688d6712df9SCornelia Huck 		return -EINVAL;
2689d6712df9SCornelia Huck 
2690d6712df9SCornelia Huck 	switch (cap->cap) {
2691fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2692fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2693fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2694c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2695fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2696fa6b7fe9SCornelia Huck 		}
2697fa6b7fe9SCornelia Huck 		r = 0;
2698fa6b7fe9SCornelia Huck 		break;
2699d6712df9SCornelia Huck 	default:
2700d6712df9SCornelia Huck 		r = -EINVAL;
2701d6712df9SCornelia Huck 		break;
2702d6712df9SCornelia Huck 	}
2703d6712df9SCornelia Huck 	return r;
2704d6712df9SCornelia Huck }
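/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the only per-vcpu capability handled above is KVM_CAP_S390_CSS_SUPPORT.
 * A hedged example of enabling it via the KVM_ENABLE_CAP vcpu ioctl,
 * built against <linux/kvm.h>, <sys/ioctl.h> and <err.h>, assuming an
 * open vcpu file descriptor "vcpu_fd" (a hypothetical name):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 *
 * Non-zero cap.flags are rejected with -EINVAL, matching the check at the
 * top of kvm_vcpu_ioctl_enable_cap().
 */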
2705d6712df9SCornelia Huck 
270641408c28SThomas Huth static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
270741408c28SThomas Huth 				  struct kvm_s390_mem_op *mop)
270841408c28SThomas Huth {
270941408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
271041408c28SThomas Huth 	void *tmpbuf = NULL;
271141408c28SThomas Huth 	int r, srcu_idx;
271241408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
271341408c28SThomas Huth 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
271441408c28SThomas Huth 
271541408c28SThomas Huth 	if (mop->flags & ~supported_flags)
271641408c28SThomas Huth 		return -EINVAL;
271741408c28SThomas Huth 
271841408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
271941408c28SThomas Huth 		return -E2BIG;
272041408c28SThomas Huth 
272141408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
272241408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
272341408c28SThomas Huth 		if (!tmpbuf)
272441408c28SThomas Huth 			return -ENOMEM;
272541408c28SThomas Huth 	}
272641408c28SThomas Huth 
272741408c28SThomas Huth 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
272841408c28SThomas Huth 
272941408c28SThomas Huth 	switch (mop->op) {
273041408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
273141408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
273292c96321SDavid Hildenbrand 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
273392c96321SDavid Hildenbrand 					    mop->size, GACC_FETCH);
273441408c28SThomas Huth 			break;
273541408c28SThomas Huth 		}
273641408c28SThomas Huth 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
273741408c28SThomas Huth 		if (r == 0) {
273841408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
273941408c28SThomas Huth 				r = -EFAULT;
274041408c28SThomas Huth 		}
274141408c28SThomas Huth 		break;
274241408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
274341408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
274492c96321SDavid Hildenbrand 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
274592c96321SDavid Hildenbrand 					    mop->size, GACC_STORE);
274641408c28SThomas Huth 			break;
274741408c28SThomas Huth 		}
274841408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
274941408c28SThomas Huth 			r = -EFAULT;
275041408c28SThomas Huth 			break;
275141408c28SThomas Huth 		}
275241408c28SThomas Huth 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
275341408c28SThomas Huth 		break;
275441408c28SThomas Huth 	default:
275541408c28SThomas Huth 		r = -EINVAL;
275641408c28SThomas Huth 	}
275741408c28SThomas Huth 
275841408c28SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
275941408c28SThomas Huth 
276041408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
276141408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
276241408c28SThomas Huth 
276341408c28SThomas Huth 	vfree(tmpbuf);
276441408c28SThomas Huth 	return r;
276541408c28SThomas Huth }
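/*
 * Editor's note (illustrative sketch, not part of the original source):
 * kvm_s390_guest_mem_op() backs the KVM_S390_MEM_OP vcpu ioctl. A hedged
 * example of reading 256 bytes from guest logical address 0x1000, built
 * against <linux/kvm.h>, <sys/ioctl.h> and <err.h>, assuming an open
 * vcpu file descriptor "vcpu_fd" (a hypothetical name):
 *
 *	__u8 buf[256];
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr	= 0x1000,
 *		.size	= sizeof(buf),
 *		.op	= KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf	= (__u64)(unsigned long)buf,
 *		.ar	= 0,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop) < 0)
 *		err(1, "KVM_S390_MEM_OP");
 *
 * With KVM_S390_MEMOP_F_CHECK_ONLY in mop.flags only the access check is
 * performed and no data is copied; transfers larger than MEM_OP_MAX_SIZE
 * are rejected with -E2BIG.
 */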
276641408c28SThomas Huth 
2767b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
2768b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
2769b0c632dbSHeiko Carstens {
2770b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
2771b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2772800c1065SThomas Huth 	int idx;
2773bc923cc9SAvi Kivity 	long r;
2774b0c632dbSHeiko Carstens 
277593736624SAvi Kivity 	switch (ioctl) {
277647b43c52SJens Freimann 	case KVM_S390_IRQ: {
277747b43c52SJens Freimann 		struct kvm_s390_irq s390irq;
277847b43c52SJens Freimann 
277947b43c52SJens Freimann 		r = -EFAULT;
278047b43c52SJens Freimann 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
278147b43c52SJens Freimann 			break;
278247b43c52SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
278347b43c52SJens Freimann 		break;
278447b43c52SJens Freimann 	}
278593736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
2786ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2787383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
2788ba5c1e9bSCarsten Otte 
278993736624SAvi Kivity 		r = -EFAULT;
2790ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
279193736624SAvi Kivity 			break;
2792383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
2793383d0b05SJens Freimann 			return -EINVAL;
2794383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
279593736624SAvi Kivity 		break;
2796ba5c1e9bSCarsten Otte 	}
2797b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
2798800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
2799bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
2800800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
2801bc923cc9SAvi Kivity 		break;
2802b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
2803b0c632dbSHeiko Carstens 		psw_t psw;
2804b0c632dbSHeiko Carstens 
2805bc923cc9SAvi Kivity 		r = -EFAULT;
2806b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
2807bc923cc9SAvi Kivity 			break;
2808bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2809bc923cc9SAvi Kivity 		break;
2810b0c632dbSHeiko Carstens 	}
2811b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
2812bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2813bc923cc9SAvi Kivity 		break;
281414eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
281514eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
281614eebd91SCarsten Otte 		struct kvm_one_reg reg;
281714eebd91SCarsten Otte 		r = -EFAULT;
281814eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
281914eebd91SCarsten Otte 			break;
282014eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
282114eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
282214eebd91SCarsten Otte 		else
282314eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
282414eebd91SCarsten Otte 		break;
282514eebd91SCarsten Otte 	}
282627e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
282727e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
282827e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
282927e0393fSCarsten Otte 
283027e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
283127e0393fSCarsten Otte 			r = -EFAULT;
283227e0393fSCarsten Otte 			break;
283327e0393fSCarsten Otte 		}
283427e0393fSCarsten Otte 
283527e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
283627e0393fSCarsten Otte 			r = -EINVAL;
283727e0393fSCarsten Otte 			break;
283827e0393fSCarsten Otte 		}
283927e0393fSCarsten Otte 
284027e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
284127e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
284227e0393fSCarsten Otte 		break;
284327e0393fSCarsten Otte 	}
284427e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
284527e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
284627e0393fSCarsten Otte 
284727e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
284827e0393fSCarsten Otte 			r = -EFAULT;
284927e0393fSCarsten Otte 			break;
285027e0393fSCarsten Otte 		}
285127e0393fSCarsten Otte 
285227e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
285327e0393fSCarsten Otte 			r = -EINVAL;
285427e0393fSCarsten Otte 			break;
285527e0393fSCarsten Otte 		}
285627e0393fSCarsten Otte 
285727e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
285827e0393fSCarsten Otte 			ucasmap.length);
285927e0393fSCarsten Otte 		break;
286027e0393fSCarsten Otte 	}
286127e0393fSCarsten Otte #endif
2862ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
2863527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
2864ccc7910fSCarsten Otte 		break;
2865ccc7910fSCarsten Otte 	}
2866d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
2867d6712df9SCornelia Huck 	{
2868d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
2869d6712df9SCornelia Huck 		r = -EFAULT;
2870d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
2871d6712df9SCornelia Huck 			break;
2872d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2873d6712df9SCornelia Huck 		break;
2874d6712df9SCornelia Huck 	}
287541408c28SThomas Huth 	case KVM_S390_MEM_OP: {
287641408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
287741408c28SThomas Huth 
287841408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
287941408c28SThomas Huth 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
288041408c28SThomas Huth 		else
288141408c28SThomas Huth 			r = -EFAULT;
288241408c28SThomas Huth 		break;
288341408c28SThomas Huth 	}
2884816c7667SJens Freimann 	case KVM_S390_SET_IRQ_STATE: {
2885816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
2886816c7667SJens Freimann 
2887816c7667SJens Freimann 		r = -EFAULT;
2888816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2889816c7667SJens Freimann 			break;
2890816c7667SJens Freimann 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2891816c7667SJens Freimann 		    irq_state.len == 0 ||
2892816c7667SJens Freimann 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2893816c7667SJens Freimann 			r = -EINVAL;
2894816c7667SJens Freimann 			break;
2895816c7667SJens Freimann 		}
2896816c7667SJens Freimann 		r = kvm_s390_set_irq_state(vcpu,
2897816c7667SJens Freimann 					   (void __user *) irq_state.buf,
2898816c7667SJens Freimann 					   irq_state.len);
2899816c7667SJens Freimann 		break;
2900816c7667SJens Freimann 	}
2901816c7667SJens Freimann 	case KVM_S390_GET_IRQ_STATE: {
2902816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
2903816c7667SJens Freimann 
2904816c7667SJens Freimann 		r = -EFAULT;
2905816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2906816c7667SJens Freimann 			break;
2907816c7667SJens Freimann 		if (irq_state.len == 0) {
2908816c7667SJens Freimann 			r = -EINVAL;
2909816c7667SJens Freimann 			break;
2910816c7667SJens Freimann 		}
2911816c7667SJens Freimann 		r = kvm_s390_get_irq_state(vcpu,
2912816c7667SJens Freimann 					   (__u8 __user *)  irq_state.buf,
2913816c7667SJens Freimann 					   irq_state.len);
2914816c7667SJens Freimann 		break;
2915816c7667SJens Freimann 	}
2916b0c632dbSHeiko Carstens 	default:
29173e6afcf1SCarsten Otte 		r = -ENOTTY;
2918b0c632dbSHeiko Carstens 	}
2919bc923cc9SAvi Kivity 	return r;
2920b0c632dbSHeiko Carstens }
2921b0c632dbSHeiko Carstens 
29225b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
29235b1c1493SCarsten Otte {
29245b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
29255b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
29265b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
29275b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
29285b1c1493SCarsten Otte 		get_page(vmf->page);
29295b1c1493SCarsten Otte 		return 0;
29305b1c1493SCarsten Otte 	}
29315b1c1493SCarsten Otte #endif
29325b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
29335b1c1493SCarsten Otte }
29345b1c1493SCarsten Otte 
29355587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
29365587027cSAneesh Kumar K.V 			    unsigned long npages)
2937db3fe4ebSTakuya Yoshikawa {
2938db3fe4ebSTakuya Yoshikawa 	return 0;
2939db3fe4ebSTakuya Yoshikawa }
2940db3fe4ebSTakuya Yoshikawa 
2941b0c632dbSHeiko Carstens /* Section: memory related */
2942f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2943f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
294409170a49SPaolo Bonzini 				   const struct kvm_userspace_memory_region *mem,
29457b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
2946b0c632dbSHeiko Carstens {
2947dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end on a
2948dd2887e7SNick Wang 	   segment boundary (1 MB). The memory in userland may be fragmented
2949dd2887e7SNick Wang 	   into various different vmas. It is okay to mmap() and munmap()
2950dd2887e7SNick Wang 	   in this slot at any time after this call */
2951b0c632dbSHeiko Carstens 
2952598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
2953b0c632dbSHeiko Carstens 		return -EINVAL;
2954b0c632dbSHeiko Carstens 
2955598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
2956b0c632dbSHeiko Carstens 		return -EINVAL;
2957b0c632dbSHeiko Carstens 
2958a3a92c31SDominik Dingel 	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
2959a3a92c31SDominik Dingel 		return -EINVAL;
2960a3a92c31SDominik Dingel 
2961f7784b8eSMarcelo Tosatti 	return 0;
2962f7784b8eSMarcelo Tosatti }
2963f7784b8eSMarcelo Tosatti 
2964f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
296509170a49SPaolo Bonzini 				const struct kvm_userspace_memory_region *mem,
29668482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
2967f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
29688482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
2969f7784b8eSMarcelo Tosatti {
2970f7850c92SCarsten Otte 	int rc;
2971f7784b8eSMarcelo Tosatti 
29722cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
29732cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
29742cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
29752cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
29762cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
29772cef4debSChristian Borntraeger 	 */
29782cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
29792cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
29802cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
29812cef4debSChristian Borntraeger 		return;
2982598841caSCarsten Otte 
2983598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2984598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
2985598841caSCarsten Otte 	if (rc)
2986ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
2987598841caSCarsten Otte 	return;
2988b0c632dbSHeiko Carstens }
2989b0c632dbSHeiko Carstens 
299060a37709SAlexander Yarygin static inline unsigned long nonhyp_mask(int i)
299160a37709SAlexander Yarygin {
299260a37709SAlexander Yarygin 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
299360a37709SAlexander Yarygin 
299460a37709SAlexander Yarygin 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
299560a37709SAlexander Yarygin }
299660a37709SAlexander Yarygin 
29973491caf2SChristian Borntraeger void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
29983491caf2SChristian Borntraeger {
29993491caf2SChristian Borntraeger 	vcpu->valid_wakeup = false;
30003491caf2SChristian Borntraeger }
30013491caf2SChristian Borntraeger 
3002b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
3003b0c632dbSHeiko Carstens {
300460a37709SAlexander Yarygin 	int i;
300560a37709SAlexander Yarygin 
300607197fd0SDavid Hildenbrand 	if (!sclp.has_sief2) {
300707197fd0SDavid Hildenbrand 		pr_info("SIE not available\n");
300807197fd0SDavid Hildenbrand 		return -ENODEV;
300907197fd0SDavid Hildenbrand 	}
301007197fd0SDavid Hildenbrand 
301160a37709SAlexander Yarygin 	for (i = 0; i < 16; i++)
301260a37709SAlexander Yarygin 		kvm_s390_fac_list_mask[i] |=
301360a37709SAlexander Yarygin 			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
301460a37709SAlexander Yarygin 
30159d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
3016b0c632dbSHeiko Carstens }
3017b0c632dbSHeiko Carstens 
3018b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
3019b0c632dbSHeiko Carstens {
3020b0c632dbSHeiko Carstens 	kvm_exit();
3021b0c632dbSHeiko Carstens }
3022b0c632dbSHeiko Carstens 
3023b0c632dbSHeiko Carstens module_init(kvm_s390_init);
3024b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
3025566af940SCornelia Huck 
3026566af940SCornelia Huck /*
3027566af940SCornelia Huck  * Enable autoloading of the kvm module.
3028566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3029566af940SCornelia Huck  * since x86 takes a different approach.
3030566af940SCornelia Huck  */
3031566af940SCornelia Huck #include <linux/miscdevice.h>
3032566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
3033566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
3034