xref: /linux/arch/s390/kvm/kvm-s390.c (revision e44fc8c9dab215ac0e398622a05574cffd5f5184)
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

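/*
 * Per-vcpu event counters exported through debugfs. Each entry maps a
 * debugfs file name to the offset of a counter in struct kvm_vcpu
 * (via the VCPU_STAT() helper above).
 */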
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

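/*
 * Each 64-bit word below is ANDed with the corresponding word of the
 * host's STFLE facility list when a VM is created (see kvm_arch_init_vm()),
 * so only facilities present in both the host and this mask are offered
 * to guests.
 */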
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
	0x4000000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}

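/*
 * Walk every guest frame of the memslot, translate it to its host
 * virtual address and transfer the dirty state from the gmap into
 * KVM's dirty bitmap (mark_page_dirty()), holding mmap_sem for read.
 */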
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		kvm->arch.use_vectors = MACHINE_HAS_VX;
		r = MACHINE_HAS_VX ? 0 : -EINVAL;
		break;
	case KVM_CAP_S390_USER_STSI:
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

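/*
 * Requires facility 76 (checked via test_kvm_facility()). Enables or
 * disables AES/DEA key wrapping for the whole VM, filling the wrapping
 * key masks in the CRYCB with fresh random bytes or clearing them, and
 * kicks every vcpu out of SIE so kvm_s390_vcpu_crypto_setup() can apply
 * the new state.
 */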
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

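/*
 * The guest TOD clock is kept as an offset (kvm->arch.epoch) relative to
 * the host TOD: epoch = guest TOD - host TOD. Setting the low word
 * recomputes the offset and propagates it to every vcpu's SIE block,
 * kicking the vcpus out of SIE so the new epoch takes effect.
 */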
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

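/*
 * Issue PQAP with the QCI function code loaded into general register 0
 * and the buffer address in general register 2 to query the AP (crypto
 * adapter) configuration into the caller-provided 128-byte buffer; the
 * condition code of the instruction is returned.
 */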
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

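/*
 * VM creation: allocate the SCA and stagger it within its page across
 * VMs, register an s390 debug feature, set up the facility mask/list
 * page and the crypto control block (if facility 76 is available), and
 * create the guest address space (gmap) unless this is a ucontrol VM.
 */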
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum number of facilities is 16 kbit. Storing
	 * that many bits takes 2 kbyte of memory. Thus one full page holds
	 * both the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). The page address must fit
	 * into 31 bits and be word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.use_vectors = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

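/*
 * vcpu load/put: swap floating-point (or vector, when in use) and access
 * registers between host and guest, enable or disable the guest gmap and
 * flag the vcpu as running via CPUSTAT_RUNNING in the SIE block.
 */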
1064b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1065b0c632dbSHeiko Carstens {
10664725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
106768c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors)
106868c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
106968c55750SEric Farman 	else
10704725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.host_fpregs.fprs);
1071b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
107268c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
107368c55750SEric Farman 		restore_fp_ctl(&vcpu->run->s.regs.fpc);
107468c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
107568c55750SEric Farman 	} else {
10764725c860SMartin Schwidefsky 		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10774725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
107868c55750SEric Farman 	}
107959674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1080480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
10819e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1082b0c632dbSHeiko Carstens }
1083b0c632dbSHeiko Carstens 
1084b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1085b0c632dbSHeiko Carstens {
10869e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1087480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
108868c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
108968c55750SEric Farman 		save_fp_ctl(&vcpu->run->s.regs.fpc);
109068c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
109168c55750SEric Farman 	} else {
10924725c860SMartin Schwidefsky 		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10934725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
109468c55750SEric Farman 	}
109559674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
10964725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
109768c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors)
109868c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
109968c55750SEric Farman 	else
11004725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
1101b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1102b0c632dbSHeiko Carstens }
1103b0c632dbSHeiko Carstens 
1104b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1105b0c632dbSHeiko Carstens {
1106b0c632dbSHeiko Carstens 	/* this equals the initial cpu reset in POP, but we don't switch to ESA */
1107b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1108b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
11098d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
1110b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
1111b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1112b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1113b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1114b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1115b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1116b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
1117b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1118b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1119672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
11203c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
11213c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
11226352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
11236852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
11242ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1125b0c632dbSHeiko Carstens }
1126b0c632dbSHeiko Carstens 
112731928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
112842897d86SMarcelo Tosatti {
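	/*
	 * Pick up the VM-wide TOD epoch under the kvm lock so that all
	 * vcpus observe a consistent guest clock.
	 */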
112972f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
113072f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
113172f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
1132dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
1133dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
113442897d86SMarcelo Tosatti }
113542897d86SMarcelo Tosatti 
11365102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
11375102ee87STony Krowiak {
11389d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
11395102ee87STony Krowiak 		return;
11405102ee87STony Krowiak 
1141a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1142a374e892STony Krowiak 
1143a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1144a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1145a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1146a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1147a374e892STony Krowiak 
11485102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
11495102ee87STony Krowiak }
11505102ee87STony Krowiak 
1151b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1152b31605c1SDominik Dingel {
1153b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1154b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1155b31605c1SDominik Dingel }
1156b31605c1SDominik Dingel 
1157b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1158b31605c1SDominik Dingel {
1159b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1160b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1161b31605c1SDominik Dingel 		return -ENOMEM;
1162b31605c1SDominik Dingel 
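	/*
	 * The zeroed page backs this vcpu's collaborative block list;
	 * the ECB2 tweaks below switch CMMA interpretation on for it.
	 */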
1163b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1164b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1165b31605c1SDominik Dingel 	return 0;
1166b31605c1SDominik Dingel }
1167b31605c1SDominik Dingel 
116891520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
116991520f1aSMichael Mueller {
117091520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
117191520f1aSMichael Mueller 
117291520f1aSMichael Mueller 	vcpu->arch.cpu_id = model->cpu_id;
117391520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
117491520f1aSMichael Mueller 	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
117591520f1aSMichael Mueller }
117691520f1aSMichael Mueller 
1177b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1178b0c632dbSHeiko Carstens {
1179b31605c1SDominik Dingel 	int rc = 0;
1180b31288faSKonstantin Weitz 
11819e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
11829e6dabefSCornelia Huck 						    CPUSTAT_SM |
118369d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
118469d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
118591520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
118691520f1aSMichael Mueller 
1187fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
11889d8d5786SMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
11897feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
11907feb6bb8SMichael Mueller 
119169d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
1192ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
1193217a4406SHeiko Carstens 	if (sclp_has_siif())
1194217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
1195ea5f4969SDavid Hildenbrand 	if (sclp_has_sigpif())
1196ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
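	/* with the vector facility in use, enable vector support in the
	 * SIE execution controls for this guest */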
119713211ea7SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
119813211ea7SEric Farman 		vcpu->arch.sie_block->eca |= 0x00020000;
119913211ea7SEric Farman 		vcpu->arch.sie_block->ecd |= 0x20000000;
120013211ea7SEric Farman 	}
1201492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
12025a5e6536SMatthew Rosato 
1203b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
1204b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1205b31605c1SDominik Dingel 		if (rc)
1206b31605c1SDominik Dingel 			return rc;
1207b31288faSKonstantin Weitz 	}
12080ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1209ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
12109d8d5786SMichael Mueller 
12115102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
12125102ee87STony Krowiak 
1213b31605c1SDominik Dingel 	return rc;
1214b0c632dbSHeiko Carstens }
1215b0c632dbSHeiko Carstens 
1216b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1217b0c632dbSHeiko Carstens 				      unsigned int id)
1218b0c632dbSHeiko Carstens {
12194d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
12207feb6bb8SMichael Mueller 	struct sie_page *sie_page;
12214d47555aSCarsten Otte 	int rc = -EINVAL;
1222b0c632dbSHeiko Carstens 
12234d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
12244d47555aSCarsten Otte 		goto out;
12254d47555aSCarsten Otte 
12264d47555aSCarsten Otte 	rc = -ENOMEM;
12274d47555aSCarsten Otte 
1228b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1229b0c632dbSHeiko Carstens 	if (!vcpu)
12304d47555aSCarsten Otte 		goto out;
1231b0c632dbSHeiko Carstens 
12327feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
12337feb6bb8SMichael Mueller 	if (!sie_page)
1234b0c632dbSHeiko Carstens 		goto out_free_cpu;
1235b0c632dbSHeiko Carstens 
12367feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
12377feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
123868c55750SEric Farman 	vcpu->arch.host_vregs = &sie_page->vregs;
12397feb6bb8SMichael Mueller 
1240b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
124158f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
124258f9460bSCarsten Otte 		if (!kvm->arch.sca) {
124358f9460bSCarsten Otte 			WARN_ON_ONCE(1);
124458f9460bSCarsten Otte 			goto out_free_cpu;
124558f9460bSCarsten Otte 		}
1246abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
124758f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
124858f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
124958f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
125058f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
1251b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1252fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
125358f9460bSCarsten Otte 	}
1254b0c632dbSHeiko Carstens 
1255ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
1256ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1257d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
12585288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1259ba5c1e9bSCarsten Otte 
1260b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
1261b0c632dbSHeiko Carstens 	if (rc)
12627b06bf2fSWei Yongjun 		goto out_free_sie_block;
1263b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1264b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
1265ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1266b0c632dbSHeiko Carstens 
1267b0c632dbSHeiko Carstens 	return vcpu;
12687b06bf2fSWei Yongjun out_free_sie_block:
12697b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
1270b0c632dbSHeiko Carstens out_free_cpu:
1271b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
12724d47555aSCarsten Otte out:
1273b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
1274b0c632dbSHeiko Carstens }
1275b0c632dbSHeiko Carstens 
1276b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1277b0c632dbSHeiko Carstens {
12789a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
1279b0c632dbSHeiko Carstens }
1280b0c632dbSHeiko Carstens 
128149b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
128249b99e1eSChristian Borntraeger {
128349b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
128449b99e1eSChristian Borntraeger }
128549b99e1eSChristian Borntraeger 
128649b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
128749b99e1eSChristian Borntraeger {
128849b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
128949b99e1eSChristian Borntraeger }
129049b99e1eSChristian Borntraeger 
129149b99e1eSChristian Borntraeger /*
129249b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
129349b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
129449b99e1eSChristian Borntraeger  * return immediately. */
129549b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
129649b99e1eSChristian Borntraeger {
129749b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
129849b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
129949b99e1eSChristian Borntraeger 		cpu_relax();
130049b99e1eSChristian Borntraeger }
130149b99e1eSChristian Borntraeger 
130249b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
130349b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
130449b99e1eSChristian Borntraeger {
130549b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
130649b99e1eSChristian Borntraeger 	exit_sie(vcpu);
130749b99e1eSChristian Borntraeger }
130849b99e1eSChristian Borntraeger 
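/*
 * gmap notifier: invoked when a notified guest page is invalidated. If the
 * address covers a vcpu's prefix pages, request a reload (which re-arms the
 * notifier) and kick that vcpu out of SIE.
 */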
13092c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
13102c70fe44SChristian Borntraeger {
13112c70fe44SChristian Borntraeger 	int i;
13122c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
13132c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
13142c70fe44SChristian Borntraeger 
13152c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
13162c70fe44SChristian Borntraeger 		/* match against both prefix pages */
1317fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
13182c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
13192c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
13202c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
13212c70fe44SChristian Borntraeger 		}
13222c70fe44SChristian Borntraeger 	}
13232c70fe44SChristian Borntraeger }
13242c70fe44SChristian Borntraeger 
1325b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1326b6d33834SChristoffer Dall {
1327b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
1328b6d33834SChristoffer Dall 	BUG();
1329b6d33834SChristoffer Dall 	return 0;
1330b6d33834SChristoffer Dall }
1331b6d33834SChristoffer Dall 
133214eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
133314eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
133414eebd91SCarsten Otte {
133514eebd91SCarsten Otte 	int r = -EINVAL;
133614eebd91SCarsten Otte 
133714eebd91SCarsten Otte 	switch (reg->id) {
133829b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
133929b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
134029b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
134129b7c71bSCarsten Otte 		break;
134229b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
134329b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
134429b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
134529b7c71bSCarsten Otte 		break;
134646a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
134746a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
134846a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
134946a6dd1cSJason J. herne 		break;
135046a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
135146a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
135246a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
135346a6dd1cSJason J. herne 		break;
1354536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1355536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
1356536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1357536336c2SDominik Dingel 		break;
1358536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1359536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
1360536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1361536336c2SDominik Dingel 		break;
1362536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1363536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
1364536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1365536336c2SDominik Dingel 		break;
1366672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1367672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
1368672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1369672550fbSChristian Borntraeger 		break;
1370afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1371afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
1372afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1373afa45ff5SChristian Borntraeger 		break;
137414eebd91SCarsten Otte 	default:
137514eebd91SCarsten Otte 		break;
137614eebd91SCarsten Otte 	}
137714eebd91SCarsten Otte 
137814eebd91SCarsten Otte 	return r;
137914eebd91SCarsten Otte }
138014eebd91SCarsten Otte 
138114eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
138214eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
138314eebd91SCarsten Otte {
138414eebd91SCarsten Otte 	int r = -EINVAL;
138514eebd91SCarsten Otte 
138614eebd91SCarsten Otte 	switch (reg->id) {
138729b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
138829b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
138929b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
139029b7c71bSCarsten Otte 		break;
139129b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
139229b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
139329b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
139429b7c71bSCarsten Otte 		break;
139546a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
139646a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
139746a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
139846a6dd1cSJason J. herne 		break;
139946a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
140046a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
140146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
140246a6dd1cSJason J. herne 		break;
1403536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1404536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
1405536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
14069fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
14079fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1408536336c2SDominik Dingel 		break;
1409536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1410536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
1411536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1412536336c2SDominik Dingel 		break;
1413536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1414536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
1415536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1416536336c2SDominik Dingel 		break;
1417672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1418672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
1419672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1420672550fbSChristian Borntraeger 		break;
1421afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1422afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
1423afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1424afa45ff5SChristian Borntraeger 		break;
142514eebd91SCarsten Otte 	default:
142614eebd91SCarsten Otte 		break;
142714eebd91SCarsten Otte 	}
142814eebd91SCarsten Otte 
142914eebd91SCarsten Otte 	return r;
143014eebd91SCarsten Otte }
1431b6d33834SChristoffer Dall 
1432b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1433b0c632dbSHeiko Carstens {
1434b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
1435b0c632dbSHeiko Carstens 	return 0;
1436b0c632dbSHeiko Carstens }
1437b0c632dbSHeiko Carstens 
1438b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1439b0c632dbSHeiko Carstens {
14405a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1441b0c632dbSHeiko Carstens 	return 0;
1442b0c632dbSHeiko Carstens }
1443b0c632dbSHeiko Carstens 
1444b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1445b0c632dbSHeiko Carstens {
14465a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1447b0c632dbSHeiko Carstens 	return 0;
1448b0c632dbSHeiko Carstens }
1449b0c632dbSHeiko Carstens 
1450b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1451b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1452b0c632dbSHeiko Carstens {
145359674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1454b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
145559674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1456b0c632dbSHeiko Carstens 	return 0;
1457b0c632dbSHeiko Carstens }
1458b0c632dbSHeiko Carstens 
1459b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1460b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1461b0c632dbSHeiko Carstens {
146259674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1463b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1464b0c632dbSHeiko Carstens 	return 0;
1465b0c632dbSHeiko Carstens }
1466b0c632dbSHeiko Carstens 
1467b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1468b0c632dbSHeiko Carstens {
14694725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
14704725c860SMartin Schwidefsky 		return -EINVAL;
1471b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
14724725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
14734725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
14744725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1475b0c632dbSHeiko Carstens 	return 0;
1476b0c632dbSHeiko Carstens }
1477b0c632dbSHeiko Carstens 
1478b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1479b0c632dbSHeiko Carstens {
1480b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1481b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1482b0c632dbSHeiko Carstens 	return 0;
1483b0c632dbSHeiko Carstens }
1484b0c632dbSHeiko Carstens 
1485b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1486b0c632dbSHeiko Carstens {
1487b0c632dbSHeiko Carstens 	int rc = 0;
1488b0c632dbSHeiko Carstens 
14897a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1490b0c632dbSHeiko Carstens 		rc = -EBUSY;
1491d7b0b5ebSCarsten Otte 	else {
1492d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1493d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1494d7b0b5ebSCarsten Otte 	}
1495b0c632dbSHeiko Carstens 	return rc;
1496b0c632dbSHeiko Carstens }
1497b0c632dbSHeiko Carstens 
1498b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1499b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1500b0c632dbSHeiko Carstens {
1501b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1502b0c632dbSHeiko Carstens }
1503b0c632dbSHeiko Carstens 
150427291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
150527291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
150627291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
150727291e21SDavid Hildenbrand 
1508d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1509d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1510b0c632dbSHeiko Carstens {
151127291e21SDavid Hildenbrand 	int rc = 0;
151227291e21SDavid Hildenbrand 
151327291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
151427291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
151527291e21SDavid Hildenbrand 
15162de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
151727291e21SDavid Hildenbrand 		return -EINVAL;
151827291e21SDavid Hildenbrand 
151927291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
152027291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
152127291e21SDavid Hildenbrand 		/* enforce guest PER */
152227291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
152327291e21SDavid Hildenbrand 
152427291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
152527291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
152627291e21SDavid Hildenbrand 	} else {
152727291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
152827291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
152927291e21SDavid Hildenbrand 	}
153027291e21SDavid Hildenbrand 
153127291e21SDavid Hildenbrand 	if (rc) {
153227291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
153327291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
153427291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
153527291e21SDavid Hildenbrand 	}
153627291e21SDavid Hildenbrand 
153727291e21SDavid Hildenbrand 	return rc;
1538b0c632dbSHeiko Carstens }
1539b0c632dbSHeiko Carstens 
154062d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
154162d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
154262d9f0dbSMarcelo Tosatti {
15436352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
15446352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
15456352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
154662d9f0dbSMarcelo Tosatti }
154762d9f0dbSMarcelo Tosatti 
154862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
154962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
155062d9f0dbSMarcelo Tosatti {
15516352e4d2SDavid Hildenbrand 	int rc = 0;
15526352e4d2SDavid Hildenbrand 
15536352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
15546352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
15556352e4d2SDavid Hildenbrand 
15566352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
15576352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
15586352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
15596352e4d2SDavid Hildenbrand 		break;
15606352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
15616352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
15626352e4d2SDavid Hildenbrand 		break;
15636352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
15646352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
15656352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
15666352e4d2SDavid Hildenbrand 	default:
15676352e4d2SDavid Hildenbrand 		rc = -ENXIO;
15686352e4d2SDavid Hildenbrand 	}
15696352e4d2SDavid Hildenbrand 
15706352e4d2SDavid Hildenbrand 	return rc;
157162d9f0dbSMarcelo Tosatti }
157262d9f0dbSMarcelo Tosatti 
1573b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1574b31605c1SDominik Dingel {
1575b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1576b31605c1SDominik Dingel 		return false;
1577b31605c1SDominik Dingel 	/* only enable for z10 and later */
1578b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1579b31605c1SDominik Dingel 		return false;
1580b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1581b31605c1SDominik Dingel 		return false;
1582b31605c1SDominik Dingel 	return true;
1583b31605c1SDominik Dingel }
1584b31605c1SDominik Dingel 
15858ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
15868ad35755SDavid Hildenbrand {
15878ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
15888ad35755SDavid Hildenbrand }
15898ad35755SDavid Hildenbrand 
15902c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
15912c70fe44SChristian Borntraeger {
15928ad35755SDavid Hildenbrand retry:
15938ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
15942c70fe44SChristian Borntraeger 	/*
15952c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
15962c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
15972c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
15982c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
15992c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
16002c70fe44SChristian Borntraeger 	 */
16018ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
16022c70fe44SChristian Borntraeger 		int rc;
16032c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1604fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
16052c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
16062c70fe44SChristian Borntraeger 		if (rc)
16072c70fe44SChristian Borntraeger 			return rc;
16088ad35755SDavid Hildenbrand 		goto retry;
16092c70fe44SChristian Borntraeger 	}
16108ad35755SDavid Hildenbrand 
1611d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1612d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1613d3d692c8SDavid Hildenbrand 		goto retry;
1614d3d692c8SDavid Hildenbrand 	}
1615d3d692c8SDavid Hildenbrand 
16168ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
16178ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
16188ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
16198ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
16208ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
16218ad35755SDavid Hildenbrand 		}
16228ad35755SDavid Hildenbrand 		goto retry;
16238ad35755SDavid Hildenbrand 	}
16248ad35755SDavid Hildenbrand 
16258ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
16268ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
16278ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
16288ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
16298ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
16308ad35755SDavid Hildenbrand 		}
16318ad35755SDavid Hildenbrand 		goto retry;
16328ad35755SDavid Hildenbrand 	}
16338ad35755SDavid Hildenbrand 
16340759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
16350759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
16360759d068SDavid Hildenbrand 
16372c70fe44SChristian Borntraeger 	return 0;
16382c70fe44SChristian Borntraeger }
16392c70fe44SChristian Borntraeger 
1640fa576c58SThomas Huth /**
1641fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1642fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1643fa576c58SThomas Huth  * @gpa: Guest physical address
1644fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1645fa576c58SThomas Huth  *
1646fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1647fa576c58SThomas Huth  *
1648fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1649fa576c58SThomas Huth  */
1650fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
165124eb3a82SDominik Dingel {
1652527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1653527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
165424eb3a82SDominik Dingel }
165524eb3a82SDominik Dingel 
16563c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
16573c038e6bSDominik Dingel 				      unsigned long token)
16583c038e6bSDominik Dingel {
16593c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1660383d0b05SJens Freimann 	struct kvm_s390_irq irq;
16613c038e6bSDominik Dingel 
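	/*
	 * A pfault INIT notification is injected into this vcpu directly,
	 * while the matching DONE is delivered as a floating interrupt on
	 * the VM, so any vcpu may pick it up.
	 */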
16623c038e6bSDominik Dingel 	if (start_token) {
1663383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1664383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1665383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
16663c038e6bSDominik Dingel 	} else {
16673c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1668383d0b05SJens Freimann 		inti.parm64 = token;
16693c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
16703c038e6bSDominik Dingel 	}
16713c038e6bSDominik Dingel }
16723c038e6bSDominik Dingel 
16733c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
16743c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
16753c038e6bSDominik Dingel {
16763c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
16773c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
16783c038e6bSDominik Dingel }
16793c038e6bSDominik Dingel 
16803c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
16813c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
16823c038e6bSDominik Dingel {
16833c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
16843c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
16853c038e6bSDominik Dingel }
16863c038e6bSDominik Dingel 
16873c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
16883c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
16893c038e6bSDominik Dingel {
16903c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
16913c038e6bSDominik Dingel }
16923c038e6bSDominik Dingel 
16933c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
16943c038e6bSDominik Dingel {
16953c038e6bSDominik Dingel 	/*
16963c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
16973c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
16983c038e6bSDominik Dingel 	 */
16993c038e6bSDominik Dingel 	return true;
17003c038e6bSDominik Dingel }
17013c038e6bSDominik Dingel 
17023c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
17033c038e6bSDominik Dingel {
17043c038e6bSDominik Dingel 	hva_t hva;
17053c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
17063c038e6bSDominik Dingel 	int rc;
17073c038e6bSDominik Dingel 
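	/*
	 * Only hand the fault to the async-pf machinery when the guest has
	 * pfault handling enabled and is currently able to take the
	 * notification; otherwise the caller falls back to a synchronous
	 * fault-in.
	 */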
17083c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
17093c038e6bSDominik Dingel 		return 0;
17103c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
17113c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
17123c038e6bSDominik Dingel 		return 0;
17133c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
17143c038e6bSDominik Dingel 		return 0;
17159a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
17163c038e6bSDominik Dingel 		return 0;
17173c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
17183c038e6bSDominik Dingel 		return 0;
17193c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
17203c038e6bSDominik Dingel 		return 0;
17213c038e6bSDominik Dingel 
172281480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
172381480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
172481480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
17253c038e6bSDominik Dingel 		return 0;
17263c038e6bSDominik Dingel 
17273c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
17283c038e6bSDominik Dingel 	return rc;
17293c038e6bSDominik Dingel }
17303c038e6bSDominik Dingel 
17313fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1732b0c632dbSHeiko Carstens {
17333fb4c40fSThomas Huth 	int rc, cpuflags;
1734e168bf8dSCarsten Otte 
17353c038e6bSDominik Dingel 	/*
17363c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
17373c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
17383c038e6bSDominik Dingel 	 * handled outside the worker.
17393c038e6bSDominik Dingel 	 */
17403c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
17413c038e6bSDominik Dingel 
17425a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1743b0c632dbSHeiko Carstens 
1744b0c632dbSHeiko Carstens 	if (need_resched())
1745b0c632dbSHeiko Carstens 		schedule();
1746b0c632dbSHeiko Carstens 
1747d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
174871cde587SChristian Borntraeger 		s390_handle_mcck();
174971cde587SChristian Borntraeger 
175079395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
175179395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
175279395031SJens Freimann 		if (rc)
175379395031SJens Freimann 			return rc;
175479395031SJens Freimann 	}
17550ff31867SCarsten Otte 
17562c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
17572c70fe44SChristian Borntraeger 	if (rc)
17582c70fe44SChristian Borntraeger 		return rc;
17592c70fe44SChristian Borntraeger 
176027291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
176127291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
176227291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
176327291e21SDavid Hildenbrand 	}
176427291e21SDavid Hildenbrand 
1765b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
17663fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
17673fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
17683fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
17692b29a9fdSDominik Dingel 
17703fb4c40fSThomas Huth 	return 0;
17713fb4c40fSThomas Huth }
17723fb4c40fSThomas Huth 
1773492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1774492d8642SThomas Huth {
1775492d8642SThomas Huth 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
1776492d8642SThomas Huth 	u8 opcode;
1777492d8642SThomas Huth 	int rc;
1778492d8642SThomas Huth 
1779492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1780492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
1781492d8642SThomas Huth 
1782492d8642SThomas Huth 	/*
1783492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
1784492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
1785492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
1786492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
1787492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
1788492d8642SThomas Huth 	 * to be able to forward the PSW.
1789492d8642SThomas Huth 	 */
17908ae04b8fSAlexander Yarygin 	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
1791492d8642SThomas Huth 	if (rc)
1792492d8642SThomas Huth 		return kvm_s390_inject_prog_cond(vcpu, rc);
1793492d8642SThomas Huth 	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1794492d8642SThomas Huth 
1795492d8642SThomas Huth 	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1796492d8642SThomas Huth }
1797492d8642SThomas Huth 
17983fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
17993fb4c40fSThomas Huth {
180024eb3a82SDominik Dingel 	int rc = -1;
18012b29a9fdSDominik Dingel 
18022b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
18032b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
18042b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
18052b29a9fdSDominik Dingel 
180627291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
180727291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
180827291e21SDavid Hildenbrand 
18093fb4c40fSThomas Huth 	if (exit_reason >= 0) {
18107c470539SMartin Schwidefsky 		rc = 0;
1811210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1812210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1813210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1814210b1607SThomas Huth 						current->thread.gmap_addr;
1815210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1816210b1607SThomas Huth 		rc = -EREMOTE;
181724eb3a82SDominik Dingel 
181824eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
18193c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
182024eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1821fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
182224eb3a82SDominik Dingel 			rc = 0;
1823fa576c58SThomas Huth 		} else {
1824fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1825fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1826fa576c58SThomas Huth 		}
182724eb3a82SDominik Dingel 	}
182824eb3a82SDominik Dingel 
1829492d8642SThomas Huth 	if (rc == -1)
1830492d8642SThomas Huth 		rc = vcpu_post_run_fault_in_sie(vcpu);
1831b0c632dbSHeiko Carstens 
18325a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
18333fb4c40fSThomas Huth 
1834a76ccff6SThomas Huth 	if (rc == 0) {
1835a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
18362955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
18372955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1838a76ccff6SThomas Huth 		else
1839a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1840a76ccff6SThomas Huth 	}
1841a76ccff6SThomas Huth 
18423fb4c40fSThomas Huth 	return rc;
18433fb4c40fSThomas Huth }
18443fb4c40fSThomas Huth 
18453fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
18463fb4c40fSThomas Huth {
18473fb4c40fSThomas Huth 	int rc, exit_reason;
18483fb4c40fSThomas Huth 
1849800c1065SThomas Huth 	/*
1850800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except while
1851800c1065SThomas Huth 	 * running the guest), so that memslots (and other data) are protected
1852800c1065SThomas Huth 	 */
1853800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1854800c1065SThomas Huth 
1855a76ccff6SThomas Huth 	do {
18563fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
18573fb4c40fSThomas Huth 		if (rc)
1858a76ccff6SThomas Huth 			break;
18593fb4c40fSThomas Huth 
1860800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
18613fb4c40fSThomas Huth 		/*
1862a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
1863a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
18643fb4c40fSThomas Huth 		 */
18653fb4c40fSThomas Huth 		preempt_disable();
18663fb4c40fSThomas Huth 		kvm_guest_enter();
18673fb4c40fSThomas Huth 		preempt_enable();
1868a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1869a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
18703fb4c40fSThomas Huth 		kvm_guest_exit();
1871800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
18723fb4c40fSThomas Huth 
18733fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
187427291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
18753fb4c40fSThomas Huth 
1876800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1877e168bf8dSCarsten Otte 	return rc;
1878b0c632dbSHeiko Carstens }
1879b0c632dbSHeiko Carstens 
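/*
 * sync_regs() copies the register state that userspace marked dirty in
 * kvm_run into the vcpu/SIE block before entering the guest;
 * store_regs() does the reverse before returning to userspace.
 */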
1880b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1881b028ee3eSDavid Hildenbrand {
1882b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1883b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1884b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1885b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1886b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1887b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1888d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1889d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1890b028ee3eSDavid Hildenbrand 	}
1891b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1892b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1893b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1894b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1895b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1896b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1897b028ee3eSDavid Hildenbrand 	}
1898b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1899b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1900b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1901b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
19029fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
19039fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1904b028ee3eSDavid Hildenbrand 	}
1905b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1906b028ee3eSDavid Hildenbrand }
1907b028ee3eSDavid Hildenbrand 
1908b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1909b028ee3eSDavid Hildenbrand {
1910b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1911b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1912b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1913b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1914b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1915b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1916b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1917b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1918b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1919b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1920b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1921b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1922b028ee3eSDavid Hildenbrand }
1923b028ee3eSDavid Hildenbrand 
1924b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1925b0c632dbSHeiko Carstens {
19268f2abe6aSChristian Borntraeger 	int rc;
1927b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1928b0c632dbSHeiko Carstens 
192927291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
193027291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
193127291e21SDavid Hildenbrand 		return 0;
193227291e21SDavid Hildenbrand 	}
193327291e21SDavid Hildenbrand 
1934b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1935b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1936b0c632dbSHeiko Carstens 
19376352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
19386852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
19396352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
19406352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
19416352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
19426352e4d2SDavid Hildenbrand 		return -EINVAL;
19436352e4d2SDavid Hildenbrand 	}
1944b0c632dbSHeiko Carstens 
1945b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1946d7b0b5ebSCarsten Otte 
1947dab4079dSHeiko Carstens 	might_fault();
1948e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
19499ace903dSChristian Ehrhardt 
1950b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1951b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
19528f2abe6aSChristian Borntraeger 		rc = -EINTR;
1953b1d16c49SChristian Ehrhardt 	}
19548f2abe6aSChristian Borntraeger 
195527291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
195627291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
195727291e21SDavid Hildenbrand 		rc = 0;
195827291e21SDavid Hildenbrand 	}
195927291e21SDavid Hildenbrand 
1960b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
19618f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
19628f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
19638f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
19648f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
19658f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
19668f2abe6aSChristian Borntraeger 		rc = 0;
19678f2abe6aSChristian Borntraeger 	}
19688f2abe6aSChristian Borntraeger 
19698f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
19708f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
19718f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
19728f2abe6aSChristian Borntraeger 		rc = 0;
19738f2abe6aSChristian Borntraeger 	}
19748f2abe6aSChristian Borntraeger 
1975b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1976d7b0b5ebSCarsten Otte 
1977b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1978b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1979b0c632dbSHeiko Carstens 
1980b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
19817e8e6ab4SHeiko Carstens 	return rc;
1982b0c632dbSHeiko Carstens }
1983b0c632dbSHeiko Carstens 
1984b0c632dbSHeiko Carstens /*
1985b0c632dbSHeiko Carstens  * store status at address
1986b0c632dbSHeiko Carstens  * we have two special cases:
1987b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1988b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1989b0c632dbSHeiko Carstens  */
1990d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1991b0c632dbSHeiko Carstens {
1992092670cdSCarsten Otte 	unsigned char archmode = 1;
1993fda902cbSMichael Mueller 	unsigned int px;
1994178bd789SThomas Huth 	u64 clkcomp;
1995d0bce605SHeiko Carstens 	int rc;
1996b0c632dbSHeiko Carstens 
1997d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1998d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1999b0c632dbSHeiko Carstens 			return -EFAULT;
2000d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
2001d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2002d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
2003b0c632dbSHeiko Carstens 			return -EFAULT;
2004d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2005d0bce605SHeiko Carstens 	}
2006d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2007d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
2008d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2009d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
2010d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2011d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2012fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
2013d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
2014fda902cbSMichael Mueller 			      &px, 4);
2015d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
2016d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
2017d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
2018d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2019d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
2020d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2021d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
2022178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2023d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2024d0bce605SHeiko Carstens 			      &clkcomp, 8);
2025d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2026d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2027d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2028d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2029d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2030b0c632dbSHeiko Carstens }
2031b0c632dbSHeiko Carstens 
2032e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2033e879892cSThomas Huth {
2034e879892cSThomas Huth 	/*
2035e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2036e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
2037e879892cSThomas Huth 	 * them into the save area.
2038e879892cSThomas Huth 	 */
2039e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2040e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2041e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2042e879892cSThomas Huth 
2043e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2044e879892cSThomas Huth }
2045e879892cSThomas Huth 
2046bc17de7cSEric Farman /*
2047bc17de7cSEric Farman  * store additional status at address
2048bc17de7cSEric Farman  */
2049bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2050bc17de7cSEric Farman 					unsigned long gpa)
2051bc17de7cSEric Farman {
2052bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2053bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2054bc17de7cSEric Farman 		return 0;
2055bc17de7cSEric Farman 
2056bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2057bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2058bc17de7cSEric Farman }
2059bc17de7cSEric Farman 
2060bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2061bc17de7cSEric Farman {
2062bc17de7cSEric Farman 	if (!test_kvm_facility(vcpu->kvm, 129))
2063bc17de7cSEric Farman 		return 0;
2064bc17de7cSEric Farman 
2065bc17de7cSEric Farman 	/*
2066bc17de7cSEric Farman 	 * The guest VXRS are in the host VXRS due to the lazy
2067bc17de7cSEric Farman 	 * copying in vcpu load/put. Let's update our copies before we save
2068bc17de7cSEric Farman 	 * them into the save area.
2069bc17de7cSEric Farman 	 */
2070bc17de7cSEric Farman 	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2071bc17de7cSEric Farman 
2072bc17de7cSEric Farman 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2073bc17de7cSEric Farman }
2074bc17de7cSEric Farman 
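/*
 * IBS is only useful while a single vcpu is running. These helpers queue the
 * ENABLE/DISABLE request (cancelling the opposite one if pending) and kick
 * the vcpu out of SIE so the request is handled before the next guest entry.
 */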
20758ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
20768ad35755SDavid Hildenbrand {
20778ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
20788ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
20798ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
20808ad35755SDavid Hildenbrand }
20818ad35755SDavid Hildenbrand 
20828ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
20838ad35755SDavid Hildenbrand {
20848ad35755SDavid Hildenbrand 	unsigned int i;
20858ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
20868ad35755SDavid Hildenbrand 
20878ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
20888ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
20898ad35755SDavid Hildenbrand 	}
20908ad35755SDavid Hildenbrand }
20918ad35755SDavid Hildenbrand 
20928ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
20938ad35755SDavid Hildenbrand {
20948ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
20958ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
20968ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
20978ad35755SDavid Hildenbrand }
20988ad35755SDavid Hildenbrand 
20996852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
21006852d7b6SDavid Hildenbrand {
21018ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
21028ad35755SDavid Hildenbrand 
21038ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
21048ad35755SDavid Hildenbrand 		return;
21058ad35755SDavid Hildenbrand 
21066852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
21078ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2108433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
21098ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
21108ad35755SDavid Hildenbrand 
21118ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
21128ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
21138ad35755SDavid Hildenbrand 			started_vcpus++;
21148ad35755SDavid Hildenbrand 	}
21158ad35755SDavid Hildenbrand 
21168ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
21178ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
21188ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
21198ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
21208ad35755SDavid Hildenbrand 		/*
21218ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
21228ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
21238ad35755SDavid Hildenbrand 	 * outstanding ENABLE requests.
21248ad35755SDavid Hildenbrand 		 */
21258ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
21268ad35755SDavid Hildenbrand 	}
21278ad35755SDavid Hildenbrand 
21286852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
21298ad35755SDavid Hildenbrand 	/*
21308ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
21318ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
21328ad35755SDavid Hildenbrand 	 */
2133d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2134433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
21358ad35755SDavid Hildenbrand 	return;
21366852d7b6SDavid Hildenbrand }
21376852d7b6SDavid Hildenbrand 
21386852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
21396852d7b6SDavid Hildenbrand {
21408ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
21418ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
21428ad35755SDavid Hildenbrand 
21438ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
21448ad35755SDavid Hildenbrand 		return;
21458ad35755SDavid Hildenbrand 
21466852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
21478ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2148433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
21498ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
21508ad35755SDavid Hildenbrand 
215132f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
21526cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
215332f5ff63SDavid Hildenbrand 
21546cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
21558ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
21568ad35755SDavid Hildenbrand 
21578ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
21588ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
21598ad35755SDavid Hildenbrand 			started_vcpus++;
21608ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
21618ad35755SDavid Hildenbrand 		}
21628ad35755SDavid Hildenbrand 	}
21638ad35755SDavid Hildenbrand 
21648ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
21658ad35755SDavid Hildenbrand 		/*
21668ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
21678ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
21688ad35755SDavid Hildenbrand 		 */
21698ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
21708ad35755SDavid Hildenbrand 	}
21718ad35755SDavid Hildenbrand 
2172433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
21738ad35755SDavid Hildenbrand 	return;
21746852d7b6SDavid Hildenbrand }
21756852d7b6SDavid Hildenbrand 
2176d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2177d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2178d6712df9SCornelia Huck {
2179d6712df9SCornelia Huck 	int r;
2180d6712df9SCornelia Huck 
2181d6712df9SCornelia Huck 	if (cap->flags)
2182d6712df9SCornelia Huck 		return -EINVAL;
2183d6712df9SCornelia Huck 
2184d6712df9SCornelia Huck 	switch (cap->cap) {
2185fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2186fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2187fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2188fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2189fa6b7fe9SCornelia Huck 		}
2190fa6b7fe9SCornelia Huck 		r = 0;
2191fa6b7fe9SCornelia Huck 		break;
2192d6712df9SCornelia Huck 	default:
2193d6712df9SCornelia Huck 		r = -EINVAL;
2194d6712df9SCornelia Huck 		break;
2195d6712df9SCornelia Huck 	}
2196d6712df9SCornelia Huck 	return r;
2197d6712df9SCornelia Huck }
2198d6712df9SCornelia Huck 
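/*
 * Hedged userspace sketch (an assumption, not part of this file): setting the
 * per-VM css_support flag through the vcpu KVM_ENABLE_CAP ioctl handled above.
 * "vcpu_fd" is an assumed open vcpu file descriptor.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_css_support(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_CSS_SUPPORT;	/* flags and args must stay zero */

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}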
219941408c28SThomas Huth static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
220041408c28SThomas Huth 				  struct kvm_s390_mem_op *mop)
220141408c28SThomas Huth {
220241408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
220341408c28SThomas Huth 	void *tmpbuf = NULL;
220441408c28SThomas Huth 	int r, srcu_idx;
220541408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
220641408c28SThomas Huth 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
220741408c28SThomas Huth 
220841408c28SThomas Huth 	if (mop->flags & ~supported_flags)
220941408c28SThomas Huth 		return -EINVAL;
221041408c28SThomas Huth 
221141408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
221241408c28SThomas Huth 		return -E2BIG;
221341408c28SThomas Huth 
221441408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
221541408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
221641408c28SThomas Huth 		if (!tmpbuf)
221741408c28SThomas Huth 			return -ENOMEM;
221841408c28SThomas Huth 	}
221941408c28SThomas Huth 
222041408c28SThomas Huth 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
222141408c28SThomas Huth 
222241408c28SThomas Huth 	switch (mop->op) {
222341408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
222441408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
222541408c28SThomas Huth 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
222641408c28SThomas Huth 			break;
222741408c28SThomas Huth 		}
222841408c28SThomas Huth 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
222941408c28SThomas Huth 		if (r == 0) {
223041408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
223141408c28SThomas Huth 				r = -EFAULT;
223241408c28SThomas Huth 		}
223341408c28SThomas Huth 		break;
223441408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
223541408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
223641408c28SThomas Huth 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
223741408c28SThomas Huth 			break;
223841408c28SThomas Huth 		}
223941408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
224041408c28SThomas Huth 			r = -EFAULT;
224141408c28SThomas Huth 			break;
224241408c28SThomas Huth 		}
224341408c28SThomas Huth 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
224441408c28SThomas Huth 		break;
224541408c28SThomas Huth 	default:
224641408c28SThomas Huth 		r = -EINVAL;
224741408c28SThomas Huth 	}
224841408c28SThomas Huth 
224941408c28SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
225041408c28SThomas Huth 
225141408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
225241408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
225341408c28SThomas Huth 
225441408c28SThomas Huth 	vfree(tmpbuf);
225541408c28SThomas Huth 	return r;
225641408c28SThomas Huth }
225741408c28SThomas Huth 
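/*
 * Hedged userspace sketch (an assumption, not part of this file): reading
 * guest memory through the KVM_S390_MEM_OP ioctl implemented above.
 * "vcpu_fd" is an assumed open vcpu file descriptor; "len" must not exceed
 * MEM_OP_MAX_SIZE (65536 bytes).  Setting KVM_S390_MEMOP_F_CHECK_ONLY in
 * op.flags would perform only the access check without copying data.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_logical(int vcpu_fd, __u64 gaddr, void *buf, __u32 len)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));		/* reserved fields must be zero */
	op.gaddr = gaddr;			/* guest logical address */
	op.size = len;
	op.op = KVM_S390_MEMOP_LOGICAL_READ;
	op.buf = (__u64)(unsigned long)buf;	/* userspace destination buffer */
	op.ar = 0;				/* use access register 0 */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}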
2258b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
2259b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
2260b0c632dbSHeiko Carstens {
2261b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
2262b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2263800c1065SThomas Huth 	int idx;
2264bc923cc9SAvi Kivity 	long r;
2265b0c632dbSHeiko Carstens 
226693736624SAvi Kivity 	switch (ioctl) {
226793736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
2268ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2269383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
2270ba5c1e9bSCarsten Otte 
227193736624SAvi Kivity 		r = -EFAULT;
2272ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
227393736624SAvi Kivity 			break;
2274383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
2275383d0b05SJens Freimann 			return -EINVAL;
2276383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
227793736624SAvi Kivity 		break;
2278ba5c1e9bSCarsten Otte 	}
2279b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
2280800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
2281bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
2282800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
2283bc923cc9SAvi Kivity 		break;
2284b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
2285b0c632dbSHeiko Carstens 		psw_t psw;
2286b0c632dbSHeiko Carstens 
2287bc923cc9SAvi Kivity 		r = -EFAULT;
2288b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
2289bc923cc9SAvi Kivity 			break;
2290bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2291bc923cc9SAvi Kivity 		break;
2292b0c632dbSHeiko Carstens 	}
2293b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
2294bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2295bc923cc9SAvi Kivity 		break;
229614eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
229714eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
229814eebd91SCarsten Otte 		struct kvm_one_reg reg;
229914eebd91SCarsten Otte 		r = -EFAULT;
230014eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
230114eebd91SCarsten Otte 			break;
230214eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
230314eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
230414eebd91SCarsten Otte 		else
230514eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
230614eebd91SCarsten Otte 		break;
230714eebd91SCarsten Otte 	}
230827e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
230927e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
231027e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
231127e0393fSCarsten Otte 
231227e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
231327e0393fSCarsten Otte 			r = -EFAULT;
231427e0393fSCarsten Otte 			break;
231527e0393fSCarsten Otte 		}
231627e0393fSCarsten Otte 
231727e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
231827e0393fSCarsten Otte 			r = -EINVAL;
231927e0393fSCarsten Otte 			break;
232027e0393fSCarsten Otte 		}
232127e0393fSCarsten Otte 
232227e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
232327e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
232427e0393fSCarsten Otte 		break;
232527e0393fSCarsten Otte 	}
232627e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
232727e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
232827e0393fSCarsten Otte 
232927e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
233027e0393fSCarsten Otte 			r = -EFAULT;
233127e0393fSCarsten Otte 			break;
233227e0393fSCarsten Otte 		}
233327e0393fSCarsten Otte 
233427e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
233527e0393fSCarsten Otte 			r = -EINVAL;
233627e0393fSCarsten Otte 			break;
233727e0393fSCarsten Otte 		}
233827e0393fSCarsten Otte 
233927e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
234027e0393fSCarsten Otte 			ucasmap.length);
234127e0393fSCarsten Otte 		break;
234227e0393fSCarsten Otte 	}
234327e0393fSCarsten Otte #endif
2344ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
2345527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
2346ccc7910fSCarsten Otte 		break;
2347ccc7910fSCarsten Otte 	}
2348d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
2349d6712df9SCornelia Huck 	{
2350d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
2351d6712df9SCornelia Huck 		r = -EFAULT;
2352d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
2353d6712df9SCornelia Huck 			break;
2354d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2355d6712df9SCornelia Huck 		break;
2356d6712df9SCornelia Huck 	}
235741408c28SThomas Huth 	case KVM_S390_MEM_OP: {
235841408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
235941408c28SThomas Huth 
236041408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
236141408c28SThomas Huth 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
236241408c28SThomas Huth 		else
236341408c28SThomas Huth 			r = -EFAULT;
236441408c28SThomas Huth 		break;
236541408c28SThomas Huth 	}
2366b0c632dbSHeiko Carstens 	default:
23673e6afcf1SCarsten Otte 		r = -ENOTTY;
2368b0c632dbSHeiko Carstens 	}
2369bc923cc9SAvi Kivity 	return r;
2370b0c632dbSHeiko Carstens }
2371b0c632dbSHeiko Carstens 
23725b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
23735b1c1493SCarsten Otte {
23745b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
23755b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
23765b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
23775b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
23785b1c1493SCarsten Otte 		get_page(vmf->page);
23795b1c1493SCarsten Otte 		return 0;
23805b1c1493SCarsten Otte 	}
23815b1c1493SCarsten Otte #endif
23825b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
23835b1c1493SCarsten Otte }
23845b1c1493SCarsten Otte 
23855587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
23865587027cSAneesh Kumar K.V 			    unsigned long npages)
2387db3fe4ebSTakuya Yoshikawa {
2388db3fe4ebSTakuya Yoshikawa 	return 0;
2389db3fe4ebSTakuya Yoshikawa }
2390db3fe4ebSTakuya Yoshikawa 
2391b0c632dbSHeiko Carstens /* Section: memory related */
2392f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2393f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
23947b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
23957b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
2396b0c632dbSHeiko Carstens {
2397dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end at a segment
2398dd2887e7SNick Wang 	   boundary (1 MB). The memory in userland may be fragmented across
2399dd2887e7SNick Wang 	   several different vmas, and it is fine to mmap() and munmap() memory
2400dd2887e7SNick Wang 	   in this slot at any time after this call. */
2401b0c632dbSHeiko Carstens 
2402598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
2403b0c632dbSHeiko Carstens 		return -EINVAL;
2404b0c632dbSHeiko Carstens 
2405598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
2406b0c632dbSHeiko Carstens 		return -EINVAL;
2407b0c632dbSHeiko Carstens 
2408f7784b8eSMarcelo Tosatti 	return 0;
2409f7784b8eSMarcelo Tosatti }
2410f7784b8eSMarcelo Tosatti 
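/*
 * Hedged userspace sketch (an assumption, not part of this file): registering
 * guest memory that satisfies the segment-alignment checks above.
 * userspace_addr and memory_size must be multiples of 1 MB; in practice
 * guest_phys_addr should be segment aligned as well so that the gmap mapping
 * in the commit handler below succeeds.  "vm_fd" is an assumed open VM file
 * descriptor.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int add_segment_aligned_slot(int vm_fd, __u32 slot, __u64 guest_phys,
				    void *host_mem, __u64 size)
{
	struct kvm_userspace_memory_region region = {
		.slot = slot,
		.flags = 0,
		.guest_phys_addr = guest_phys,			/* 1 MB aligned */
		.memory_size = size,				/* multiple of 1 MB */
		.userspace_addr = (__u64)(unsigned long)host_mem, /* 1 MB aligned */
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}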
2411f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
2412f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
24138482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
24148482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
2415f7784b8eSMarcelo Tosatti {
2416f7850c92SCarsten Otte 	int rc;
2417f7784b8eSMarcelo Tosatti 
24182cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
24192cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
24202cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
24212cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
24222cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
24232cef4debSChristian Borntraeger 	 */
24242cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
24252cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
24262cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
24272cef4debSChristian Borntraeger 		return;
2428598841caSCarsten Otte 
2429598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2430598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
2431598841caSCarsten Otte 	if (rc)
2432f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
2433598841caSCarsten Otte 	return;
2434b0c632dbSHeiko Carstens }
2435b0c632dbSHeiko Carstens 
2436b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
2437b0c632dbSHeiko Carstens {
24389d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2439b0c632dbSHeiko Carstens }
2440b0c632dbSHeiko Carstens 
2441b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2442b0c632dbSHeiko Carstens {
2443b0c632dbSHeiko Carstens 	kvm_exit();
2444b0c632dbSHeiko Carstens }
2445b0c632dbSHeiko Carstens 
2446b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2447b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2448566af940SCornelia Huck 
2449566af940SCornelia Huck /*
2450566af940SCornelia Huck  * Enable autoloading of the kvm module.
2451566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2452566af940SCornelia Huck  * since x86 takes a different approach.
2453566af940SCornelia Huck  */
2454566af940SCornelia Huck #include <linux/miscdevice.h>
2455566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2456566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
2457