xref: /linux/arch/s390/kvm/kvm-s390.c (revision fa576c583d877d667d9acaed909a3dfc6b03e138)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25b0c632dbSHeiko Carstens #include <linux/slab.h>
26ba5c1e9bSCarsten Otte #include <linux/timer.h>
27cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
28b0c632dbSHeiko Carstens #include <asm/lowcore.h>
29b0c632dbSHeiko Carstens #include <asm/pgtable.h>
30f5daba1dSHeiko Carstens #include <asm/nmi.h>
31a0616cdeSDavid Howells #include <asm/switch_to.h>
3278c4b59fSMichael Mueller #include <asm/facility.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
54ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
55aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
56aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
57ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
587697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
59ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
60ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
61ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
62ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
63ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
65ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6669d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
67453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
68453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
69453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
70453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
71453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
728a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
73453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
74453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
75b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
76453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
77453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
78bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
795288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
80bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
817697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
825288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
835288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
845288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
855288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
865288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
87388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
88e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
8941628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
90b0c632dbSHeiko Carstens 	{ NULL }
91b0c632dbSHeiko Carstens };
92b0c632dbSHeiko Carstens 
9378c4b59fSMichael Mueller unsigned long *vfacilities;
942c70fe44SChristian Borntraeger static struct gmap_notifier gmap_notifier;
95b0c632dbSHeiko Carstens 
9678c4b59fSMichael Mueller /* test availability of vfacility */
97280ef0f1SHeiko Carstens int test_vfacility(unsigned long nr)
9878c4b59fSMichael Mueller {
9978c4b59fSMichael Mueller 	return __test_facility(nr, (void *) vfacilities);
10078c4b59fSMichael Mueller }
10178c4b59fSMichael Mueller 
102b0c632dbSHeiko Carstens /* Section: not file related */
10310474ae8SAlexander Graf int kvm_arch_hardware_enable(void *garbage)
104b0c632dbSHeiko Carstens {
105b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
10610474ae8SAlexander Graf 	return 0;
107b0c632dbSHeiko Carstens }
108b0c632dbSHeiko Carstens 
109b0c632dbSHeiko Carstens void kvm_arch_hardware_disable(void *garbage)
110b0c632dbSHeiko Carstens {
111b0c632dbSHeiko Carstens }
112b0c632dbSHeiko Carstens 
1132c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1142c70fe44SChristian Borntraeger 
115b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
116b0c632dbSHeiko Carstens {
1172c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1182c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
119b0c632dbSHeiko Carstens 	return 0;
120b0c632dbSHeiko Carstens }
121b0c632dbSHeiko Carstens 
122b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
123b0c632dbSHeiko Carstens {
1242c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
125b0c632dbSHeiko Carstens }
126b0c632dbSHeiko Carstens 
127b0c632dbSHeiko Carstens void kvm_arch_check_processor_compat(void *rtn)
128b0c632dbSHeiko Carstens {
129b0c632dbSHeiko Carstens }
130b0c632dbSHeiko Carstens 
131b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
132b0c632dbSHeiko Carstens {
133b0c632dbSHeiko Carstens 	return 0;
134b0c632dbSHeiko Carstens }
135b0c632dbSHeiko Carstens 
136b0c632dbSHeiko Carstens void kvm_arch_exit(void)
137b0c632dbSHeiko Carstens {
138b0c632dbSHeiko Carstens }
139b0c632dbSHeiko Carstens 
140b0c632dbSHeiko Carstens /* Section: device related */
141b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
142b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
143b0c632dbSHeiko Carstens {
144b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
145b0c632dbSHeiko Carstens 		return s390_enable_sie();
146b0c632dbSHeiko Carstens 	return -EINVAL;
147b0c632dbSHeiko Carstens }
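
/*
 * For illustration only (not part of this file): a minimal userspace sketch
 * of the device ioctl handled above, assuming <fcntl.h>, <sys/ioctl.h> and
 * <linux/kvm.h>, with error handling trimmed. Note that KVM_CREATE_VM also
 * calls s390_enable_sie() itself (see kvm_arch_init_vm below), so issuing
 * this ioctl explicitly is rarely required.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		perror("KVM_S390_ENABLE_SIE");
 */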
148b0c632dbSHeiko Carstens 
149b0c632dbSHeiko Carstens int kvm_dev_ioctl_check_extension(long ext)
150b0c632dbSHeiko Carstens {
151d7b0b5ebSCarsten Otte 	int r;
152d7b0b5ebSCarsten Otte 
1532bd0ac4eSCarsten Otte 	switch (ext) {
154d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
155b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
15652e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1571efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1581efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1591efd0f59SCarsten Otte #endif
1603c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
16160b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
16214eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
163d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
164fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
16510ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
166c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
167d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
168f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
169d7b0b5ebSCarsten Otte 		r = 1;
170d7b0b5ebSCarsten Otte 		break;
171e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
172e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
173e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
174e726b1bdSChristian Borntraeger 		break;
175e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
176e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
177e1e2e605SNick Wang 		break;
1781526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
179abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1801526bf9cSChristian Borntraeger 		break;
1812bd0ac4eSCarsten Otte 	default:
182d7b0b5ebSCarsten Otte 		r = 0;
183b0c632dbSHeiko Carstens 	}
184d7b0b5ebSCarsten Otte 	return r;
1852bd0ac4eSCarsten Otte }
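
/*
 * A hedged userspace sketch of querying the capabilities reported above via
 * KVM_CHECK_EXTENSION on the /dev/kvm file descriptor (assumes <fcntl.h>,
 * <sys/ioctl.h>, <linux/kvm.h>; error handling omitted). Capabilities such
 * as KVM_CAP_MAX_VCPUS and KVM_CAP_NR_MEMSLOTS return a count, most others
 * return 0 or 1:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int max_vcpus = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 *	int has_ucontrol = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
 *				 KVM_CAP_S390_UCONTROL);
 */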
186b0c632dbSHeiko Carstens 
18715f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
18815f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
18915f36ebdSJason J. Herne {
19015f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
19115f36ebdSJason J. Herne 	unsigned long address;
19215f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
19315f36ebdSJason J. Herne 
19415f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
19515f36ebdSJason J. Herne 	/* Loop over all guest pages */
19615f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
19715f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
19815f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
19915f36ebdSJason J. Herne 
20015f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
20115f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
20215f36ebdSJason J. Herne 	}
20315f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
20415f36ebdSJason J. Herne }
20515f36ebdSJason J. Herne 
206b0c632dbSHeiko Carstens /* Section: vm related */
207b0c632dbSHeiko Carstens /*
208b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
209b0c632dbSHeiko Carstens  */
210b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
211b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
212b0c632dbSHeiko Carstens {
21315f36ebdSJason J. Herne 	int r;
21415f36ebdSJason J. Herne 	unsigned long n;
21515f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
21615f36ebdSJason J. Herne 	int is_dirty = 0;
21715f36ebdSJason J. Herne 
21815f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
21915f36ebdSJason J. Herne 
22015f36ebdSJason J. Herne 	r = -EINVAL;
22115f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
22215f36ebdSJason J. Herne 		goto out;
22315f36ebdSJason J. Herne 
22415f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
22515f36ebdSJason J. Herne 	r = -ENOENT;
22615f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
22715f36ebdSJason J. Herne 		goto out;
22815f36ebdSJason J. Herne 
22915f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
23015f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
23115f36ebdSJason J. Herne 	if (r)
23215f36ebdSJason J. Herne 		goto out;
23315f36ebdSJason J. Herne 
23415f36ebdSJason J. Herne 	/* Clear the dirty log */
23515f36ebdSJason J. Herne 	if (is_dirty) {
23615f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
23715f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
23815f36ebdSJason J. Herne 	}
23915f36ebdSJason J. Herne 	r = 0;
24015f36ebdSJason J. Herne out:
24115f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
24215f36ebdSJason J. Herne 	return r;
243b0c632dbSHeiko Carstens }
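
/*
 * A minimal userspace sketch of the ioctl served by the function above,
 * assuming the memory slot was registered with KVM_MEM_LOG_DIRTY_PAGES and
 * that "npages" matches the slot size (illustrative only; headers and error
 * handling omitted). The bitmap must hold at least one bit per guest page,
 * rounded up to a multiple of sizeof(long):
 *
 *	unsigned long *bitmap = calloc((npages + 63) / 64, sizeof(long));
 *	struct kvm_dirty_log log = {
 *		.slot = 0,			// hypothetical slot id
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) == 0)
 *		;	// bit N set => page N of this slot was written
 */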
244b0c632dbSHeiko Carstens 
245d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
246d938dc55SCornelia Huck {
247d938dc55SCornelia Huck 	int r;
248d938dc55SCornelia Huck 
249d938dc55SCornelia Huck 	if (cap->flags)
250d938dc55SCornelia Huck 		return -EINVAL;
251d938dc55SCornelia Huck 
252d938dc55SCornelia Huck 	switch (cap->cap) {
25384223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
25484223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
25584223598SCornelia Huck 		r = 0;
25684223598SCornelia Huck 		break;
257d938dc55SCornelia Huck 	default:
258d938dc55SCornelia Huck 		r = -EINVAL;
259d938dc55SCornelia Huck 		break;
260d938dc55SCornelia Huck 	}
261d938dc55SCornelia Huck 	return r;
262d938dc55SCornelia Huck }
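
/*
 * A hedged userspace sketch of enabling the in-kernel irqchip through the
 * VM-level KVM_ENABLE_CAP handled above (flags must be zero; availability
 * is advertised via KVM_CAP_ENABLE_CAP_VM; headers and error handling
 * omitted). The KVM_CREATE_IRQCHIP case further down only succeeds once
 * this capability has been enabled:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_IRQCHIP,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */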
263d938dc55SCornelia Huck 
2644f718eabSDominik Dingel static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2654f718eabSDominik Dingel {
2664f718eabSDominik Dingel 	int ret;
2674f718eabSDominik Dingel 	unsigned int idx;
2684f718eabSDominik Dingel 	switch (attr->attr) {
2694f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
2704f718eabSDominik Dingel 		ret = -EBUSY;
2714f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2724f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
2734f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
2744f718eabSDominik Dingel 			ret = 0;
2754f718eabSDominik Dingel 		}
2764f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2774f718eabSDominik Dingel 		break;
2784f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
2794f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2804f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
2814f718eabSDominik Dingel 		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
2824f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
2834f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2844f718eabSDominik Dingel 		ret = 0;
2854f718eabSDominik Dingel 		break;
2864f718eabSDominik Dingel 	default:
2874f718eabSDominik Dingel 		ret = -ENXIO;
2884f718eabSDominik Dingel 		break;
2894f718eabSDominik Dingel 	}
2904f718eabSDominik Dingel 	return ret;
2914f718eabSDominik Dingel }
2924f718eabSDominik Dingel 
293f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
294f2061656SDominik Dingel {
295f2061656SDominik Dingel 	int ret;
296f2061656SDominik Dingel 
297f2061656SDominik Dingel 	switch (attr->group) {
2984f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
2994f718eabSDominik Dingel 		ret = kvm_s390_mem_control(kvm, attr);
3004f718eabSDominik Dingel 		break;
301f2061656SDominik Dingel 	default:
302f2061656SDominik Dingel 		ret = -ENXIO;
303f2061656SDominik Dingel 		break;
304f2061656SDominik Dingel 	}
305f2061656SDominik Dingel 
306f2061656SDominik Dingel 	return ret;
307f2061656SDominik Dingel }
308f2061656SDominik Dingel 
309f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
310f2061656SDominik Dingel {
311f2061656SDominik Dingel 	return -ENXIO;
312f2061656SDominik Dingel }
313f2061656SDominik Dingel 
314f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
315f2061656SDominik Dingel {
316f2061656SDominik Dingel 	int ret;
317f2061656SDominik Dingel 
318f2061656SDominik Dingel 	switch (attr->group) {
3194f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3204f718eabSDominik Dingel 		switch (attr->attr) {
3214f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
3224f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
3234f718eabSDominik Dingel 			ret = 0;
3244f718eabSDominik Dingel 			break;
3254f718eabSDominik Dingel 		default:
3264f718eabSDominik Dingel 			ret = -ENXIO;
3274f718eabSDominik Dingel 			break;
3284f718eabSDominik Dingel 		}
3294f718eabSDominik Dingel 		break;
330f2061656SDominik Dingel 	default:
331f2061656SDominik Dingel 		ret = -ENXIO;
332f2061656SDominik Dingel 		break;
333f2061656SDominik Dingel 	}
334f2061656SDominik Dingel 
335f2061656SDominik Dingel 	return ret;
336f2061656SDominik Dingel }
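
/*
 * A hedged userspace sketch of driving the VM attribute interface above to
 * enable CMMA (collaborative memory management). Per kvm_s390_mem_control,
 * the enable must happen before any VCPU is created, otherwise -EBUSY is
 * returned (headers and error handling omitted):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0 &&
 *	    ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr) == 0)
 *		;	// CMMA is now active for this VM;
 *			// KVM_S390_VM_MEM_CLR_CMMA resets the per-page state
 */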
337f2061656SDominik Dingel 
338b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
339b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
340b0c632dbSHeiko Carstens {
341b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
342b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
343f2061656SDominik Dingel 	struct kvm_device_attr attr;
344b0c632dbSHeiko Carstens 	int r;
345b0c632dbSHeiko Carstens 
346b0c632dbSHeiko Carstens 	switch (ioctl) {
347ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
348ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
349ba5c1e9bSCarsten Otte 
350ba5c1e9bSCarsten Otte 		r = -EFAULT;
351ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
352ba5c1e9bSCarsten Otte 			break;
353ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
354ba5c1e9bSCarsten Otte 		break;
355ba5c1e9bSCarsten Otte 	}
356d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
357d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
358d938dc55SCornelia Huck 		r = -EFAULT;
359d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
360d938dc55SCornelia Huck 			break;
361d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
362d938dc55SCornelia Huck 		break;
363d938dc55SCornelia Huck 	}
36484223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
36584223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
36684223598SCornelia Huck 
36784223598SCornelia Huck 		r = -EINVAL;
36884223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
36984223598SCornelia Huck 			/* Set up dummy routing. */
37084223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
37184223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
37284223598SCornelia Huck 			r = 0;
37384223598SCornelia Huck 		}
37484223598SCornelia Huck 		break;
37584223598SCornelia Huck 	}
376f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
377f2061656SDominik Dingel 		r = -EFAULT;
378f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
379f2061656SDominik Dingel 			break;
380f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
381f2061656SDominik Dingel 		break;
382f2061656SDominik Dingel 	}
383f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
384f2061656SDominik Dingel 		r = -EFAULT;
385f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
386f2061656SDominik Dingel 			break;
387f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
388f2061656SDominik Dingel 		break;
389f2061656SDominik Dingel 	}
390f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
391f2061656SDominik Dingel 		r = -EFAULT;
392f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
393f2061656SDominik Dingel 			break;
394f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
395f2061656SDominik Dingel 		break;
396f2061656SDominik Dingel 	}
397b0c632dbSHeiko Carstens 	default:
398367e1319SAvi Kivity 		r = -ENOTTY;
399b0c632dbSHeiko Carstens 	}
400b0c632dbSHeiko Carstens 
401b0c632dbSHeiko Carstens 	return r;
402b0c632dbSHeiko Carstens }
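
/*
 * For illustration, a minimal userspace sketch of injecting a floating
 * virtio interrupt through the KVM_S390_INTERRUPT case above. The exact
 * parm/parm64 encoding depends on the virtio transport, so the values here
 * are placeholders (headers and error handling omitted):
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type   = KVM_S390_INT_VIRTIO,
 *		.parm   = 0,
 *		.parm64 = 0,		// hypothetical interrupt parameter
 *	};
 *
 *	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int) < 0)
 *		perror("KVM_S390_INTERRUPT");
 */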
403b0c632dbSHeiko Carstens 
404e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
405b0c632dbSHeiko Carstens {
406b0c632dbSHeiko Carstens 	int rc;
407b0c632dbSHeiko Carstens 	char debug_name[16];
408f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
409b0c632dbSHeiko Carstens 
410e08b9637SCarsten Otte 	rc = -EINVAL;
411e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
412e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
413e08b9637SCarsten Otte 		goto out_err;
414e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
415e08b9637SCarsten Otte 		goto out_err;
416e08b9637SCarsten Otte #else
417e08b9637SCarsten Otte 	if (type)
418e08b9637SCarsten Otte 		goto out_err;
419e08b9637SCarsten Otte #endif
420e08b9637SCarsten Otte 
421b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
422b0c632dbSHeiko Carstens 	if (rc)
423d89f5effSJan Kiszka 		goto out_err;
424b0c632dbSHeiko Carstens 
425b290411aSCarsten Otte 	rc = -ENOMEM;
426b290411aSCarsten Otte 
427b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
428b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
429d89f5effSJan Kiszka 		goto out_err;
430f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
431f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
432f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
433f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
434b0c632dbSHeiko Carstens 
435b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
436b0c632dbSHeiko Carstens 
437b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
438b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
439b0c632dbSHeiko Carstens 		goto out_nodbf;
440b0c632dbSHeiko Carstens 
441ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
442ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
4438a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
444ba5c1e9bSCarsten Otte 
445b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
446b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
447b0c632dbSHeiko Carstens 
448e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
449e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
450e08b9637SCarsten Otte 	} else {
451598841caSCarsten Otte 		kvm->arch.gmap = gmap_alloc(current->mm);
452598841caSCarsten Otte 		if (!kvm->arch.gmap)
453598841caSCarsten Otte 			goto out_nogmap;
4542c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
45524eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
456e08b9637SCarsten Otte 	}
457fa6b7fe9SCornelia Huck 
458fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
45984223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
460fa6b7fe9SCornelia Huck 
4618ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
4628ad35755SDavid Hildenbrand 
463d89f5effSJan Kiszka 	return 0;
464598841caSCarsten Otte out_nogmap:
465598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
466b0c632dbSHeiko Carstens out_nodbf:
467b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
468d89f5effSJan Kiszka out_err:
469d89f5effSJan Kiszka 	return rc;
470b0c632dbSHeiko Carstens }
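
/*
 * A hedged userspace view of the "type" argument checked above: 0 creates
 * an ordinary guest with a kernel-managed gmap, while KVM_VM_S390_UCONTROL
 * requests a user-controlled VM and needs both CONFIG_KVM_S390_UCONTROL and
 * CAP_SYS_ADMIN (headers and error handling omitted):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *
 *	// or, for a user-controlled VM:
 *	// int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 */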
471b0c632dbSHeiko Carstens 
472d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
473d329c035SChristian Borntraeger {
474d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
475ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
47667335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
4773c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
47858f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
47958f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
48058f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
481abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
482abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
483abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
48458f9460bSCarsten Otte 	}
485abf4a71eSCarsten Otte 	smp_mb();
48627e0393fSCarsten Otte 
48727e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
48827e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
48927e0393fSCarsten Otte 
490b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
491b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
492d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
493b31288faSKonstantin Weitz 
4946692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
495b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
496d329c035SChristian Borntraeger }
497d329c035SChristian Borntraeger 
498d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
499d329c035SChristian Borntraeger {
500d329c035SChristian Borntraeger 	unsigned int i;
501988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
502d329c035SChristian Borntraeger 
503988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
504988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
505988a2caeSGleb Natapov 
506988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
507988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
508d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
509988a2caeSGleb Natapov 
510988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
511988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
512d329c035SChristian Borntraeger }
513d329c035SChristian Borntraeger 
514ad8ba2cdSSheng Yang void kvm_arch_sync_events(struct kvm *kvm)
515ad8ba2cdSSheng Yang {
516ad8ba2cdSSheng Yang }
517ad8ba2cdSSheng Yang 
518b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
519b0c632dbSHeiko Carstens {
520d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
521b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
522d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
52327e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
524598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
525841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
52667335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
527b0c632dbSHeiko Carstens }
528b0c632dbSHeiko Carstens 
529b0c632dbSHeiko Carstens /* Section: vcpu related */
530b0c632dbSHeiko Carstens int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
531b0c632dbSHeiko Carstens {
5323c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
5333c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
53427e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm)) {
53527e0393fSCarsten Otte 		vcpu->arch.gmap = gmap_alloc(current->mm);
53627e0393fSCarsten Otte 		if (!vcpu->arch.gmap)
53727e0393fSCarsten Otte 			return -ENOMEM;
5382c70fe44SChristian Borntraeger 		vcpu->arch.gmap->private = vcpu->kvm;
53927e0393fSCarsten Otte 		return 0;
54027e0393fSCarsten Otte 	}
54127e0393fSCarsten Otte 
542598841caSCarsten Otte 	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
54359674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
54459674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
5459eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
5469eed0735SChristian Borntraeger 				    KVM_SYNC_CRS;
547b0c632dbSHeiko Carstens 	return 0;
548b0c632dbSHeiko Carstens }
549b0c632dbSHeiko Carstens 
550b0c632dbSHeiko Carstens void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
551b0c632dbSHeiko Carstens {
5526692cef3SChristian Borntraeger 	/* Nothing to do */
553b0c632dbSHeiko Carstens }
554b0c632dbSHeiko Carstens 
555b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
556b0c632dbSHeiko Carstens {
5574725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
5584725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.host_fpregs.fprs);
559b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
5604725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
5614725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
56259674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
563480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
5649e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
565b0c632dbSHeiko Carstens }
566b0c632dbSHeiko Carstens 
567b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
568b0c632dbSHeiko Carstens {
5699e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
570480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
5714725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
5724725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
57359674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
5744725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
5754725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
576b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
577b0c632dbSHeiko Carstens }
578b0c632dbSHeiko Carstens 
579b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
580b0c632dbSHeiko Carstens {
581b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
582b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
583b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
5848d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
585b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
586b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
587b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
588b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
589b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
590b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
591b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
592b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
593b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
594672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
5953c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
5963c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
5976852d7b6SDavid Hildenbrand 	kvm_s390_vcpu_stop(vcpu);
5982ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
599b0c632dbSHeiko Carstens }
600b0c632dbSHeiko Carstens 
60142897d86SMarcelo Tosatti int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
60242897d86SMarcelo Tosatti {
60342897d86SMarcelo Tosatti 	return 0;
60442897d86SMarcelo Tosatti }
60542897d86SMarcelo Tosatti 
606b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
607b31605c1SDominik Dingel {
608b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
609b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
610b31605c1SDominik Dingel }
611b31605c1SDominik Dingel 
612b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
613b31605c1SDominik Dingel {
614b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
615b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
616b31605c1SDominik Dingel 		return -ENOMEM;
617b31605c1SDominik Dingel 
618b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
619b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
620b31605c1SDominik Dingel 	return 0;
621b31605c1SDominik Dingel }
622b31605c1SDominik Dingel 
623b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
624b0c632dbSHeiko Carstens {
625b31605c1SDominik Dingel 	int rc = 0;
626b31288faSKonstantin Weitz 
6279e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
6289e6dabefSCornelia Huck 						    CPUSTAT_SM |
62969d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
63069d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
631fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
6327feb6bb8SMichael Mueller 	if (test_vfacility(50) && test_vfacility(73))
6337feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
6347feb6bb8SMichael Mueller 
63569d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
636217a4406SHeiko Carstens 	vcpu->arch.sie_block->eca   = 0xC1002000U;
637217a4406SHeiko Carstens 	if (sclp_has_siif())
638217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
63978c4b59fSMichael Mueller 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
640693ffc08SDominik Dingel 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
641b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
642b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
643b31605c1SDominik Dingel 		if (rc)
644b31605c1SDominik Dingel 			return rc;
645b31288faSKonstantin Weitz 	}
646ca872302SChristian Borntraeger 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
647ca872302SChristian Borntraeger 	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
648ba5c1e9bSCarsten Otte 		     (unsigned long) vcpu);
649ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
650453423dcSChristian Borntraeger 	get_cpu_id(&vcpu->arch.cpu_id);
65192e6ecf3SChristian Borntraeger 	vcpu->arch.cpu_id.version = 0xff;
652b31605c1SDominik Dingel 	return rc;
653b0c632dbSHeiko Carstens }
654b0c632dbSHeiko Carstens 
655b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
656b0c632dbSHeiko Carstens 				      unsigned int id)
657b0c632dbSHeiko Carstens {
6584d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
6597feb6bb8SMichael Mueller 	struct sie_page *sie_page;
6604d47555aSCarsten Otte 	int rc = -EINVAL;
661b0c632dbSHeiko Carstens 
6624d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
6634d47555aSCarsten Otte 		goto out;
6644d47555aSCarsten Otte 
6654d47555aSCarsten Otte 	rc = -ENOMEM;
6664d47555aSCarsten Otte 
667b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
668b0c632dbSHeiko Carstens 	if (!vcpu)
6694d47555aSCarsten Otte 		goto out;
670b0c632dbSHeiko Carstens 
6717feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
6727feb6bb8SMichael Mueller 	if (!sie_page)
673b0c632dbSHeiko Carstens 		goto out_free_cpu;
674b0c632dbSHeiko Carstens 
6757feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
6767feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
6777feb6bb8SMichael Mueller 
678b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
67958f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
68058f9460bSCarsten Otte 		if (!kvm->arch.sca) {
68158f9460bSCarsten Otte 			WARN_ON_ONCE(1);
68258f9460bSCarsten Otte 			goto out_free_cpu;
68358f9460bSCarsten Otte 		}
684abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
68558f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
68658f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
68758f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
68858f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
689b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
690fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
69158f9460bSCarsten Otte 	}
692b0c632dbSHeiko Carstens 
693ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
694ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
695ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
696d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
6975288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
698ba5c1e9bSCarsten Otte 
699b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
700b0c632dbSHeiko Carstens 	if (rc)
7017b06bf2fSWei Yongjun 		goto out_free_sie_block;
702b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
703b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
704ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
705b0c632dbSHeiko Carstens 
706b0c632dbSHeiko Carstens 	return vcpu;
7077b06bf2fSWei Yongjun out_free_sie_block:
7087b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
709b0c632dbSHeiko Carstens out_free_cpu:
710b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
7114d47555aSCarsten Otte out:
712b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
713b0c632dbSHeiko Carstens }
714b0c632dbSHeiko Carstens 
715b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
716b0c632dbSHeiko Carstens {
717f87618e8SMichael Mueller 	return kvm_cpu_has_interrupt(vcpu);
718b0c632dbSHeiko Carstens }
719b0c632dbSHeiko Carstens 
72049b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
72149b99e1eSChristian Borntraeger {
72249b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
72349b99e1eSChristian Borntraeger }
72449b99e1eSChristian Borntraeger 
72549b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
72649b99e1eSChristian Borntraeger {
72749b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
72849b99e1eSChristian Borntraeger }
72949b99e1eSChristian Borntraeger 
73049b99e1eSChristian Borntraeger /*
73149b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
73249b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
73349b99e1eSChristian Borntraeger  * return immediately. */
73449b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
73549b99e1eSChristian Borntraeger {
73649b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
73749b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
73849b99e1eSChristian Borntraeger 		cpu_relax();
73949b99e1eSChristian Borntraeger }
74049b99e1eSChristian Borntraeger 
74149b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
74249b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
74349b99e1eSChristian Borntraeger {
74449b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
74549b99e1eSChristian Borntraeger 	exit_sie(vcpu);
74649b99e1eSChristian Borntraeger }
74749b99e1eSChristian Borntraeger 
7482c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
7492c70fe44SChristian Borntraeger {
7502c70fe44SChristian Borntraeger 	int i;
7512c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
7522c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
7532c70fe44SChristian Borntraeger 
7542c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
7552c70fe44SChristian Borntraeger 		/* match against both prefix pages */
7562c70fe44SChristian Borntraeger 		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
7572c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
7582c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
7592c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
7602c70fe44SChristian Borntraeger 		}
7612c70fe44SChristian Borntraeger 	}
7622c70fe44SChristian Borntraeger }
7632c70fe44SChristian Borntraeger 
764b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
765b6d33834SChristoffer Dall {
766b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
767b6d33834SChristoffer Dall 	BUG();
768b6d33834SChristoffer Dall 	return 0;
769b6d33834SChristoffer Dall }
770b6d33834SChristoffer Dall 
77114eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
77214eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
77314eebd91SCarsten Otte {
77414eebd91SCarsten Otte 	int r = -EINVAL;
77514eebd91SCarsten Otte 
77614eebd91SCarsten Otte 	switch (reg->id) {
77729b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
77829b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
77929b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
78029b7c71bSCarsten Otte 		break;
78129b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
78229b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
78329b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
78429b7c71bSCarsten Otte 		break;
78546a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
78646a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
78746a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
78846a6dd1cSJason J. herne 		break;
78946a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
79046a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
79146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
79246a6dd1cSJason J. herne 		break;
793536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
794536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
795536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
796536336c2SDominik Dingel 		break;
797536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
798536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
799536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
800536336c2SDominik Dingel 		break;
801536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
802536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
803536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
804536336c2SDominik Dingel 		break;
805672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
806672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
807672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
808672550fbSChristian Borntraeger 		break;
809afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
810afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
811afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
812afa45ff5SChristian Borntraeger 		break;
81314eebd91SCarsten Otte 	default:
81414eebd91SCarsten Otte 		break;
81514eebd91SCarsten Otte 	}
81614eebd91SCarsten Otte 
81714eebd91SCarsten Otte 	return r;
81814eebd91SCarsten Otte }
81914eebd91SCarsten Otte 
82014eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
82114eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
82214eebd91SCarsten Otte {
82314eebd91SCarsten Otte 	int r = -EINVAL;
82414eebd91SCarsten Otte 
82514eebd91SCarsten Otte 	switch (reg->id) {
82629b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
82729b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
82829b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
82929b7c71bSCarsten Otte 		break;
83029b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
83129b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
83229b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
83329b7c71bSCarsten Otte 		break;
83446a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
83546a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
83646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
83746a6dd1cSJason J. herne 		break;
83846a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
83946a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
84046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
84146a6dd1cSJason J. herne 		break;
842536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
843536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
844536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
845536336c2SDominik Dingel 		break;
846536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
847536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
848536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
849536336c2SDominik Dingel 		break;
850536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
851536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
852536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
853536336c2SDominik Dingel 		break;
854672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
855672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
856672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
857672550fbSChristian Borntraeger 		break;
858afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
859afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
860afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
861afa45ff5SChristian Borntraeger 		break;
86214eebd91SCarsten Otte 	default:
86314eebd91SCarsten Otte 		break;
86414eebd91SCarsten Otte 	}
86514eebd91SCarsten Otte 
86614eebd91SCarsten Otte 	return r;
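
/*
 * A minimal userspace sketch of the ONE_REG accessors above, reading the
 * guest CPU timer. It assumes vcpu_fd was obtained via KVM_CREATE_VCPU on
 * the VM file descriptor (headers and error handling omitted):
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *		printf("cpu timer: %llu\n", (unsigned long long)cputm);
 */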
86714eebd91SCarsten Otte }
868b6d33834SChristoffer Dall 
869b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
870b0c632dbSHeiko Carstens {
871b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
872b0c632dbSHeiko Carstens 	return 0;
873b0c632dbSHeiko Carstens }
874b0c632dbSHeiko Carstens 
875b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
876b0c632dbSHeiko Carstens {
8775a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
878b0c632dbSHeiko Carstens 	return 0;
879b0c632dbSHeiko Carstens }
880b0c632dbSHeiko Carstens 
881b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
882b0c632dbSHeiko Carstens {
8835a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
884b0c632dbSHeiko Carstens 	return 0;
885b0c632dbSHeiko Carstens }
886b0c632dbSHeiko Carstens 
887b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
888b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
889b0c632dbSHeiko Carstens {
89059674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
891b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
89259674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
893b0c632dbSHeiko Carstens 	return 0;
894b0c632dbSHeiko Carstens }
895b0c632dbSHeiko Carstens 
896b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
897b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
898b0c632dbSHeiko Carstens {
89959674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
900b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
901b0c632dbSHeiko Carstens 	return 0;
902b0c632dbSHeiko Carstens }
903b0c632dbSHeiko Carstens 
904b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
905b0c632dbSHeiko Carstens {
9064725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
9074725c860SMartin Schwidefsky 		return -EINVAL;
908b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
9094725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
9104725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
9114725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
912b0c632dbSHeiko Carstens 	return 0;
913b0c632dbSHeiko Carstens }
914b0c632dbSHeiko Carstens 
915b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
916b0c632dbSHeiko Carstens {
917b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
918b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
919b0c632dbSHeiko Carstens 	return 0;
920b0c632dbSHeiko Carstens }
921b0c632dbSHeiko Carstens 
922b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
923b0c632dbSHeiko Carstens {
924b0c632dbSHeiko Carstens 	int rc = 0;
925b0c632dbSHeiko Carstens 
9269e6dabefSCornelia Huck 	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
927b0c632dbSHeiko Carstens 		rc = -EBUSY;
928d7b0b5ebSCarsten Otte 	else {
929d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
930d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
931d7b0b5ebSCarsten Otte 	}
932b0c632dbSHeiko Carstens 	return rc;
933b0c632dbSHeiko Carstens }
934b0c632dbSHeiko Carstens 
935b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
936b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
937b0c632dbSHeiko Carstens {
938b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
939b0c632dbSHeiko Carstens }
940b0c632dbSHeiko Carstens 
94127291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
94227291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
94327291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
94427291e21SDavid Hildenbrand 
945d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
946d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
947b0c632dbSHeiko Carstens {
94827291e21SDavid Hildenbrand 	int rc = 0;
94927291e21SDavid Hildenbrand 
95027291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
95127291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
95227291e21SDavid Hildenbrand 
95327291e21SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
95427291e21SDavid Hildenbrand 		return -EINVAL;
95527291e21SDavid Hildenbrand 
95627291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
95727291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
95827291e21SDavid Hildenbrand 		/* enforce guest PER */
95927291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
96027291e21SDavid Hildenbrand 
96127291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
96227291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
96327291e21SDavid Hildenbrand 	} else {
96427291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
96527291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
96627291e21SDavid Hildenbrand 	}
96727291e21SDavid Hildenbrand 
96827291e21SDavid Hildenbrand 	if (rc) {
96927291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
97027291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
97127291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
97227291e21SDavid Hildenbrand 	}
97327291e21SDavid Hildenbrand 
97427291e21SDavid Hildenbrand 	return rc;
975b0c632dbSHeiko Carstens }
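
/*
 * A hedged userspace sketch of the guest-debug interface above, turning on
 * single-stepping for one VCPU and switching it off again. With only
 * ENABLE and SINGLESTEP set, no hardware breakpoints are imported (headers
 * and error handling omitted):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);	// enforce guest PER
 *
 *	dbg.control = 0;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);	// disable again
 */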
976b0c632dbSHeiko Carstens 
97762d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
97862d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
97962d9f0dbSMarcelo Tosatti {
98062d9f0dbSMarcelo Tosatti 	return -EINVAL; /* not implemented yet */
98162d9f0dbSMarcelo Tosatti }
98262d9f0dbSMarcelo Tosatti 
98362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
98462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
98562d9f0dbSMarcelo Tosatti {
98662d9f0dbSMarcelo Tosatti 	return -EINVAL; /* not implemented yet */
98762d9f0dbSMarcelo Tosatti }
98862d9f0dbSMarcelo Tosatti 
989b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
990b31605c1SDominik Dingel {
991b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
992b31605c1SDominik Dingel 		return false;
993b31605c1SDominik Dingel 	/* only enable for z10 and later */
994b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
995b31605c1SDominik Dingel 		return false;
996b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
997b31605c1SDominik Dingel 		return false;
998b31605c1SDominik Dingel 	return true;
999b31605c1SDominik Dingel }
1000b31605c1SDominik Dingel 
10018ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
10028ad35755SDavid Hildenbrand {
10038ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
10048ad35755SDavid Hildenbrand }
10058ad35755SDavid Hildenbrand 
10062c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
10072c70fe44SChristian Borntraeger {
10088ad35755SDavid Hildenbrand retry:
10098ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
10102c70fe44SChristian Borntraeger 	/*
10112c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
10122c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
10132c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
10142c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
10152c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
10162c70fe44SChristian Borntraeger 	 */
10178ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
10182c70fe44SChristian Borntraeger 		int rc;
10192c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
10202c70fe44SChristian Borntraeger 				      vcpu->arch.sie_block->prefix,
10212c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
10222c70fe44SChristian Borntraeger 		if (rc)
10232c70fe44SChristian Borntraeger 			return rc;
10248ad35755SDavid Hildenbrand 		goto retry;
10252c70fe44SChristian Borntraeger 	}
10268ad35755SDavid Hildenbrand 
10278ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
10288ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
10298ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
10308ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
10318ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
10328ad35755SDavid Hildenbrand 		}
10338ad35755SDavid Hildenbrand 		goto retry;
10348ad35755SDavid Hildenbrand 	}
10358ad35755SDavid Hildenbrand 
10368ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
10378ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
10388ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
10398ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
10408ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
10418ad35755SDavid Hildenbrand 		}
10428ad35755SDavid Hildenbrand 		goto retry;
10438ad35755SDavid Hildenbrand 	}
10448ad35755SDavid Hildenbrand 
10452c70fe44SChristian Borntraeger 	return 0;
10462c70fe44SChristian Borntraeger }
10472c70fe44SChristian Borntraeger 
1048*fa576c58SThomas Huth /**
1049*fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1050*fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1051*fa576c58SThomas Huth  * @gpa: Guest physical address
1052*fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1053*fa576c58SThomas Huth  *
1054*fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1055*fa576c58SThomas Huth  *
1056*fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1057*fa576c58SThomas Huth  */
1058*fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
105924eb3a82SDominik Dingel {
106024eb3a82SDominik Dingel 	struct mm_struct *mm = current->mm;
1061*fa576c58SThomas Huth 	hva_t hva;
1062*fa576c58SThomas Huth 	long rc;
1063*fa576c58SThomas Huth 
1064*fa576c58SThomas Huth 	hva = gmap_fault(gpa, vcpu->arch.gmap);
1065*fa576c58SThomas Huth 	if (IS_ERR_VALUE(hva))
1066*fa576c58SThomas Huth 		return (long)hva;
106724eb3a82SDominik Dingel 	down_read(&mm->mmap_sem);
1068*fa576c58SThomas Huth 	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
106924eb3a82SDominik Dingel 	up_read(&mm->mmap_sem);
1070*fa576c58SThomas Huth 
1071*fa576c58SThomas Huth 	return rc < 0 ? rc : 0;
107224eb3a82SDominik Dingel }
107324eb3a82SDominik Dingel 
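/*
 * Inject a pfault token into the guest: PFAULT_INIT when asynchronous
 * handling of a fault is started, PFAULT_DONE once the page is available.
 */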
10743c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
10753c038e6bSDominik Dingel 				      unsigned long token)
10763c038e6bSDominik Dingel {
10773c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
10783c038e6bSDominik Dingel 	inti.parm64 = token;
10793c038e6bSDominik Dingel 
10803c038e6bSDominik Dingel 	if (start_token) {
10813c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_INIT;
10823c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
10833c038e6bSDominik Dingel 	} else {
10843c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
10853c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
10863c038e6bSDominik Dingel 	}
10873c038e6bSDominik Dingel }
10883c038e6bSDominik Dingel 
10893c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
10903c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
10913c038e6bSDominik Dingel {
10923c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
10933c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
10943c038e6bSDominik Dingel }
10953c038e6bSDominik Dingel 
10963c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
10973c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
10983c038e6bSDominik Dingel {
10993c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
11003c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
11013c038e6bSDominik Dingel }
11023c038e6bSDominik Dingel 
11033c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
11043c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
11053c038e6bSDominik Dingel {
11063c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
11073c038e6bSDominik Dingel }
11083c038e6bSDominik Dingel 
11093c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
11103c038e6bSDominik Dingel {
11113c038e6bSDominik Dingel 	/*
11123c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
11133c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
11143c038e6bSDominik Dingel 	 */
11153c038e6bSDominik Dingel 	return true;
11163c038e6bSDominik Dingel }
11173c038e6bSDominik Dingel 
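/*
 * Try to set up asynchronous handling for the current host page fault.
 * Returns 0 if pfault handling is disabled or the guest cannot take a
 * PFAULT_INIT interrupt right now; in that case the caller falls back to
 * a synchronous fault-in.
 */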
11183c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
11193c038e6bSDominik Dingel {
11203c038e6bSDominik Dingel 	hva_t hva;
11213c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
11223c038e6bSDominik Dingel 	int rc;
11233c038e6bSDominik Dingel 
11243c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
11253c038e6bSDominik Dingel 		return 0;
11263c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
11273c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
11283c038e6bSDominik Dingel 		return 0;
11293c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
11303c038e6bSDominik Dingel 		return 0;
11313c038e6bSDominik Dingel 	if (kvm_cpu_has_interrupt(vcpu))
11323c038e6bSDominik Dingel 		return 0;
11333c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
11343c038e6bSDominik Dingel 		return 0;
11353c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
11363c038e6bSDominik Dingel 		return 0;
11373c038e6bSDominik Dingel 
113881480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
113981480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
114081480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
11413c038e6bSDominik Dingel 		return 0;
11423c038e6bSDominik Dingel 
11433c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
11443c038e6bSDominik Dingel 	return rc;
11453c038e6bSDominik Dingel }
11463c038e6bSDominik Dingel 
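/*
 * Prepare for entering SIE: handle completed pfaults, machine checks and
 * pending requests, deliver interrupts and set up guest debugging state.
 */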
11473fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1148b0c632dbSHeiko Carstens {
11493fb4c40fSThomas Huth 	int rc, cpuflags;
1150e168bf8dSCarsten Otte 
11513c038e6bSDominik Dingel 	/*
11523c038e6bSDominik Dingel 	 * On s390, notifications for arriving pages will be delivered directly
11533c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
11543c038e6bSDominik Dingel 	 * handled outside the worker.
11553c038e6bSDominik Dingel 	 */
11563c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
11573c038e6bSDominik Dingel 
11585a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1159b0c632dbSHeiko Carstens 
1160b0c632dbSHeiko Carstens 	if (need_resched())
1161b0c632dbSHeiko Carstens 		schedule();
1162b0c632dbSHeiko Carstens 
116371cde587SChristian Borntraeger 	if (test_thread_flag(TIF_MCCK_PENDING))
116471cde587SChristian Borntraeger 		s390_handle_mcck();
116571cde587SChristian Borntraeger 
1166d6b6d166SCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm))
11670ff31867SCarsten Otte 		kvm_s390_deliver_pending_interrupts(vcpu);
11680ff31867SCarsten Otte 
11692c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
11702c70fe44SChristian Borntraeger 	if (rc)
11712c70fe44SChristian Borntraeger 		return rc;
11722c70fe44SChristian Borntraeger 
117327291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
117427291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
117527291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
117627291e21SDavid Hildenbrand 	}
117727291e21SDavid Hildenbrand 
1178b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
11793fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
11803fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
11813fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
11822b29a9fdSDominik Dingel 
11833fb4c40fSThomas Huth 	return 0;
11843fb4c40fSThomas Huth }
11853fb4c40fSThomas Huth 
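/*
 * Evaluate the SIE exit. A negative exit_reason indicates a host-side fault:
 * for ucontrol guests it is reported to userspace, otherwise the faulting
 * guest page is resolved asynchronously if possible, synchronously otherwise.
 * Anything still unresolved is turned into an addressing exception.
 */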
11863fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
11873fb4c40fSThomas Huth {
118824eb3a82SDominik Dingel 	int rc = -1;
11892b29a9fdSDominik Dingel 
11902b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
11912b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
11922b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
11932b29a9fdSDominik Dingel 
119427291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
119527291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
119627291e21SDavid Hildenbrand 
11973fb4c40fSThomas Huth 	if (exit_reason >= 0) {
11987c470539SMartin Schwidefsky 		rc = 0;
1199210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1200210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1201210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1202210b1607SThomas Huth 						current->thread.gmap_addr;
1203210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1204210b1607SThomas Huth 		rc = -EREMOTE;
120524eb3a82SDominik Dingel 
120624eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
12073c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
120824eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1209*fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
121024eb3a82SDominik Dingel 			rc = 0;
1211*fa576c58SThomas Huth 		} else {
1212*fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1213*fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1214*fa576c58SThomas Huth 		}
121524eb3a82SDominik Dingel 	}
121624eb3a82SDominik Dingel 
121724eb3a82SDominik Dingel 	if (rc == -1) {
1218699bde3bSChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1219699bde3bSChristian Borntraeger 		trace_kvm_s390_sie_fault(vcpu);
1220699bde3bSChristian Borntraeger 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
12211f0d0f09SCarsten Otte 	}
1222b0c632dbSHeiko Carstens 
12235a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
12243fb4c40fSThomas Huth 
1225a76ccff6SThomas Huth 	if (rc == 0) {
1226a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
12272955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
12282955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1229a76ccff6SThomas Huth 		else
1230a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1231a76ccff6SThomas Huth 	}
1232a76ccff6SThomas Huth 
12333fb4c40fSThomas Huth 	return rc;
12343fb4c40fSThomas Huth }
12353fb4c40fSThomas Huth 
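/*
 * The inner run loop: re-enter SIE until a signal is pending, a guest
 * debugging exit is requested or pre/post processing reports an error.
 */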
12363fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
12373fb4c40fSThomas Huth {
12383fb4c40fSThomas Huth 	int rc, exit_reason;
12393fb4c40fSThomas Huth 
1240800c1065SThomas Huth 	/*
1241800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when
1242800c1065SThomas Huth 	 * running the guest), so that memslots (and other stuff) are protected.
1243800c1065SThomas Huth 	 */
1244800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1245800c1065SThomas Huth 
1246a76ccff6SThomas Huth 	do {
12473fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
12483fb4c40fSThomas Huth 		if (rc)
1249a76ccff6SThomas Huth 			break;
12503fb4c40fSThomas Huth 
1251800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
12523fb4c40fSThomas Huth 		/*
1253a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
1254a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
12553fb4c40fSThomas Huth 		 */
12563fb4c40fSThomas Huth 		preempt_disable();
12573fb4c40fSThomas Huth 		kvm_guest_enter();
12583fb4c40fSThomas Huth 		preempt_enable();
1259a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1260a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
12613fb4c40fSThomas Huth 		kvm_guest_exit();
1262800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
12633fb4c40fSThomas Huth 
12643fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
126527291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
12663fb4c40fSThomas Huth 
1267800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1268e168bf8dSCarsten Otte 	return rc;
1269b0c632dbSHeiko Carstens }
1270b0c632dbSHeiko Carstens 
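/*
 * The KVM_RUN backend: sync register state from kvm_run, run the VCPU until
 * userspace intervention is needed and sync the state back into kvm_run.
 */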
1271b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1272b0c632dbSHeiko Carstens {
12738f2abe6aSChristian Borntraeger 	int rc;
1274b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1275b0c632dbSHeiko Carstens 
127627291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
127727291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
127827291e21SDavid Hildenbrand 		return 0;
127927291e21SDavid Hildenbrand 	}
128027291e21SDavid Hildenbrand 
1281b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1282b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1283b0c632dbSHeiko Carstens 
12846852d7b6SDavid Hildenbrand 	kvm_s390_vcpu_start(vcpu);
1285b0c632dbSHeiko Carstens 
12868f2abe6aSChristian Borntraeger 	switch (kvm_run->exit_reason) {
12878f2abe6aSChristian Borntraeger 	case KVM_EXIT_S390_SIEIC:
12888f2abe6aSChristian Borntraeger 	case KVM_EXIT_UNKNOWN:
12899ace903dSChristian Ehrhardt 	case KVM_EXIT_INTR:
12908f2abe6aSChristian Borntraeger 	case KVM_EXIT_S390_RESET:
1291e168bf8dSCarsten Otte 	case KVM_EXIT_S390_UCONTROL:
1292fa6b7fe9SCornelia Huck 	case KVM_EXIT_S390_TSCH:
129327291e21SDavid Hildenbrand 	case KVM_EXIT_DEBUG:
12948f2abe6aSChristian Borntraeger 		break;
12958f2abe6aSChristian Borntraeger 	default:
12968f2abe6aSChristian Borntraeger 		BUG();
12978f2abe6aSChristian Borntraeger 	}
12988f2abe6aSChristian Borntraeger 
1299d7b0b5ebSCarsten Otte 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1300d7b0b5ebSCarsten Otte 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
130160b413c9SChristian Borntraeger 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
130260b413c9SChristian Borntraeger 		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
130360b413c9SChristian Borntraeger 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
130460b413c9SChristian Borntraeger 	}
13059eed0735SChristian Borntraeger 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
13069eed0735SChristian Borntraeger 		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
13079eed0735SChristian Borntraeger 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
13089eed0735SChristian Borntraeger 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
13099eed0735SChristian Borntraeger 	}
1310d7b0b5ebSCarsten Otte 
1311dab4079dSHeiko Carstens 	might_fault();
1312e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
13139ace903dSChristian Ehrhardt 
1314b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1315b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
13168f2abe6aSChristian Borntraeger 		rc = -EINTR;
1317b1d16c49SChristian Ehrhardt 	}
13188f2abe6aSChristian Borntraeger 
131927291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
132027291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
132127291e21SDavid Hildenbrand 		rc = 0;
132227291e21SDavid Hildenbrand 	}
132327291e21SDavid Hildenbrand 
1324b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
13258f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
13268f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
13278f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
13288f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
13298f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
13308f2abe6aSChristian Borntraeger 		rc = 0;
13318f2abe6aSChristian Borntraeger 	}
13328f2abe6aSChristian Borntraeger 
13338f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
13348f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
13358f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
13368f2abe6aSChristian Borntraeger 		rc = 0;
13378f2abe6aSChristian Borntraeger 	}
13388f2abe6aSChristian Borntraeger 
1339d7b0b5ebSCarsten Otte 	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
1340d7b0b5ebSCarsten Otte 	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
134160b413c9SChristian Borntraeger 	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
13429eed0735SChristian Borntraeger 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1343d7b0b5ebSCarsten Otte 
1344b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1345b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1346b0c632dbSHeiko Carstens 
1347b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
13487e8e6ab4SHeiko Carstens 	return rc;
1349b0c632dbSHeiko Carstens }
1350b0c632dbSHeiko Carstens 
1351b0c632dbSHeiko Carstens /*
1352b0c632dbSHeiko Carstens  * store status at address
1353b0c632dbSHeiko Carstens  * we have two special cases:
1354b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1355b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1356b0c632dbSHeiko Carstens  */
1357d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1358b0c632dbSHeiko Carstens {
1359092670cdSCarsten Otte 	unsigned char archmode = 1;
1360178bd789SThomas Huth 	u64 clkcomp;
1361d0bce605SHeiko Carstens 	int rc;
1362b0c632dbSHeiko Carstens 
1363d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1364d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1365b0c632dbSHeiko Carstens 			return -EFAULT;
1366d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1367d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1368d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1369b0c632dbSHeiko Carstens 			return -EFAULT;
1370d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1371d0bce605SHeiko Carstens 	}
1372d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1373d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
1374d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1375d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
1376d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1377d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
1378d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1379d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->prefix, 4);
1380d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
1381d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
1382d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
1383d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1384d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
1385d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1386d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
1387178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
1388d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1389d0bce605SHeiko Carstens 			      &clkcomp, 8);
1390d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1391d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
1392d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1393d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
1394d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
1395b0c632dbSHeiko Carstens }
1396b0c632dbSHeiko Carstens 
1397e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1398e879892cSThomas Huth {
1399e879892cSThomas Huth 	/*
1400e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1401e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
1402e879892cSThomas Huth 	 * them into the save area.
1403e879892cSThomas Huth 	 */
1404e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1405e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1406e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
1407e879892cSThomas Huth 
1408e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
1409e879892cSThomas Huth }
1410e879892cSThomas Huth 
14118ad35755SDavid Hildenbrand static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
14128ad35755SDavid Hildenbrand {
14138ad35755SDavid Hildenbrand 	return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
14148ad35755SDavid Hildenbrand }
14158ad35755SDavid Hildenbrand 
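/*
 * IBS is toggled via requests: cancel a pending request for the opposite
 * state, queue the new one and kick the VCPU out of SIE so the request is
 * processed before the next entry (see kvm_s390_handle_requests).
 */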
14168ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
14178ad35755SDavid Hildenbrand {
14188ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
14198ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
14208ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
14218ad35755SDavid Hildenbrand }
14228ad35755SDavid Hildenbrand 
14238ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
14248ad35755SDavid Hildenbrand {
14258ad35755SDavid Hildenbrand 	unsigned int i;
14268ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
14278ad35755SDavid Hildenbrand 
14288ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
14298ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
14308ad35755SDavid Hildenbrand 	}
14318ad35755SDavid Hildenbrand }
14328ad35755SDavid Hildenbrand 
14338ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
14348ad35755SDavid Hildenbrand {
14358ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
14368ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
14378ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
14388ad35755SDavid Hildenbrand }
14398ad35755SDavid Hildenbrand 
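/*
 * Move the VCPU out of the STOPPED state. IBS is only kept enabled while a
 * single VCPU is running; starting a second one disables it everywhere to
 * drop potentially outstanding ENABLE requests.
 */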
14406852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
14416852d7b6SDavid Hildenbrand {
14428ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
14438ad35755SDavid Hildenbrand 
14448ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
14458ad35755SDavid Hildenbrand 		return;
14468ad35755SDavid Hildenbrand 
14476852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
14488ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
14498ad35755SDavid Hildenbrand 	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
14508ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
14518ad35755SDavid Hildenbrand 
14528ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
14538ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
14548ad35755SDavid Hildenbrand 			started_vcpus++;
14558ad35755SDavid Hildenbrand 	}
14568ad35755SDavid Hildenbrand 
14578ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
14588ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
14598ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
14608ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
14618ad35755SDavid Hildenbrand 		/*
14628ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
14638ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
14648ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
14658ad35755SDavid Hildenbrand 		 */
14668ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
14678ad35755SDavid Hildenbrand 	}
14688ad35755SDavid Hildenbrand 
14696852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
14708ad35755SDavid Hildenbrand 	/*
14718ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
14728ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
14738ad35755SDavid Hildenbrand 	 * Let's play it safe and flush the VCPU at startup.
14748ad35755SDavid Hildenbrand 	vcpu->arch.sie_block->ihcpu  = 0xffff;
14758ad35755SDavid Hildenbrand 	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
14768ad35755SDavid Hildenbrand 	return;
14776852d7b6SDavid Hildenbrand }
14786852d7b6SDavid Hildenbrand 
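/*
 * Move the VCPU into the STOPPED state and disable IBS for it. If exactly
 * one started VCPU remains, re-enable IBS on that VCPU to speed it up.
 */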
14796852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
14806852d7b6SDavid Hildenbrand {
14818ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
14828ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
14838ad35755SDavid Hildenbrand 
14848ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
14858ad35755SDavid Hildenbrand 		return;
14868ad35755SDavid Hildenbrand 
14876852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
14888ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
14898ad35755SDavid Hildenbrand 	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
14908ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
14918ad35755SDavid Hildenbrand 
14926852d7b6SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
14938ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
14948ad35755SDavid Hildenbrand 
14958ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
14968ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
14978ad35755SDavid Hildenbrand 			started_vcpus++;
14988ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
14998ad35755SDavid Hildenbrand 		}
15008ad35755SDavid Hildenbrand 	}
15018ad35755SDavid Hildenbrand 
15028ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
15038ad35755SDavid Hildenbrand 		/*
15048ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
15058ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
15068ad35755SDavid Hildenbrand 		 */
15078ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
15088ad35755SDavid Hildenbrand 	}
15098ad35755SDavid Hildenbrand 
15108ad35755SDavid Hildenbrand 	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
15118ad35755SDavid Hildenbrand 	return;
15126852d7b6SDavid Hildenbrand }
15136852d7b6SDavid Hildenbrand 
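/* Enable a per-VCPU capability; currently only KVM_CAP_S390_CSS_SUPPORT. */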
1514d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1515d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
1516d6712df9SCornelia Huck {
1517d6712df9SCornelia Huck 	int r;
1518d6712df9SCornelia Huck 
1519d6712df9SCornelia Huck 	if (cap->flags)
1520d6712df9SCornelia Huck 		return -EINVAL;
1521d6712df9SCornelia Huck 
1522d6712df9SCornelia Huck 	switch (cap->cap) {
1523fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
1524fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
1525fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
1526fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
1527fa6b7fe9SCornelia Huck 		}
1528fa6b7fe9SCornelia Huck 		r = 0;
1529fa6b7fe9SCornelia Huck 		break;
1530d6712df9SCornelia Huck 	default:
1531d6712df9SCornelia Huck 		r = -EINVAL;
1532d6712df9SCornelia Huck 		break;
1533d6712df9SCornelia Huck 	}
1534d6712df9SCornelia Huck 	return r;
1535d6712df9SCornelia Huck }
1536d6712df9SCornelia Huck 
1537b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
1538b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
1539b0c632dbSHeiko Carstens {
1540b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
1541b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1542800c1065SThomas Huth 	int idx;
1543bc923cc9SAvi Kivity 	long r;
1544b0c632dbSHeiko Carstens 
154593736624SAvi Kivity 	switch (ioctl) {
154693736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
1547ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1548ba5c1e9bSCarsten Otte 
154993736624SAvi Kivity 		r = -EFAULT;
1550ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
155193736624SAvi Kivity 			break;
155293736624SAvi Kivity 		r = kvm_s390_inject_vcpu(vcpu, &s390int);
155393736624SAvi Kivity 		break;
1554ba5c1e9bSCarsten Otte 	}
1555b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
1556800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
1557bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
1558800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
1559bc923cc9SAvi Kivity 		break;
1560b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
1561b0c632dbSHeiko Carstens 		psw_t psw;
1562b0c632dbSHeiko Carstens 
1563bc923cc9SAvi Kivity 		r = -EFAULT;
1564b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
1565bc923cc9SAvi Kivity 			break;
1566bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1567bc923cc9SAvi Kivity 		break;
1568b0c632dbSHeiko Carstens 	}
1569b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
1570bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1571bc923cc9SAvi Kivity 		break;
157214eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
157314eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
157414eebd91SCarsten Otte 		struct kvm_one_reg reg;
157514eebd91SCarsten Otte 		r = -EFAULT;
157614eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
157714eebd91SCarsten Otte 			break;
157814eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
157914eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
158014eebd91SCarsten Otte 		else
158114eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
158214eebd91SCarsten Otte 		break;
158314eebd91SCarsten Otte 	}
158427e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
158527e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
158627e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
158727e0393fSCarsten Otte 
158827e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
158927e0393fSCarsten Otte 			r = -EFAULT;
159027e0393fSCarsten Otte 			break;
159127e0393fSCarsten Otte 		}
159227e0393fSCarsten Otte 
159327e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
159427e0393fSCarsten Otte 			r = -EINVAL;
159527e0393fSCarsten Otte 			break;
159627e0393fSCarsten Otte 		}
159727e0393fSCarsten Otte 
159827e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
159927e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
160027e0393fSCarsten Otte 		break;
160127e0393fSCarsten Otte 	}
160227e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
160327e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
160427e0393fSCarsten Otte 
160527e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
160627e0393fSCarsten Otte 			r = -EFAULT;
160727e0393fSCarsten Otte 			break;
160827e0393fSCarsten Otte 		}
160927e0393fSCarsten Otte 
161027e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
161127e0393fSCarsten Otte 			r = -EINVAL;
161227e0393fSCarsten Otte 			break;
161327e0393fSCarsten Otte 		}
161427e0393fSCarsten Otte 
161527e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
161627e0393fSCarsten Otte 			ucasmap.length);
161727e0393fSCarsten Otte 		break;
161827e0393fSCarsten Otte 	}
161927e0393fSCarsten Otte #endif
1620ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
1621ccc7910fSCarsten Otte 		r = gmap_fault(arg, vcpu->arch.gmap);
1622ccc7910fSCarsten Otte 		if (!IS_ERR_VALUE(r))
1623ccc7910fSCarsten Otte 			r = 0;
1624ccc7910fSCarsten Otte 		break;
1625ccc7910fSCarsten Otte 	}
1626d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
1627d6712df9SCornelia Huck 	{
1628d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
1629d6712df9SCornelia Huck 		r = -EFAULT;
1630d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1631d6712df9SCornelia Huck 			break;
1632d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1633d6712df9SCornelia Huck 		break;
1634d6712df9SCornelia Huck 	}
1635b0c632dbSHeiko Carstens 	default:
16363e6afcf1SCarsten Otte 		r = -ENOTTY;
1637b0c632dbSHeiko Carstens 	}
1638bc923cc9SAvi Kivity 	return r;
1639b0c632dbSHeiko Carstens }
1640b0c632dbSHeiko Carstens 
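/*
 * mmap backend for the VCPU fd: user controlled VMs may map the SIE control
 * block at KVM_S390_SIE_PAGE_OFFSET, everything else gets SIGBUS.
 */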
16415b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
16425b1c1493SCarsten Otte {
16435b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
16445b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
16455b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
16465b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
16475b1c1493SCarsten Otte 		get_page(vmf->page);
16485b1c1493SCarsten Otte 		return 0;
16495b1c1493SCarsten Otte 	}
16505b1c1493SCarsten Otte #endif
16515b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
16525b1c1493SCarsten Otte }
16535b1c1493SCarsten Otte 
16545587027cSAneesh Kumar K.V void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1655db3fe4ebSTakuya Yoshikawa 			   struct kvm_memory_slot *dont)
1656db3fe4ebSTakuya Yoshikawa {
1657db3fe4ebSTakuya Yoshikawa }
1658db3fe4ebSTakuya Yoshikawa 
16595587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
16605587027cSAneesh Kumar K.V 			    unsigned long npages)
1661db3fe4ebSTakuya Yoshikawa {
1662db3fe4ebSTakuya Yoshikawa 	return 0;
1663db3fe4ebSTakuya Yoshikawa }
1664db3fe4ebSTakuya Yoshikawa 
1665e59dbe09STakuya Yoshikawa void kvm_arch_memslots_updated(struct kvm *kvm)
1666e59dbe09STakuya Yoshikawa {
1667e59dbe09STakuya Yoshikawa }
1668e59dbe09STakuya Yoshikawa 
1669b0c632dbSHeiko Carstens /* Section: memory related */
1670f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
1671f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
16727b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
16737b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
1674b0c632dbSHeiko Carstens {
1675dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end at a
1676dd2887e7SNick Wang 	   segment boundary (1MB). The memory in userland may be fragmented
1677dd2887e7SNick Wang 	   into various different vmas. It is okay to mmap() and munmap()
1678dd2887e7SNick Wang 	   stuff in this slot at any time after doing this call. */
1679b0c632dbSHeiko Carstens 
1680598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
1681b0c632dbSHeiko Carstens 		return -EINVAL;
1682b0c632dbSHeiko Carstens 
1683598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
1684b0c632dbSHeiko Carstens 		return -EINVAL;
1685b0c632dbSHeiko Carstens 
1686f7784b8eSMarcelo Tosatti 	return 0;
1687f7784b8eSMarcelo Tosatti }
1688f7784b8eSMarcelo Tosatti 
1689f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
1690f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
16918482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
16928482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
1693f7784b8eSMarcelo Tosatti {
1694f7850c92SCarsten Otte 	int rc;
1695f7784b8eSMarcelo Tosatti 
16962cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
16972cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
16982cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
16992cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
17002cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
17012cef4debSChristian Borntraeger 	 */
17022cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
17032cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
17042cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
17052cef4debSChristian Borntraeger 		return;
1706598841caSCarsten Otte 
1707598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1708598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
1709598841caSCarsten Otte 	if (rc)
1710f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1711598841caSCarsten Otte 	return;
1712b0c632dbSHeiko Carstens }
1713b0c632dbSHeiko Carstens 
17142df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_all(struct kvm *kvm)
17152df72e9bSMarcelo Tosatti {
17162df72e9bSMarcelo Tosatti }
17172df72e9bSMarcelo Tosatti 
17182df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
17192df72e9bSMarcelo Tosatti 				   struct kvm_memory_slot *slot)
172034d4cb8fSMarcelo Tosatti {
172134d4cb8fSMarcelo Tosatti }
172234d4cb8fSMarcelo Tosatti 
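/*
 * Module init: register with the common KVM code and build the facility
 * mask (vfacilities) that is later presented to guests.
 */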
1723b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
1724b0c632dbSHeiko Carstens {
1725ef50f7acSChristian Borntraeger 	int ret;
17260ee75beaSAvi Kivity 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1727ef50f7acSChristian Borntraeger 	if (ret)
1728ef50f7acSChristian Borntraeger 		return ret;
1729ef50f7acSChristian Borntraeger 
1730ef50f7acSChristian Borntraeger 	/*
1731ef50f7acSChristian Borntraeger 	 * guests can ask for up to 255+1 double words, so we need a full page
173225985edcSLucas De Marchi 	 * to hold the maximum amount of facilities. On the other hand, we
1733ef50f7acSChristian Borntraeger 	 * only set facilities that are known to work in KVM.
1734ef50f7acSChristian Borntraeger 	 */
173578c4b59fSMichael Mueller 	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
173678c4b59fSMichael Mueller 	if (!vfacilities) {
1737ef50f7acSChristian Borntraeger 		kvm_exit();
1738ef50f7acSChristian Borntraeger 		return -ENOMEM;
1739ef50f7acSChristian Borntraeger 	}
174078c4b59fSMichael Mueller 	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
1741d208c79dSThomas Huth 	vfacilities[0] &= 0xff82fff3f4fc2000UL;
17427feb6bb8SMichael Mueller 	vfacilities[1] &= 0x005c000000000000UL;
1743ef50f7acSChristian Borntraeger 	return 0;
1744b0c632dbSHeiko Carstens }
1745b0c632dbSHeiko Carstens 
1746b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
1747b0c632dbSHeiko Carstens {
174878c4b59fSMichael Mueller 	free_page((unsigned long) vfacilities);
1749b0c632dbSHeiko Carstens 	kvm_exit();
1750b0c632dbSHeiko Carstens }
1751b0c632dbSHeiko Carstens 
1752b0c632dbSHeiko Carstens module_init(kvm_s390_init);
1753b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
1754566af940SCornelia Huck 
1755566af940SCornelia Huck /*
1756566af940SCornelia Huck  * Enable autoloading of the kvm module.
1757566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1758566af940SCornelia Huck  * since x86 takes a different approach.
1759566af940SCornelia Huck  */
1760566af940SCornelia Huck #include <linux/miscdevice.h>
1761566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
1762566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
1763